problem_id (string, lengths 18-22) | source (string, 1 class) | task_type (string, 1 class) | in_source_id (string, lengths 13-58) | prompt (string, lengths 1.1k-25.4k) | golden_diff (string, lengths 145-5.13k) | verification_info (string, lengths 582-39.1k) | num_tokens (int64, 271-4.1k) | num_tokens_diff (int64, 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_42028
|
rasdani/github-patches
|
git_diff
|
plone__Products.CMFPlone-3562
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
addPloneSite swallows exceptions while importing extension profiles
If there's an exception while Products.CMFPlone.factory.addPloneSite is installing an extension profile, it is logged but does not abort execution. This causes problems if addPloneSite is used in a script and something is relying on the exit status of the script to determine whether the site was created successfully (like in https://github.com/plone/plone-backend/blob/5.2.x/skeleton/scripts/create_site.py). It also leads to confusion since it can hide the fact that the site was not fully configured.
Even worse, the exception is obscured in the log because Zope hits a different exception while trying to render a page showing the original exception. This happens because the not-yet-fully-configured Plone site is still set as the active zope.component site.
--- END ISSUE ---
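For illustration, here is a minimal, self-contained sketch (not Plone code; all names are made up) of the failure mode described above: when the installer only logs the exception, a wrapper script still exits with status 0, so callers cannot tell the site was left half-configured.
```python
# Hypothetical sketch, not from the Plone codebase: the installer logs the
# error and keeps going, so main() still returns 0 on failure.
import sys


def failing_profile():
    raise RuntimeError("profile import failed")


def add_site(install_steps):
    for step in install_steps:
        try:
            step()
        except Exception:
            print("error logged, execution continues")  # swallowed here


def main():
    add_site([failing_profile])
    return 0  # reports success even though a step failed


if __name__ == "__main__":
    sys.exit(main())
```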
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Products/CMFPlone/factory.py`
Content:
```
1 from logging import getLogger
2 from plone.registry.interfaces import IRegistry
3 from plone.uuid.handlers import addAttributeUUID
4 from Products.CMFCore.interfaces import ISiteRoot
5 from Products.CMFPlone import PloneMessageFactory as _
6 from Products.CMFPlone.events import SiteManagerCreatedEvent
7 from plone.base.interfaces import INonInstallable
8 from Products.CMFPlone.Portal import PloneSite
9 from Products.GenericSetup.tool import SetupTool
10 from Products.statusmessages.interfaces import IStatusMessage
11 from zope.component import queryUtility
12 from zope.component.hooks import setSite
13 from zope.event import notify
14 from zope.interface import implementer
15 from zope.lifecycleevent import ObjectCreatedEvent
16
17 _TOOL_ID = 'portal_setup'
18 _DEFAULT_PROFILE = 'Products.CMFPlone:plone'
19 _TYPES_PROFILE = 'plone.app.contenttypes:default'
20 _CONTENT_PROFILE = 'plone.app.contenttypes:plone-content'
21
22 # A little hint for PloneTestCase (pre-Plone 6.0)
23 _IMREALLYPLONE5 = True
24
25 # Marker hints for code that needs to know the major Plone version
26 # Works the same way than zcml condition hints so it contains the current and the
27 # last ones
28 PLONE52MARKER = True
29 PLONE60MARKER = True
30
31 logger = getLogger('Plone')
32
33
34 @implementer(INonInstallable)
35 class NonInstallable:
36
37 def getNonInstallableProducts(self):
38 return [
39 'CMFDefault', 'Products.CMFDefault',
40 'CMFPlone', 'Products.CMFPlone', 'Products.CMFPlone.migrations',
41 'CMFTopic', 'Products.CMFTopic',
42 'CMFUid', 'Products.CMFUid',
43 'DCWorkflow', 'Products.DCWorkflow',
44 'PasswordResetTool', 'Products.PasswordResetTool',
45 'PlonePAS', 'Products.PlonePAS',
46 'PloneLanguageTool', 'Products.PloneLanguageTool',
47 'MimetypesRegistry', 'Products.MimetypesRegistry',
48 'PortalTransforms', 'Products.PortalTransforms',
49 'CMFDiffTool', 'Products.CMFDiffTool',
50 'CMFEditions', 'Products.CMFEditions',
51 'Products.NuPlone',
52 'borg.localrole',
53 'plone.app.caching',
54 'plone.app.dexterity',
55 'plone.app.discussion',
56 'plone.app.event',
57 'plone.app.intid',
58 'plone.app.linkintegrity',
59 'plone.app.querystring',
60 'plone.app.registry',
61 'plone.app.referenceablebehavior',
62 'plone.app.relationfield',
63 'plone.app.theming',
64 'plone.app.users',
65 'plone.app.widgets',
66 'plone.app.z3cform',
67 'plone.formwidget.recurrence',
68 'plone.keyring',
69 'plone.outputfilters',
70 'plone.portlet.static',
71 'plone.portlet.collection',
72 'plone.protect',
73 'plone.resource',
74 'plonetheme.barceloneta',
75 ]
76
77 def getNonInstallableProfiles(self):
78 return [_DEFAULT_PROFILE,
79 _CONTENT_PROFILE,
80 'Products.CMFDiffTool:CMFDiffTool',
81 'Products.CMFEditions:CMFEditions',
82 'Products.CMFPlone:dependencies',
83 'Products.CMFPlone:testfixture',
84 'Products.NuPlone:uninstall',
85 'Products.MimetypesRegistry:MimetypesRegistry',
86 'Products.PasswordResetTool:PasswordResetTool',
87 'Products.PortalTransforms:PortalTransforms',
88 'Products.PloneLanguageTool:PloneLanguageTool',
89 'Products.PlonePAS:PlonePAS',
90 'borg.localrole:default',
91 'plone.browserlayer:default',
92 'plone.keyring:default',
93 'plone.outputfilters:default',
94 'plone.portlet.static:default',
95 'plone.portlet.collection:default',
96 'plone.protect:default',
97 'plone.app.contenttypes:default',
98 'plone.app.dexterity:default',
99 'plone.app.discussion:default',
100 'plone.app.event:default',
101 'plone.app.linkintegrity:default',
102 'plone.app.registry:default',
103 'plone.app.relationfield:default',
104 'plone.app.theming:default',
105 'plone.app.users:default',
106 'plone.app.versioningbehavior:default',
107 'plone.app.z3cform:default',
108 'plone.formwidget.recurrence:default',
109 'plone.resource:default',
110 ]
111
112
113 def zmi_constructor(context):
114 """This is a dummy constructor for the ZMI."""
115 url = context.DestinationURL()
116 request = context.REQUEST
117 return request.response.redirect(url + '/@@plone-addsite?site_id=Plone')
118
119
120 def addPloneSite(context, site_id, title='Plone site', description='',
121 profile_id=_DEFAULT_PROFILE,
122 content_profile_id=_CONTENT_PROFILE, snapshot=False,
123 extension_ids=(), setup_content=True,
124 default_language='en', portal_timezone='UTC'):
125 """Add a PloneSite to the context."""
126
127 site = PloneSite(site_id)
128 notify(ObjectCreatedEvent(site))
129 context[site_id] = site
130
131 site = context[site_id]
132 site.setLanguage(default_language)
133 # Set the accepted language for the rest of the request. This makes sure
134 # the front-page text gets the correct translation also when your browser
135 # prefers non-English and you choose English as language for the Plone
136 # Site.
137 request = context.REQUEST
138 request['HTTP_ACCEPT_LANGUAGE'] = default_language
139
140 site[_TOOL_ID] = SetupTool(_TOOL_ID)
141 setup_tool = site[_TOOL_ID]
142
143 notify(SiteManagerCreatedEvent(site))
144 setSite(site)
145
146 setup_tool.setBaselineContext('profile-%s' % profile_id)
147 setup_tool.runAllImportStepsFromProfile('profile-%s' % profile_id)
148
149 reg = queryUtility(IRegistry, context=site)
150 reg['plone.portal_timezone'] = portal_timezone
151 reg['plone.available_timezones'] = [portal_timezone]
152 reg['plone.default_language'] = default_language
153 reg['plone.available_languages'] = [default_language]
154
155 # Install default content types profile if user do not select "example content"
156 # during site creation.
157 content_types_profile = content_profile_id if setup_content else _TYPES_PROFILE
158
159 setup_tool.runAllImportStepsFromProfile(f'profile-{content_types_profile}')
160
161 props = dict(
162 title=title,
163 description=description,
164 )
165 # Do this before applying extension profiles, so the settings from a
166 # properties.xml file are applied and not overwritten by this
167 site.manage_changeProperties(**props)
168
169 for extension_id in extension_ids:
170 try:
171 setup_tool.runAllImportStepsFromProfile(
172 'profile-%s' % extension_id)
173 except Exception as msg:
174 IStatusMessage(request).add(_(
175 'Could not install ${profile_id}: ${error_msg}! '
176 'Please try to install it manually using the "Addons" '
177 'controlpanel and report any issues to the '
178 'addon maintainers.',
179 mapping={
180 'profile_id': extension_id,
181 'error_msg': msg.args,
182 }),
183 type='error')
184 logger.exception(
185 'Error while installing addon {}. '
186 'See traceback below for details.'.format(extension_id))
187
188 if snapshot is True:
189 setup_tool.createSnapshot('initial_configuration')
190
191 return site
192
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/Products/CMFPlone/factory.py b/Products/CMFPlone/factory.py
--- a/Products/CMFPlone/factory.py
+++ b/Products/CMFPlone/factory.py
@@ -1,13 +1,10 @@
from logging import getLogger
from plone.registry.interfaces import IRegistry
-from plone.uuid.handlers import addAttributeUUID
-from Products.CMFCore.interfaces import ISiteRoot
from Products.CMFPlone import PloneMessageFactory as _
from Products.CMFPlone.events import SiteManagerCreatedEvent
from plone.base.interfaces import INonInstallable
from Products.CMFPlone.Portal import PloneSite
from Products.GenericSetup.tool import SetupTool
-from Products.statusmessages.interfaces import IStatusMessage
from zope.component import queryUtility
from zope.component.hooks import setSite
from zope.event import notify
@@ -143,49 +140,41 @@
notify(SiteManagerCreatedEvent(site))
setSite(site)
- setup_tool.setBaselineContext('profile-%s' % profile_id)
- setup_tool.runAllImportStepsFromProfile('profile-%s' % profile_id)
-
- reg = queryUtility(IRegistry, context=site)
- reg['plone.portal_timezone'] = portal_timezone
- reg['plone.available_timezones'] = [portal_timezone]
- reg['plone.default_language'] = default_language
- reg['plone.available_languages'] = [default_language]
-
- # Install default content types profile if user do not select "example content"
- # during site creation.
- content_types_profile = content_profile_id if setup_content else _TYPES_PROFILE
-
- setup_tool.runAllImportStepsFromProfile(f'profile-{content_types_profile}')
-
- props = dict(
- title=title,
- description=description,
- )
- # Do this before applying extension profiles, so the settings from a
- # properties.xml file are applied and not overwritten by this
- site.manage_changeProperties(**props)
-
- for extension_id in extension_ids:
- try:
- setup_tool.runAllImportStepsFromProfile(
- 'profile-%s' % extension_id)
- except Exception as msg:
- IStatusMessage(request).add(_(
- 'Could not install ${profile_id}: ${error_msg}! '
- 'Please try to install it manually using the "Addons" '
- 'controlpanel and report any issues to the '
- 'addon maintainers.',
- mapping={
- 'profile_id': extension_id,
- 'error_msg': msg.args,
- }),
- type='error')
- logger.exception(
- 'Error while installing addon {}. '
- 'See traceback below for details.'.format(extension_id))
-
- if snapshot is True:
- setup_tool.createSnapshot('initial_configuration')
-
- return site
+ try:
+ setup_tool.setBaselineContext('profile-%s' % profile_id)
+ setup_tool.runAllImportStepsFromProfile('profile-%s' % profile_id)
+
+ reg = queryUtility(IRegistry, context=site)
+ reg['plone.portal_timezone'] = portal_timezone
+ reg['plone.available_timezones'] = [portal_timezone]
+ reg['plone.default_language'] = default_language
+ reg['plone.available_languages'] = [default_language]
+
+ # Install default content types profile if user do not select "example content"
+ # during site creation.
+ content_types_profile = content_profile_id if setup_content else _TYPES_PROFILE
+
+ setup_tool.runAllImportStepsFromProfile(f'profile-{content_types_profile}')
+
+ props = dict(
+ title=title,
+ description=description,
+ )
+ # Do this before applying extension profiles, so the settings from a
+ # properties.xml file are applied and not overwritten by this
+ site.manage_changeProperties(**props)
+
+ for extension_id in extension_ids:
+ try:
+ setup_tool.runAllImportStepsFromProfile(f"profile-{extension_id}")
+ except Exception:
+ logger.error(f"Error while installing profile {extension_id}:")
+ raise
+
+ if snapshot is True:
+ setup_tool.createSnapshot('initial_configuration')
+
+ return site
+ except Exception:
+ setSite(None)
+ raise
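A reduced sketch of the pattern this patch applies: run the setup steps inside a try block, clear the active component site on failure, and re-raise so callers see the error. The names below are placeholders, not the real GenericSetup or zope.component APIs.
```python
# Illustration only; setup_steps and set_active_site stand in for the real
# GenericSetup import steps and zope.component.hooks.setSite.
def create_site(setup_steps, set_active_site):
    set_active_site("new-site")
    try:
        for step in setup_steps:
            step()
        return "new-site"
    except Exception:
        # Reset global state before propagating, so the error is not
        # rendered against a half-configured site.
        set_active_site(None)
        raise
```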
|
{"golden_diff": "diff --git a/Products/CMFPlone/factory.py b/Products/CMFPlone/factory.py\n--- a/Products/CMFPlone/factory.py\n+++ b/Products/CMFPlone/factory.py\n@@ -1,13 +1,10 @@\n from logging import getLogger\n from plone.registry.interfaces import IRegistry\n-from plone.uuid.handlers import addAttributeUUID\n-from Products.CMFCore.interfaces import ISiteRoot\n from Products.CMFPlone import PloneMessageFactory as _\n from Products.CMFPlone.events import SiteManagerCreatedEvent\n from plone.base.interfaces import INonInstallable\n from Products.CMFPlone.Portal import PloneSite\n from Products.GenericSetup.tool import SetupTool\n-from Products.statusmessages.interfaces import IStatusMessage\n from zope.component import queryUtility\n from zope.component.hooks import setSite\n from zope.event import notify\n@@ -143,49 +140,41 @@\n notify(SiteManagerCreatedEvent(site))\n setSite(site)\n \n- setup_tool.setBaselineContext('profile-%s' % profile_id)\n- setup_tool.runAllImportStepsFromProfile('profile-%s' % profile_id)\n-\n- reg = queryUtility(IRegistry, context=site)\n- reg['plone.portal_timezone'] = portal_timezone\n- reg['plone.available_timezones'] = [portal_timezone]\n- reg['plone.default_language'] = default_language\n- reg['plone.available_languages'] = [default_language]\n-\n- # Install default content types profile if user do not select \"example content\"\n- # during site creation.\n- content_types_profile = content_profile_id if setup_content else _TYPES_PROFILE\n-\n- setup_tool.runAllImportStepsFromProfile(f'profile-{content_types_profile}')\n-\n- props = dict(\n- title=title,\n- description=description,\n- )\n- # Do this before applying extension profiles, so the settings from a\n- # properties.xml file are applied and not overwritten by this\n- site.manage_changeProperties(**props)\n-\n- for extension_id in extension_ids:\n- try:\n- setup_tool.runAllImportStepsFromProfile(\n- 'profile-%s' % extension_id)\n- except Exception as msg:\n- IStatusMessage(request).add(_(\n- 'Could not install ${profile_id}: ${error_msg}! '\n- 'Please try to install it manually using the \"Addons\" '\n- 'controlpanel and report any issues to the '\n- 'addon maintainers.',\n- mapping={\n- 'profile_id': extension_id,\n- 'error_msg': msg.args,\n- }),\n- type='error')\n- logger.exception(\n- 'Error while installing addon {}. 
'\n- 'See traceback below for details.'.format(extension_id))\n-\n- if snapshot is True:\n- setup_tool.createSnapshot('initial_configuration')\n-\n- return site\n+ try:\n+ setup_tool.setBaselineContext('profile-%s' % profile_id)\n+ setup_tool.runAllImportStepsFromProfile('profile-%s' % profile_id)\n+\n+ reg = queryUtility(IRegistry, context=site)\n+ reg['plone.portal_timezone'] = portal_timezone\n+ reg['plone.available_timezones'] = [portal_timezone]\n+ reg['plone.default_language'] = default_language\n+ reg['plone.available_languages'] = [default_language]\n+\n+ # Install default content types profile if user do not select \"example content\"\n+ # during site creation.\n+ content_types_profile = content_profile_id if setup_content else _TYPES_PROFILE\n+\n+ setup_tool.runAllImportStepsFromProfile(f'profile-{content_types_profile}')\n+\n+ props = dict(\n+ title=title,\n+ description=description,\n+ )\n+ # Do this before applying extension profiles, so the settings from a\n+ # properties.xml file are applied and not overwritten by this\n+ site.manage_changeProperties(**props)\n+\n+ for extension_id in extension_ids:\n+ try:\n+ setup_tool.runAllImportStepsFromProfile(f\"profile-{extension_id}\")\n+ except Exception:\n+ logger.error(f\"Error while installing profile {extension_id}:\")\n+ raise\n+\n+ if snapshot is True:\n+ setup_tool.createSnapshot('initial_configuration')\n+\n+ return site\n+ except Exception:\n+ setSite(None)\n+ raise\n", "issue": "addPloneSite swallows exceptions while importing extension profiles\nIf there's an exception while Products.CMFPlone.factory.addPloneSite is installing an extension profile, it is logged but does not abort execution. This causes problems if addPloneSite is used in a script and something is relying on the exit status of the script to determine whether the site was created successfully (like in https://github.com/plone/plone-backend/blob/5.2.x/skeleton/scripts/create_site.py). It also leads to confusion since it can hide the fact that the site was not fully configured.\r\n\r\nEven worse, the exception is obscured in the log because Zope hits a different exception while trying to render a page showing the original exception. 
This happens because the not-yet-fully-configured Plone site is still set as the active zope.component site.\n", "before_files": [{"content": "from logging import getLogger\nfrom plone.registry.interfaces import IRegistry\nfrom plone.uuid.handlers import addAttributeUUID\nfrom Products.CMFCore.interfaces import ISiteRoot\nfrom Products.CMFPlone import PloneMessageFactory as _\nfrom Products.CMFPlone.events import SiteManagerCreatedEvent\nfrom plone.base.interfaces import INonInstallable\nfrom Products.CMFPlone.Portal import PloneSite\nfrom Products.GenericSetup.tool import SetupTool\nfrom Products.statusmessages.interfaces import IStatusMessage\nfrom zope.component import queryUtility\nfrom zope.component.hooks import setSite\nfrom zope.event import notify\nfrom zope.interface import implementer\nfrom zope.lifecycleevent import ObjectCreatedEvent\n\n_TOOL_ID = 'portal_setup'\n_DEFAULT_PROFILE = 'Products.CMFPlone:plone'\n_TYPES_PROFILE = 'plone.app.contenttypes:default'\n_CONTENT_PROFILE = 'plone.app.contenttypes:plone-content'\n\n# A little hint for PloneTestCase (pre-Plone 6.0)\n_IMREALLYPLONE5 = True\n\n# Marker hints for code that needs to know the major Plone version\n# Works the same way than zcml condition hints so it contains the current and the\n# last ones\nPLONE52MARKER = True\nPLONE60MARKER = True\n\nlogger = getLogger('Plone')\n\n\n@implementer(INonInstallable)\nclass NonInstallable:\n\n def getNonInstallableProducts(self):\n return [\n 'CMFDefault', 'Products.CMFDefault',\n 'CMFPlone', 'Products.CMFPlone', 'Products.CMFPlone.migrations',\n 'CMFTopic', 'Products.CMFTopic',\n 'CMFUid', 'Products.CMFUid',\n 'DCWorkflow', 'Products.DCWorkflow',\n 'PasswordResetTool', 'Products.PasswordResetTool',\n 'PlonePAS', 'Products.PlonePAS',\n 'PloneLanguageTool', 'Products.PloneLanguageTool',\n 'MimetypesRegistry', 'Products.MimetypesRegistry',\n 'PortalTransforms', 'Products.PortalTransforms',\n 'CMFDiffTool', 'Products.CMFDiffTool',\n 'CMFEditions', 'Products.CMFEditions',\n 'Products.NuPlone',\n 'borg.localrole',\n 'plone.app.caching',\n 'plone.app.dexterity',\n 'plone.app.discussion',\n 'plone.app.event',\n 'plone.app.intid',\n 'plone.app.linkintegrity',\n 'plone.app.querystring',\n 'plone.app.registry',\n 'plone.app.referenceablebehavior',\n 'plone.app.relationfield',\n 'plone.app.theming',\n 'plone.app.users',\n 'plone.app.widgets',\n 'plone.app.z3cform',\n 'plone.formwidget.recurrence',\n 'plone.keyring',\n 'plone.outputfilters',\n 'plone.portlet.static',\n 'plone.portlet.collection',\n 'plone.protect',\n 'plone.resource',\n 'plonetheme.barceloneta',\n ]\n\n def getNonInstallableProfiles(self):\n return [_DEFAULT_PROFILE,\n _CONTENT_PROFILE,\n 'Products.CMFDiffTool:CMFDiffTool',\n 'Products.CMFEditions:CMFEditions',\n 'Products.CMFPlone:dependencies',\n 'Products.CMFPlone:testfixture',\n 'Products.NuPlone:uninstall',\n 'Products.MimetypesRegistry:MimetypesRegistry',\n 'Products.PasswordResetTool:PasswordResetTool',\n 'Products.PortalTransforms:PortalTransforms',\n 'Products.PloneLanguageTool:PloneLanguageTool',\n 'Products.PlonePAS:PlonePAS',\n 'borg.localrole:default',\n 'plone.browserlayer:default',\n 'plone.keyring:default',\n 'plone.outputfilters:default',\n 'plone.portlet.static:default',\n 'plone.portlet.collection:default',\n 'plone.protect:default',\n 'plone.app.contenttypes:default',\n 'plone.app.dexterity:default',\n 'plone.app.discussion:default',\n 'plone.app.event:default',\n 'plone.app.linkintegrity:default',\n 'plone.app.registry:default',\n 
'plone.app.relationfield:default',\n 'plone.app.theming:default',\n 'plone.app.users:default',\n 'plone.app.versioningbehavior:default',\n 'plone.app.z3cform:default',\n 'plone.formwidget.recurrence:default',\n 'plone.resource:default',\n ]\n\n\ndef zmi_constructor(context):\n \"\"\"This is a dummy constructor for the ZMI.\"\"\"\n url = context.DestinationURL()\n request = context.REQUEST\n return request.response.redirect(url + '/@@plone-addsite?site_id=Plone')\n\n\ndef addPloneSite(context, site_id, title='Plone site', description='',\n profile_id=_DEFAULT_PROFILE,\n content_profile_id=_CONTENT_PROFILE, snapshot=False,\n extension_ids=(), setup_content=True,\n default_language='en', portal_timezone='UTC'):\n \"\"\"Add a PloneSite to the context.\"\"\"\n\n site = PloneSite(site_id)\n notify(ObjectCreatedEvent(site))\n context[site_id] = site\n\n site = context[site_id]\n site.setLanguage(default_language)\n # Set the accepted language for the rest of the request. This makes sure\n # the front-page text gets the correct translation also when your browser\n # prefers non-English and you choose English as language for the Plone\n # Site.\n request = context.REQUEST\n request['HTTP_ACCEPT_LANGUAGE'] = default_language\n\n site[_TOOL_ID] = SetupTool(_TOOL_ID)\n setup_tool = site[_TOOL_ID]\n\n notify(SiteManagerCreatedEvent(site))\n setSite(site)\n\n setup_tool.setBaselineContext('profile-%s' % profile_id)\n setup_tool.runAllImportStepsFromProfile('profile-%s' % profile_id)\n\n reg = queryUtility(IRegistry, context=site)\n reg['plone.portal_timezone'] = portal_timezone\n reg['plone.available_timezones'] = [portal_timezone]\n reg['plone.default_language'] = default_language\n reg['plone.available_languages'] = [default_language]\n\n # Install default content types profile if user do not select \"example content\"\n # during site creation.\n content_types_profile = content_profile_id if setup_content else _TYPES_PROFILE\n\n setup_tool.runAllImportStepsFromProfile(f'profile-{content_types_profile}')\n\n props = dict(\n title=title,\n description=description,\n )\n # Do this before applying extension profiles, so the settings from a\n # properties.xml file are applied and not overwritten by this\n site.manage_changeProperties(**props)\n\n for extension_id in extension_ids:\n try:\n setup_tool.runAllImportStepsFromProfile(\n 'profile-%s' % extension_id)\n except Exception as msg:\n IStatusMessage(request).add(_(\n 'Could not install ${profile_id}: ${error_msg}! '\n 'Please try to install it manually using the \"Addons\" '\n 'controlpanel and report any issues to the '\n 'addon maintainers.',\n mapping={\n 'profile_id': extension_id,\n 'error_msg': msg.args,\n }),\n type='error')\n logger.exception(\n 'Error while installing addon {}. 
'\n 'See traceback below for details.'.format(extension_id))\n\n if snapshot is True:\n setup_tool.createSnapshot('initial_configuration')\n\n return site\n", "path": "Products/CMFPlone/factory.py"}], "after_files": [{"content": "from logging import getLogger\nfrom plone.registry.interfaces import IRegistry\nfrom Products.CMFPlone import PloneMessageFactory as _\nfrom Products.CMFPlone.events import SiteManagerCreatedEvent\nfrom plone.base.interfaces import INonInstallable\nfrom Products.CMFPlone.Portal import PloneSite\nfrom Products.GenericSetup.tool import SetupTool\nfrom zope.component import queryUtility\nfrom zope.component.hooks import setSite\nfrom zope.event import notify\nfrom zope.interface import implementer\nfrom zope.lifecycleevent import ObjectCreatedEvent\n\n_TOOL_ID = 'portal_setup'\n_DEFAULT_PROFILE = 'Products.CMFPlone:plone'\n_TYPES_PROFILE = 'plone.app.contenttypes:default'\n_CONTENT_PROFILE = 'plone.app.contenttypes:plone-content'\n\n# A little hint for PloneTestCase (pre-Plone 6.0)\n_IMREALLYPLONE5 = True\n\n# Marker hints for code that needs to know the major Plone version\n# Works the same way than zcml condition hints so it contains the current and the\n# last ones\nPLONE52MARKER = True\nPLONE60MARKER = True\n\nlogger = getLogger('Plone')\n\n\n@implementer(INonInstallable)\nclass NonInstallable:\n\n def getNonInstallableProducts(self):\n return [\n 'CMFDefault', 'Products.CMFDefault',\n 'CMFPlone', 'Products.CMFPlone', 'Products.CMFPlone.migrations',\n 'CMFTopic', 'Products.CMFTopic',\n 'CMFUid', 'Products.CMFUid',\n 'DCWorkflow', 'Products.DCWorkflow',\n 'PasswordResetTool', 'Products.PasswordResetTool',\n 'PlonePAS', 'Products.PlonePAS',\n 'PloneLanguageTool', 'Products.PloneLanguageTool',\n 'MimetypesRegistry', 'Products.MimetypesRegistry',\n 'PortalTransforms', 'Products.PortalTransforms',\n 'CMFDiffTool', 'Products.CMFDiffTool',\n 'CMFEditions', 'Products.CMFEditions',\n 'Products.NuPlone',\n 'borg.localrole',\n 'plone.app.caching',\n 'plone.app.dexterity',\n 'plone.app.discussion',\n 'plone.app.event',\n 'plone.app.intid',\n 'plone.app.linkintegrity',\n 'plone.app.querystring',\n 'plone.app.registry',\n 'plone.app.referenceablebehavior',\n 'plone.app.relationfield',\n 'plone.app.theming',\n 'plone.app.users',\n 'plone.app.widgets',\n 'plone.app.z3cform',\n 'plone.formwidget.recurrence',\n 'plone.keyring',\n 'plone.outputfilters',\n 'plone.portlet.static',\n 'plone.portlet.collection',\n 'plone.protect',\n 'plone.resource',\n 'plonetheme.barceloneta',\n ]\n\n def getNonInstallableProfiles(self):\n return [_DEFAULT_PROFILE,\n _CONTENT_PROFILE,\n 'Products.CMFDiffTool:CMFDiffTool',\n 'Products.CMFEditions:CMFEditions',\n 'Products.CMFPlone:dependencies',\n 'Products.CMFPlone:testfixture',\n 'Products.NuPlone:uninstall',\n 'Products.MimetypesRegistry:MimetypesRegistry',\n 'Products.PasswordResetTool:PasswordResetTool',\n 'Products.PortalTransforms:PortalTransforms',\n 'Products.PloneLanguageTool:PloneLanguageTool',\n 'Products.PlonePAS:PlonePAS',\n 'borg.localrole:default',\n 'plone.browserlayer:default',\n 'plone.keyring:default',\n 'plone.outputfilters:default',\n 'plone.portlet.static:default',\n 'plone.portlet.collection:default',\n 'plone.protect:default',\n 'plone.app.contenttypes:default',\n 'plone.app.dexterity:default',\n 'plone.app.discussion:default',\n 'plone.app.event:default',\n 'plone.app.linkintegrity:default',\n 'plone.app.registry:default',\n 'plone.app.relationfield:default',\n 'plone.app.theming:default',\n 'plone.app.users:default',\n 
'plone.app.versioningbehavior:default',\n 'plone.app.z3cform:default',\n 'plone.formwidget.recurrence:default',\n 'plone.resource:default',\n ]\n\n\ndef zmi_constructor(context):\n \"\"\"This is a dummy constructor for the ZMI.\"\"\"\n url = context.DestinationURL()\n request = context.REQUEST\n return request.response.redirect(url + '/@@plone-addsite?site_id=Plone')\n\n\ndef addPloneSite(context, site_id, title='Plone site', description='',\n profile_id=_DEFAULT_PROFILE,\n content_profile_id=_CONTENT_PROFILE, snapshot=False,\n extension_ids=(), setup_content=True,\n default_language='en', portal_timezone='UTC'):\n \"\"\"Add a PloneSite to the context.\"\"\"\n\n site = PloneSite(site_id)\n notify(ObjectCreatedEvent(site))\n context[site_id] = site\n\n site = context[site_id]\n site.setLanguage(default_language)\n # Set the accepted language for the rest of the request. This makes sure\n # the front-page text gets the correct translation also when your browser\n # prefers non-English and you choose English as language for the Plone\n # Site.\n request = context.REQUEST\n request['HTTP_ACCEPT_LANGUAGE'] = default_language\n\n site[_TOOL_ID] = SetupTool(_TOOL_ID)\n setup_tool = site[_TOOL_ID]\n\n notify(SiteManagerCreatedEvent(site))\n setSite(site)\n\n try:\n setup_tool.setBaselineContext('profile-%s' % profile_id)\n setup_tool.runAllImportStepsFromProfile('profile-%s' % profile_id)\n\n reg = queryUtility(IRegistry, context=site)\n reg['plone.portal_timezone'] = portal_timezone\n reg['plone.available_timezones'] = [portal_timezone]\n reg['plone.default_language'] = default_language\n reg['plone.available_languages'] = [default_language]\n\n # Install default content types profile if user do not select \"example content\"\n # during site creation.\n content_types_profile = content_profile_id if setup_content else _TYPES_PROFILE\n\n setup_tool.runAllImportStepsFromProfile(f'profile-{content_types_profile}')\n\n props = dict(\n title=title,\n description=description,\n )\n # Do this before applying extension profiles, so the settings from a\n # properties.xml file are applied and not overwritten by this\n site.manage_changeProperties(**props)\n\n for extension_id in extension_ids:\n try:\n setup_tool.runAllImportStepsFromProfile(f\"profile-{extension_id}\")\n except Exception:\n logger.error(f\"Error while installing profile {extension_id}:\")\n raise\n\n if snapshot is True:\n setup_tool.createSnapshot('initial_configuration')\n\n return site\n except Exception:\n setSite(None)\n raise\n", "path": "Products/CMFPlone/factory.py"}]}
| 2,572 | 957 |
gh_patches_debug_21777
|
rasdani/github-patches
|
git_diff
|
zulip__zulip-19818
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
markdown: Document built-in preprocessor priorities.
As a follow-up to #19783, it would be good to document the priorities assigned to the built-in preprocessors that the Python-Markdown library has. A couple of notes:
- This involves a bit of grunt work, the quickest way to do this is to loop over and print `md_engine.preprocessors._priorities` in `zerver/lib/templates.py`.
- Note that in `templates.py`, there are different cases where different sets of preprocessors are added, so one has to do the additional work to figure out which preprocessors are running in which of those cases and then document all the priorities that are for built-in preprocessors.
- The file to put these priorities in is: `zerver/lib/markdown/preprocessor_priorities..py`.
Thanks!
--- END ISSUE ---
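As a rough sketch of the inspection loop the issue suggests (the registry's priority list is not a public Python-Markdown API, so the attribute spelling may vary between versions):
```python
# One-off inspection only; falls back between the two attribute spellings
# seen in different Python-Markdown versions.
import markdown

md_engine = markdown.Markdown()
registry = md_engine.preprocessors
items = getattr(registry, "_priorities", None) or getattr(registry, "_priority", [])
for item in items:
    print(item)
```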
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zerver/lib/markdown/preprocessor_priorities.py`
Content:
```
1 # Note that in the Markdown preprocessor registry, the highest
2 # numeric value is considered the highest priority, so the dict
3 # below is ordered from highest-to-lowest priority.
4 PREPROCESSOR_PRIORITES = {
5 "generate_parameter_description": 535,
6 "generate_response_description": 531,
7 "generate_api_title": 531,
8 "generate_api_description": 530,
9 "generate_code_example": 525,
10 "generate_return_values": 510,
11 "generate_api_arguments": 505,
12 "include": 500,
13 "help_relative_links": 475,
14 "setting": 450,
15 "fenced_code_block": 25,
16 "tabbed_sections": -500,
17 "nested_code_blocks": -500,
18 "emoticon_translations": -505,
19 }
20
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/zerver/lib/markdown/preprocessor_priorities.py b/zerver/lib/markdown/preprocessor_priorities.py
--- a/zerver/lib/markdown/preprocessor_priorities.py
+++ b/zerver/lib/markdown/preprocessor_priorities.py
@@ -1,6 +1,7 @@
# Note that in the Markdown preprocessor registry, the highest
# numeric value is considered the highest priority, so the dict
# below is ordered from highest-to-lowest priority.
+# Priorities for the built-in preprocessors are commented out.
PREPROCESSOR_PRIORITES = {
"generate_parameter_description": 535,
"generate_response_description": 531,
@@ -10,9 +11,12 @@
"generate_return_values": 510,
"generate_api_arguments": 505,
"include": 500,
+ # "include_wrapper": 500,
"help_relative_links": 475,
"setting": 450,
+ # "normalize_whitespace": 30,
"fenced_code_block": 25,
+ # "html_block": 20,
"tabbed_sections": -500,
"nested_code_blocks": -500,
"emoticon_translations": -505,
|
{"golden_diff": "diff --git a/zerver/lib/markdown/preprocessor_priorities.py b/zerver/lib/markdown/preprocessor_priorities.py\n--- a/zerver/lib/markdown/preprocessor_priorities.py\n+++ b/zerver/lib/markdown/preprocessor_priorities.py\n@@ -1,6 +1,7 @@\n # Note that in the Markdown preprocessor registry, the highest\n # numeric value is considered the highest priority, so the dict\n # below is ordered from highest-to-lowest priority.\n+# Priorities for the built-in preprocessors are commented out.\n PREPROCESSOR_PRIORITES = {\n \"generate_parameter_description\": 535,\n \"generate_response_description\": 531,\n@@ -10,9 +11,12 @@\n \"generate_return_values\": 510,\n \"generate_api_arguments\": 505,\n \"include\": 500,\n+ # \"include_wrapper\": 500,\n \"help_relative_links\": 475,\n \"setting\": 450,\n+ # \"normalize_whitespace\": 30,\n \"fenced_code_block\": 25,\n+ # \"html_block\": 20,\n \"tabbed_sections\": -500,\n \"nested_code_blocks\": -500,\n \"emoticon_translations\": -505,\n", "issue": "markdown: Document built-in preprocessor priorities.\nAs a follow-up to #19783, it would be good to document the priorities assigned to the built-in preprocessors that the Python-Markdown library has. A couple of notes:\r\n- This involves a bit of grunt work, the quickest way to do this is to loop over and print `md_engine.preprocessors._priorities` in `zerver/lib/templates.py`.\r\n- Note that in `templates.py`, there are different cases where different sets of preprocessors are added, so one has to do the additional work to figure out which preprocessors are running in which of those cases and then document all the priorities that are for built-in preprocessors.\r\n- The file to put these priorities in is: `zerver/lib/markdown/preprocessor_priorities..py`.\r\n\r\nThanks!\n", "before_files": [{"content": "# Note that in the Markdown preprocessor registry, the highest\n# numeric value is considered the highest priority, so the dict\n# below is ordered from highest-to-lowest priority.\nPREPROCESSOR_PRIORITES = {\n \"generate_parameter_description\": 535,\n \"generate_response_description\": 531,\n \"generate_api_title\": 531,\n \"generate_api_description\": 530,\n \"generate_code_example\": 525,\n \"generate_return_values\": 510,\n \"generate_api_arguments\": 505,\n \"include\": 500,\n \"help_relative_links\": 475,\n \"setting\": 450,\n \"fenced_code_block\": 25,\n \"tabbed_sections\": -500,\n \"nested_code_blocks\": -500,\n \"emoticon_translations\": -505,\n}\n", "path": "zerver/lib/markdown/preprocessor_priorities.py"}], "after_files": [{"content": "# Note that in the Markdown preprocessor registry, the highest\n# numeric value is considered the highest priority, so the dict\n# below is ordered from highest-to-lowest priority.\n# Priorities for the built-in preprocessors are commented out.\nPREPROCESSOR_PRIORITES = {\n \"generate_parameter_description\": 535,\n \"generate_response_description\": 531,\n \"generate_api_title\": 531,\n \"generate_api_description\": 530,\n \"generate_code_example\": 525,\n \"generate_return_values\": 510,\n \"generate_api_arguments\": 505,\n \"include\": 500,\n # \"include_wrapper\": 500,\n \"help_relative_links\": 475,\n \"setting\": 450,\n # \"normalize_whitespace\": 30,\n \"fenced_code_block\": 25,\n # \"html_block\": 20,\n \"tabbed_sections\": -500,\n \"nested_code_blocks\": -500,\n \"emoticon_translations\": -505,\n}\n", "path": "zerver/lib/markdown/preprocessor_priorities.py"}]}
| 664 | 286 |
gh_patches_debug_33064
|
rasdani/github-patches
|
git_diff
|
Textualize__textual-3825
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add message `Collapsible.Toggled`
What it says on the tin.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/textual/widgets/_collapsible.py`
Content:
```
1 from __future__ import annotations
2
3 from rich.console import RenderableType
4 from rich.text import Text
5
6 from .. import events
7 from ..app import ComposeResult
8 from ..binding import Binding
9 from ..containers import Container
10 from ..css.query import NoMatches
11 from ..message import Message
12 from ..reactive import reactive
13 from ..widget import Widget
14
15 __all__ = ["Collapsible", "CollapsibleTitle"]
16
17
18 class CollapsibleTitle(Widget, can_focus=True):
19 """Title and symbol for the Collapsible."""
20
21 DEFAULT_CSS = """
22 CollapsibleTitle {
23 width: auto;
24 height: auto;
25 padding: 0 1 0 1;
26 }
27
28 CollapsibleTitle:hover {
29 background: $foreground 10%;
30 color: $text;
31 }
32
33 CollapsibleTitle:focus {
34 background: $accent;
35 color: $text;
36 }
37 """
38
39 BINDINGS = [Binding("enter", "toggle", "Toggle collapsible", show=False)]
40 """
41 | Key(s) | Description |
42 | :- | :- |
43 | enter | Toggle the collapsible. |
44 """
45
46 collapsed = reactive(True)
47
48 def __init__(
49 self,
50 *,
51 label: str,
52 collapsed_symbol: str,
53 expanded_symbol: str,
54 collapsed: bool,
55 ) -> None:
56 super().__init__()
57 self.collapsed_symbol = collapsed_symbol
58 self.expanded_symbol = expanded_symbol
59 self.label = label
60 self.collapse = collapsed
61
62 class Toggle(Message):
63 """Request toggle."""
64
65 async def _on_click(self, event: events.Click) -> None:
66 """Inform ancestor we want to toggle."""
67 event.stop()
68 self.post_message(self.Toggle())
69
70 def action_toggle(self) -> None:
71 """Toggle the state of the parent collapsible."""
72 self.post_message(self.Toggle())
73
74 def render(self) -> RenderableType:
75 """Compose right/down arrow and label."""
76 if self.collapsed:
77 return Text(f"{self.collapsed_symbol} {self.label}")
78 else:
79 return Text(f"{self.expanded_symbol} {self.label}")
80
81
82 class Collapsible(Widget):
83 """A collapsible container."""
84
85 collapsed = reactive(True)
86
87 DEFAULT_CSS = """
88 Collapsible {
89 width: 1fr;
90 height: auto;
91 background: $boost;
92 border-top: hkey $background;
93 padding-bottom: 1;
94 padding-left: 1;
95 }
96
97 Collapsible.-collapsed > Contents {
98 display: none;
99 }
100 """
101
102 class Contents(Container):
103 DEFAULT_CSS = """
104 Contents {
105 width: 100%;
106 height: auto;
107 padding: 1 0 0 3;
108 }
109 """
110
111 def __init__(
112 self,
113 *children: Widget,
114 title: str = "Toggle",
115 collapsed: bool = True,
116 collapsed_symbol: str = "▶",
117 expanded_symbol: str = "▼",
118 name: str | None = None,
119 id: str | None = None,
120 classes: str | None = None,
121 disabled: bool = False,
122 ) -> None:
123 """Initialize a Collapsible widget.
124
125 Args:
126 *children: Contents that will be collapsed/expanded.
127 title: Title of the collapsed/expanded contents.
128 collapsed: Default status of the contents.
129 collapsed_symbol: Collapsed symbol before the title.
130 expanded_symbol: Expanded symbol before the title.
131 name: The name of the collapsible.
132 id: The ID of the collapsible in the DOM.
133 classes: The CSS classes of the collapsible.
134 disabled: Whether the collapsible is disabled or not.
135 """
136 self._title = CollapsibleTitle(
137 label=title,
138 collapsed_symbol=collapsed_symbol,
139 expanded_symbol=expanded_symbol,
140 collapsed=collapsed,
141 )
142 self._contents_list: list[Widget] = list(children)
143 super().__init__(name=name, id=id, classes=classes, disabled=disabled)
144 self.collapsed = collapsed
145
146 def on_collapsible_title_toggle(self, event: CollapsibleTitle.Toggle) -> None:
147 event.stop()
148 self.collapsed = not self.collapsed
149
150 def _watch_collapsed(self, collapsed: bool) -> None:
151 """Update collapsed state when reactive is changed."""
152 self._update_collapsed(collapsed)
153
154 def _update_collapsed(self, collapsed: bool) -> None:
155 """Update children to match collapsed state."""
156 try:
157 self._title.collapsed = collapsed
158 self.set_class(collapsed, "-collapsed")
159 except NoMatches:
160 pass
161
162 def _on_mount(self) -> None:
163 """Initialise collapsed state."""
164 self._update_collapsed(self.collapsed)
165
166 def compose(self) -> ComposeResult:
167 yield self._title
168 yield self.Contents(*self._contents_list)
169
170 def compose_add_child(self, widget: Widget) -> None:
171 """When using the context manager compose syntax, we want to attach nodes to the contents.
172
173 Args:
174 widget: A Widget to add.
175 """
176 self._contents_list.append(widget)
177
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/textual/widgets/_collapsible.py b/src/textual/widgets/_collapsible.py
--- a/src/textual/widgets/_collapsible.py
+++ b/src/textual/widgets/_collapsible.py
@@ -99,6 +99,42 @@
}
"""
+ class Toggled(Message):
+ """Parent class subclassed by `Collapsible` messages.
+
+ Can be handled with `on(Collapsible.Toggled)` if you want to handle expansions
+ and collapsed in the same way, or you can handle the specific events individually.
+ """
+
+ def __init__(self, collapsible: Collapsible) -> None:
+ """Create an instance of the message.
+
+ Args:
+ collapsible: The `Collapsible` widget that was toggled.
+ """
+ self.collapsible: Collapsible = collapsible
+ """The collapsible that was toggled."""
+ super().__init__()
+
+ @property
+ def control(self) -> Collapsible:
+ """An alias for [Toggled.collapsible][textual.widgets.Collapsible.Toggled.collapsible]."""
+ return self.collapsible
+
+ class Expanded(Toggled):
+ """Event sent when the `Collapsible` widget is expanded.
+
+ Can be handled using `on_collapsible_expanded` in a subclass of
+ [`Collapsible`][textual.widgets.Collapsible] or in a parent widget in the DOM.
+ """
+
+ class Collapsed(Toggled):
+ """Event sent when the `Collapsible` widget is collapsed.
+
+ Can be handled using `on_collapsible_collapsed` in a subclass of
+ [`Collapsible`][textual.widgets.Collapsible] or in a parent widget in the DOM.
+ """
+
class Contents(Container):
DEFAULT_CSS = """
Contents {
@@ -143,9 +179,13 @@
super().__init__(name=name, id=id, classes=classes, disabled=disabled)
self.collapsed = collapsed
- def on_collapsible_title_toggle(self, event: CollapsibleTitle.Toggle) -> None:
+ def _on_collapsible_title_toggle(self, event: CollapsibleTitle.Toggle) -> None:
event.stop()
self.collapsed = not self.collapsed
+ if self.collapsed:
+ self.post_message(self.Collapsed(self))
+ else:
+ self.post_message(self.Expanded(self))
def _watch_collapsed(self, collapsed: bool) -> None:
"""Update collapsed state when reactive is changed."""
|
{"golden_diff": "diff --git a/src/textual/widgets/_collapsible.py b/src/textual/widgets/_collapsible.py\n--- a/src/textual/widgets/_collapsible.py\n+++ b/src/textual/widgets/_collapsible.py\n@@ -99,6 +99,42 @@\n }\n \"\"\"\n \n+ class Toggled(Message):\n+ \"\"\"Parent class subclassed by `Collapsible` messages.\n+\n+ Can be handled with `on(Collapsible.Toggled)` if you want to handle expansions\n+ and collapsed in the same way, or you can handle the specific events individually.\n+ \"\"\"\n+\n+ def __init__(self, collapsible: Collapsible) -> None:\n+ \"\"\"Create an instance of the message.\n+\n+ Args:\n+ collapsible: The `Collapsible` widget that was toggled.\n+ \"\"\"\n+ self.collapsible: Collapsible = collapsible\n+ \"\"\"The collapsible that was toggled.\"\"\"\n+ super().__init__()\n+\n+ @property\n+ def control(self) -> Collapsible:\n+ \"\"\"An alias for [Toggled.collapsible][textual.widgets.Collapsible.Toggled.collapsible].\"\"\"\n+ return self.collapsible\n+\n+ class Expanded(Toggled):\n+ \"\"\"Event sent when the `Collapsible` widget is expanded.\n+\n+ Can be handled using `on_collapsible_expanded` in a subclass of\n+ [`Collapsible`][textual.widgets.Collapsible] or in a parent widget in the DOM.\n+ \"\"\"\n+\n+ class Collapsed(Toggled):\n+ \"\"\"Event sent when the `Collapsible` widget is collapsed.\n+\n+ Can be handled using `on_collapsible_collapsed` in a subclass of\n+ [`Collapsible`][textual.widgets.Collapsible] or in a parent widget in the DOM.\n+ \"\"\"\n+\n class Contents(Container):\n DEFAULT_CSS = \"\"\"\n Contents {\n@@ -143,9 +179,13 @@\n super().__init__(name=name, id=id, classes=classes, disabled=disabled)\n self.collapsed = collapsed\n \n- def on_collapsible_title_toggle(self, event: CollapsibleTitle.Toggle) -> None:\n+ def _on_collapsible_title_toggle(self, event: CollapsibleTitle.Toggle) -> None:\n event.stop()\n self.collapsed = not self.collapsed\n+ if self.collapsed:\n+ self.post_message(self.Collapsed(self))\n+ else:\n+ self.post_message(self.Expanded(self))\n \n def _watch_collapsed(self, collapsed: bool) -> None:\n \"\"\"Update collapsed state when reactive is changed.\"\"\"\n", "issue": "Add message `Collapsible.Toggled`\nWhat it says on the tin.\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom rich.console import RenderableType\nfrom rich.text import Text\n\nfrom .. import events\nfrom ..app import ComposeResult\nfrom ..binding import Binding\nfrom ..containers import Container\nfrom ..css.query import NoMatches\nfrom ..message import Message\nfrom ..reactive import reactive\nfrom ..widget import Widget\n\n__all__ = [\"Collapsible\", \"CollapsibleTitle\"]\n\n\nclass CollapsibleTitle(Widget, can_focus=True):\n \"\"\"Title and symbol for the Collapsible.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n CollapsibleTitle {\n width: auto;\n height: auto;\n padding: 0 1 0 1;\n }\n\n CollapsibleTitle:hover {\n background: $foreground 10%;\n color: $text;\n }\n\n CollapsibleTitle:focus {\n background: $accent;\n color: $text;\n }\n \"\"\"\n\n BINDINGS = [Binding(\"enter\", \"toggle\", \"Toggle collapsible\", show=False)]\n \"\"\"\n | Key(s) | Description |\n | :- | :- |\n | enter | Toggle the collapsible. 
|\n \"\"\"\n\n collapsed = reactive(True)\n\n def __init__(\n self,\n *,\n label: str,\n collapsed_symbol: str,\n expanded_symbol: str,\n collapsed: bool,\n ) -> None:\n super().__init__()\n self.collapsed_symbol = collapsed_symbol\n self.expanded_symbol = expanded_symbol\n self.label = label\n self.collapse = collapsed\n\n class Toggle(Message):\n \"\"\"Request toggle.\"\"\"\n\n async def _on_click(self, event: events.Click) -> None:\n \"\"\"Inform ancestor we want to toggle.\"\"\"\n event.stop()\n self.post_message(self.Toggle())\n\n def action_toggle(self) -> None:\n \"\"\"Toggle the state of the parent collapsible.\"\"\"\n self.post_message(self.Toggle())\n\n def render(self) -> RenderableType:\n \"\"\"Compose right/down arrow and label.\"\"\"\n if self.collapsed:\n return Text(f\"{self.collapsed_symbol} {self.label}\")\n else:\n return Text(f\"{self.expanded_symbol} {self.label}\")\n\n\nclass Collapsible(Widget):\n \"\"\"A collapsible container.\"\"\"\n\n collapsed = reactive(True)\n\n DEFAULT_CSS = \"\"\"\n Collapsible {\n width: 1fr;\n height: auto;\n background: $boost;\n border-top: hkey $background;\n padding-bottom: 1;\n padding-left: 1;\n }\n\n Collapsible.-collapsed > Contents {\n display: none;\n }\n \"\"\"\n\n class Contents(Container):\n DEFAULT_CSS = \"\"\"\n Contents {\n width: 100%;\n height: auto;\n padding: 1 0 0 3;\n }\n \"\"\"\n\n def __init__(\n self,\n *children: Widget,\n title: str = \"Toggle\",\n collapsed: bool = True,\n collapsed_symbol: str = \"\u25b6\",\n expanded_symbol: str = \"\u25bc\",\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n disabled: bool = False,\n ) -> None:\n \"\"\"Initialize a Collapsible widget.\n\n Args:\n *children: Contents that will be collapsed/expanded.\n title: Title of the collapsed/expanded contents.\n collapsed: Default status of the contents.\n collapsed_symbol: Collapsed symbol before the title.\n expanded_symbol: Expanded symbol before the title.\n name: The name of the collapsible.\n id: The ID of the collapsible in the DOM.\n classes: The CSS classes of the collapsible.\n disabled: Whether the collapsible is disabled or not.\n \"\"\"\n self._title = CollapsibleTitle(\n label=title,\n collapsed_symbol=collapsed_symbol,\n expanded_symbol=expanded_symbol,\n collapsed=collapsed,\n )\n self._contents_list: list[Widget] = list(children)\n super().__init__(name=name, id=id, classes=classes, disabled=disabled)\n self.collapsed = collapsed\n\n def on_collapsible_title_toggle(self, event: CollapsibleTitle.Toggle) -> None:\n event.stop()\n self.collapsed = not self.collapsed\n\n def _watch_collapsed(self, collapsed: bool) -> None:\n \"\"\"Update collapsed state when reactive is changed.\"\"\"\n self._update_collapsed(collapsed)\n\n def _update_collapsed(self, collapsed: bool) -> None:\n \"\"\"Update children to match collapsed state.\"\"\"\n try:\n self._title.collapsed = collapsed\n self.set_class(collapsed, \"-collapsed\")\n except NoMatches:\n pass\n\n def _on_mount(self) -> None:\n \"\"\"Initialise collapsed state.\"\"\"\n self._update_collapsed(self.collapsed)\n\n def compose(self) -> ComposeResult:\n yield self._title\n yield self.Contents(*self._contents_list)\n\n def compose_add_child(self, widget: Widget) -> None:\n \"\"\"When using the context manager compose syntax, we want to attach nodes to the contents.\n\n Args:\n widget: A Widget to add.\n \"\"\"\n self._contents_list.append(widget)\n", "path": "src/textual/widgets/_collapsible.py"}], "after_files": [{"content": "from __future__ import 
annotations\n\nfrom rich.console import RenderableType\nfrom rich.text import Text\n\nfrom .. import events\nfrom ..app import ComposeResult\nfrom ..binding import Binding\nfrom ..containers import Container\nfrom ..css.query import NoMatches\nfrom ..message import Message\nfrom ..reactive import reactive\nfrom ..widget import Widget\n\n__all__ = [\"Collapsible\", \"CollapsibleTitle\"]\n\n\nclass CollapsibleTitle(Widget, can_focus=True):\n \"\"\"Title and symbol for the Collapsible.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n CollapsibleTitle {\n width: auto;\n height: auto;\n padding: 0 1 0 1;\n }\n\n CollapsibleTitle:hover {\n background: $foreground 10%;\n color: $text;\n }\n\n CollapsibleTitle:focus {\n background: $accent;\n color: $text;\n }\n \"\"\"\n\n BINDINGS = [Binding(\"enter\", \"toggle\", \"Toggle collapsible\", show=False)]\n \"\"\"\n | Key(s) | Description |\n | :- | :- |\n | enter | Toggle the collapsible. |\n \"\"\"\n\n collapsed = reactive(True)\n\n def __init__(\n self,\n *,\n label: str,\n collapsed_symbol: str,\n expanded_symbol: str,\n collapsed: bool,\n ) -> None:\n super().__init__()\n self.collapsed_symbol = collapsed_symbol\n self.expanded_symbol = expanded_symbol\n self.label = label\n self.collapse = collapsed\n\n class Toggle(Message):\n \"\"\"Request toggle.\"\"\"\n\n async def _on_click(self, event: events.Click) -> None:\n \"\"\"Inform ancestor we want to toggle.\"\"\"\n event.stop()\n self.post_message(self.Toggle())\n\n def action_toggle(self) -> None:\n \"\"\"Toggle the state of the parent collapsible.\"\"\"\n self.post_message(self.Toggle())\n\n def render(self) -> RenderableType:\n \"\"\"Compose right/down arrow and label.\"\"\"\n if self.collapsed:\n return Text(f\"{self.collapsed_symbol} {self.label}\")\n else:\n return Text(f\"{self.expanded_symbol} {self.label}\")\n\n\nclass Collapsible(Widget):\n \"\"\"A collapsible container.\"\"\"\n\n collapsed = reactive(True)\n\n DEFAULT_CSS = \"\"\"\n Collapsible {\n width: 1fr;\n height: auto;\n background: $boost;\n border-top: hkey $background;\n padding-bottom: 1;\n padding-left: 1;\n }\n\n Collapsible.-collapsed > Contents {\n display: none;\n }\n \"\"\"\n\n class Toggled(Message):\n \"\"\"Parent class subclassed by `Collapsible` messages.\n\n Can be handled with `on(Collapsible.Toggled)` if you want to handle expansions\n and collapsed in the same way, or you can handle the specific events individually.\n \"\"\"\n\n def __init__(self, collapsible: Collapsible) -> None:\n \"\"\"Create an instance of the message.\n\n Args:\n collapsible: The `Collapsible` widget that was toggled.\n \"\"\"\n self.collapsible: Collapsible = collapsible\n \"\"\"The collapsible that was toggled.\"\"\"\n super().__init__()\n\n @property\n def control(self) -> Collapsible:\n \"\"\"An alias for [Toggled.collapsible][textual.widgets.Collapsible.Toggled.collapsible].\"\"\"\n return self.collapsible\n\n class Expanded(Toggled):\n \"\"\"Event sent when the `Collapsible` widget is expanded.\n\n Can be handled using `on_collapsible_expanded` in a subclass of\n [`Collapsible`][textual.widgets.Collapsible] or in a parent widget in the DOM.\n \"\"\"\n\n class Collapsed(Toggled):\n \"\"\"Event sent when the `Collapsible` widget is collapsed.\n\n Can be handled using `on_collapsible_collapsed` in a subclass of\n [`Collapsible`][textual.widgets.Collapsible] or in a parent widget in the DOM.\n \"\"\"\n\n class Contents(Container):\n DEFAULT_CSS = \"\"\"\n Contents {\n width: 100%;\n height: auto;\n padding: 1 0 0 3;\n }\n \"\"\"\n\n def __init__(\n 
self,\n *children: Widget,\n title: str = \"Toggle\",\n collapsed: bool = True,\n collapsed_symbol: str = \"\u25b6\",\n expanded_symbol: str = \"\u25bc\",\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n disabled: bool = False,\n ) -> None:\n \"\"\"Initialize a Collapsible widget.\n\n Args:\n *children: Contents that will be collapsed/expanded.\n title: Title of the collapsed/expanded contents.\n collapsed: Default status of the contents.\n collapsed_symbol: Collapsed symbol before the title.\n expanded_symbol: Expanded symbol before the title.\n name: The name of the collapsible.\n id: The ID of the collapsible in the DOM.\n classes: The CSS classes of the collapsible.\n disabled: Whether the collapsible is disabled or not.\n \"\"\"\n self._title = CollapsibleTitle(\n label=title,\n collapsed_symbol=collapsed_symbol,\n expanded_symbol=expanded_symbol,\n collapsed=collapsed,\n )\n self._contents_list: list[Widget] = list(children)\n super().__init__(name=name, id=id, classes=classes, disabled=disabled)\n self.collapsed = collapsed\n\n def _on_collapsible_title_toggle(self, event: CollapsibleTitle.Toggle) -> None:\n event.stop()\n self.collapsed = not self.collapsed\n if self.collapsed:\n self.post_message(self.Collapsed(self))\n else:\n self.post_message(self.Expanded(self))\n\n def _watch_collapsed(self, collapsed: bool) -> None:\n \"\"\"Update collapsed state when reactive is changed.\"\"\"\n self._update_collapsed(collapsed)\n\n def _update_collapsed(self, collapsed: bool) -> None:\n \"\"\"Update children to match collapsed state.\"\"\"\n try:\n self._title.collapsed = collapsed\n self.set_class(collapsed, \"-collapsed\")\n except NoMatches:\n pass\n\n def _on_mount(self) -> None:\n \"\"\"Initialise collapsed state.\"\"\"\n self._update_collapsed(self.collapsed)\n\n def compose(self) -> ComposeResult:\n yield self._title\n yield self.Contents(*self._contents_list)\n\n def compose_add_child(self, widget: Widget) -> None:\n \"\"\"When using the context manager compose syntax, we want to attach nodes to the contents.\n\n Args:\n widget: A Widget to add.\n \"\"\"\n self._contents_list.append(widget)\n", "path": "src/textual/widgets/_collapsible.py"}]}
| 1,803 | 557 |
gh_patches_debug_33956
|
rasdani/github-patches
|
git_diff
|
hylang__hy-2299
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Slow startup when Hy is installed from a wheel
Testing the new release of 0.16.0, I see that startup is much slower when installing from the wheel than from the source distribution or directly from the repository. The same holds for older Hy releases, even when I make sure the `__pycache__` directories are included in the wheel and I can see they're installed. Either there's something wonky with my system, or wheel installation doesn't play nicely with premade byte-compiled files.
--- END ISSUE ---
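One plausible explanation for the slowdown is that the `.pyc` files shipped inside the wheel fail timestamp-based validation once installed, so every import recompiles the Hy sources from scratch. A rough sketch of an alternative — byte-compiling the installed `.hy` files at install time with hash-based invalidation — is shown below; it assumes that `import hy` registers the hooks needed to compile `.hy` sources, and the patch later in this record takes essentially this approach.

```python
# Sketch of a post-install compile step (not the shipped implementation).
import os
import py_compile
from glob import glob


def compile_hy_sources(install_lib: str) -> None:
    import hy  # noqa: F401  # assumed to register the .hy compile hooks

    for path in glob(os.path.join(install_lib, "**", "*.hy"), recursive=True):
        # CHECKED_HASH pycs validate against the source contents rather than
        # its mtime, so they stay valid after installation from a wheel.
        py_compile.compile(
            path,
            invalidation_mode=py_compile.PycInvalidationMode.CHECKED_HASH,
        )
```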
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 import os
4
5 import fastentrypoints # Monkey-patches setuptools.
6 from get_version import __version__
7 from setuptools import find_packages, setup
8
9 os.chdir(os.path.split(os.path.abspath(__file__))[0])
10
11 PKG = "hy"
12
13 long_description = """Hy is a Python <--> Lisp layer. It helps
14 make things work nicer, and lets Python and the Hy lisp variant play
15 nice together. """
16
17 setup(
18 name=PKG,
19 version=__version__,
20 install_requires=[
21 "funcparserlib ~= 1.0",
22 "colorama",
23 'astor>=0.8 ; python_version < "3.9"',
24 ],
25 python_requires=">= 3.7, < 3.11",
26 entry_points={
27 "console_scripts": [
28 "hy = hy.cmdline:hy_main",
29 "hy3 = hy.cmdline:hy_main",
30 "hyc = hy.cmdline:hyc_main",
31 "hyc3 = hy.cmdline:hyc_main",
32 "hy2py = hy.cmdline:hy2py_main",
33 "hy2py3 = hy.cmdline:hy2py_main",
34 ]
35 },
36 packages=find_packages(exclude=["tests*"]),
37 package_data={
38 "hy": ["*.hy", "__pycache__/*"],
39 "hy.contrib": ["*.hy", "__pycache__/*"],
40 "hy.core": ["*.hy", "__pycache__/*"],
41 "hy.extra": ["*.hy", "__pycache__/*"],
42 },
43 data_files=[("get_version", ["get_version.py"])],
44 author="Paul Tagliamonte",
45 author_email="[email protected]",
46 long_description=long_description,
47 description="Lisp and Python love each other.",
48 license="Expat",
49 url="http://hylang.org/",
50 platforms=["any"],
51 classifiers=[
52 "Development Status :: 4 - Beta",
53 "Intended Audience :: Developers",
54 "License :: DFSG approved",
55 "License :: OSI Approved :: MIT License", # Really "Expat". Ugh.
56 "Operating System :: OS Independent",
57 "Programming Language :: Lisp",
58 "Programming Language :: Python",
59 "Programming Language :: Python :: 3",
60 "Programming Language :: Python :: 3.7",
61 "Programming Language :: Python :: 3.8",
62 "Programming Language :: Python :: 3.9",
63 "Programming Language :: Python :: 3.10",
64 "Topic :: Software Development :: Code Generators",
65 "Topic :: Software Development :: Compilers",
66 "Topic :: Software Development :: Libraries",
67 ],
68 project_urls={
69 "Documentation": "https://docs.hylang.org/",
70 "Source": "https://github.com/hylang/hy",
71 },
72 )
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -5,6 +5,7 @@
import fastentrypoints # Monkey-patches setuptools.
from get_version import __version__
from setuptools import find_packages, setup
+from setuptools.command.install import install
os.chdir(os.path.split(os.path.abspath(__file__))[0])
@@ -14,14 +15,34 @@
make things work nicer, and lets Python and the Hy lisp variant play
nice together. """
+
+class install(install):
+ def run(self):
+ super().run()
+ import py_compile
+ from glob import glob
+
+ import hy # for compile hooks
+
+ for path in glob(os.path.join(self.install_lib, "**/*.hy"), recursive=True):
+ py_compile.compile(
+ path, invalidation_mode=py_compile.PycInvalidationMode.CHECKED_HASH
+ )
+
+
+# both setup_requires and install_requires
+# since we need to compile .hy files during setup
+requires = [
+ "funcparserlib ~= 1.0",
+ "colorama",
+ 'astor>=0.8 ; python_version < "3.9"',
+]
+
setup(
name=PKG,
version=__version__,
- install_requires=[
- "funcparserlib ~= 1.0",
- "colorama",
- 'astor>=0.8 ; python_version < "3.9"',
- ],
+ setup_requires=requires,
+ install_requires=requires,
python_requires=">= 3.7, < 3.11",
entry_points={
"console_scripts": [
@@ -35,10 +56,7 @@
},
packages=find_packages(exclude=["tests*"]),
package_data={
- "hy": ["*.hy", "__pycache__/*"],
- "hy.contrib": ["*.hy", "__pycache__/*"],
- "hy.core": ["*.hy", "__pycache__/*"],
- "hy.extra": ["*.hy", "__pycache__/*"],
+ "": ["*.hy"],
},
data_files=[("get_version", ["get_version.py"])],
author="Paul Tagliamonte",
@@ -69,4 +87,7 @@
"Documentation": "https://docs.hylang.org/",
"Source": "https://github.com/hylang/hy",
},
+ cmdclass={
+ "install": install,
+ },
)
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -5,6 +5,7 @@\n import fastentrypoints # Monkey-patches setuptools.\n from get_version import __version__\n from setuptools import find_packages, setup\n+from setuptools.command.install import install\n \n os.chdir(os.path.split(os.path.abspath(__file__))[0])\n \n@@ -14,14 +15,34 @@\n make things work nicer, and lets Python and the Hy lisp variant play\n nice together. \"\"\"\n \n+\n+class install(install):\n+ def run(self):\n+ super().run()\n+ import py_compile\n+ from glob import glob\n+\n+ import hy # for compile hooks\n+\n+ for path in glob(os.path.join(self.install_lib, \"**/*.hy\"), recursive=True):\n+ py_compile.compile(\n+ path, invalidation_mode=py_compile.PycInvalidationMode.CHECKED_HASH\n+ )\n+\n+\n+# both setup_requires and install_requires\n+# since we need to compile .hy files during setup\n+requires = [\n+ \"funcparserlib ~= 1.0\",\n+ \"colorama\",\n+ 'astor>=0.8 ; python_version < \"3.9\"',\n+]\n+\n setup(\n name=PKG,\n version=__version__,\n- install_requires=[\n- \"funcparserlib ~= 1.0\",\n- \"colorama\",\n- 'astor>=0.8 ; python_version < \"3.9\"',\n- ],\n+ setup_requires=requires,\n+ install_requires=requires,\n python_requires=\">= 3.7, < 3.11\",\n entry_points={\n \"console_scripts\": [\n@@ -35,10 +56,7 @@\n },\n packages=find_packages(exclude=[\"tests*\"]),\n package_data={\n- \"hy\": [\"*.hy\", \"__pycache__/*\"],\n- \"hy.contrib\": [\"*.hy\", \"__pycache__/*\"],\n- \"hy.core\": [\"*.hy\", \"__pycache__/*\"],\n- \"hy.extra\": [\"*.hy\", \"__pycache__/*\"],\n+ \"\": [\"*.hy\"],\n },\n data_files=[(\"get_version\", [\"get_version.py\"])],\n author=\"Paul Tagliamonte\",\n@@ -69,4 +87,7 @@\n \"Documentation\": \"https://docs.hylang.org/\",\n \"Source\": \"https://github.com/hylang/hy\",\n },\n+ cmdclass={\n+ \"install\": install,\n+ },\n )\n", "issue": "Slow startup when Hy is installed from a wheel\nTesting the new release of 0.16.0, I see that startup is much slower when installing from the wheel than from the source distribution or directly from the repository. Likewise for older Hy releases. Even when I make sure the `__pycache__`s are included in the wheel and I can see they're installed. Either there's something wonky with my system, or wheel installation doesn't play nicely with premade byte-compiled files.\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport os\n\nimport fastentrypoints # Monkey-patches setuptools.\nfrom get_version import __version__\nfrom setuptools import find_packages, setup\n\nos.chdir(os.path.split(os.path.abspath(__file__))[0])\n\nPKG = \"hy\"\n\nlong_description = \"\"\"Hy is a Python <--> Lisp layer. It helps\nmake things work nicer, and lets Python and the Hy lisp variant play\nnice together. 
\"\"\"\n\nsetup(\n name=PKG,\n version=__version__,\n install_requires=[\n \"funcparserlib ~= 1.0\",\n \"colorama\",\n 'astor>=0.8 ; python_version < \"3.9\"',\n ],\n python_requires=\">= 3.7, < 3.11\",\n entry_points={\n \"console_scripts\": [\n \"hy = hy.cmdline:hy_main\",\n \"hy3 = hy.cmdline:hy_main\",\n \"hyc = hy.cmdline:hyc_main\",\n \"hyc3 = hy.cmdline:hyc_main\",\n \"hy2py = hy.cmdline:hy2py_main\",\n \"hy2py3 = hy.cmdline:hy2py_main\",\n ]\n },\n packages=find_packages(exclude=[\"tests*\"]),\n package_data={\n \"hy\": [\"*.hy\", \"__pycache__/*\"],\n \"hy.contrib\": [\"*.hy\", \"__pycache__/*\"],\n \"hy.core\": [\"*.hy\", \"__pycache__/*\"],\n \"hy.extra\": [\"*.hy\", \"__pycache__/*\"],\n },\n data_files=[(\"get_version\", [\"get_version.py\"])],\n author=\"Paul Tagliamonte\",\n author_email=\"[email protected]\",\n long_description=long_description,\n description=\"Lisp and Python love each other.\",\n license=\"Expat\",\n url=\"http://hylang.org/\",\n platforms=[\"any\"],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: DFSG approved\",\n \"License :: OSI Approved :: MIT License\", # Really \"Expat\". Ugh.\n \"Operating System :: OS Independent\",\n \"Programming Language :: Lisp\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Software Development :: Code Generators\",\n \"Topic :: Software Development :: Compilers\",\n \"Topic :: Software Development :: Libraries\",\n ],\n project_urls={\n \"Documentation\": \"https://docs.hylang.org/\",\n \"Source\": \"https://github.com/hylang/hy\",\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport os\n\nimport fastentrypoints # Monkey-patches setuptools.\nfrom get_version import __version__\nfrom setuptools import find_packages, setup\nfrom setuptools.command.install import install\n\nos.chdir(os.path.split(os.path.abspath(__file__))[0])\n\nPKG = \"hy\"\n\nlong_description = \"\"\"Hy is a Python <--> Lisp layer. It helps\nmake things work nicer, and lets Python and the Hy lisp variant play\nnice together. 
\"\"\"\n\n\nclass install(install):\n def run(self):\n super().run()\n import py_compile\n from glob import glob\n\n import hy # for compile hooks\n\n for path in glob(os.path.join(self.install_lib, \"**/*.hy\"), recursive=True):\n py_compile.compile(\n path, invalidation_mode=py_compile.PycInvalidationMode.CHECKED_HASH\n )\n\n\n# both setup_requires and install_requires\n# since we need to compile .hy files during setup\nrequires = [\n \"funcparserlib ~= 1.0\",\n \"colorama\",\n 'astor>=0.8 ; python_version < \"3.9\"',\n]\n\nsetup(\n name=PKG,\n version=__version__,\n setup_requires=requires,\n install_requires=requires,\n python_requires=\">= 3.7, < 3.11\",\n entry_points={\n \"console_scripts\": [\n \"hy = hy.cmdline:hy_main\",\n \"hy3 = hy.cmdline:hy_main\",\n \"hyc = hy.cmdline:hyc_main\",\n \"hyc3 = hy.cmdline:hyc_main\",\n \"hy2py = hy.cmdline:hy2py_main\",\n \"hy2py3 = hy.cmdline:hy2py_main\",\n ]\n },\n packages=find_packages(exclude=[\"tests*\"]),\n package_data={\n \"\": [\"*.hy\"],\n },\n data_files=[(\"get_version\", [\"get_version.py\"])],\n author=\"Paul Tagliamonte\",\n author_email=\"[email protected]\",\n long_description=long_description,\n description=\"Lisp and Python love each other.\",\n license=\"Expat\",\n url=\"http://hylang.org/\",\n platforms=[\"any\"],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: DFSG approved\",\n \"License :: OSI Approved :: MIT License\", # Really \"Expat\". Ugh.\n \"Operating System :: OS Independent\",\n \"Programming Language :: Lisp\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Software Development :: Code Generators\",\n \"Topic :: Software Development :: Compilers\",\n \"Topic :: Software Development :: Libraries\",\n ],\n project_urls={\n \"Documentation\": \"https://docs.hylang.org/\",\n \"Source\": \"https://github.com/hylang/hy\",\n },\n cmdclass={\n \"install\": install,\n },\n)\n", "path": "setup.py"}]}
| 1,099 | 552 |
gh_patches_debug_33790
|
rasdani/github-patches
|
git_diff
|
yt-dlp__yt-dlp-3386
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Gofile.io] can't download
### Checklist
- [X] I'm reporting a broken site
- [X] I've verified that I'm running yt-dlp version **2022.04.08** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
- [X] I've checked that all provided URLs are alive and playable in a browser
- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates
- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
- [ ] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
### Region
_No response_
### Description
Hello,
I can't download from gofile.io; I tried with cookies but it fails too.
URL: https://gofile.io/d/cUkVvF
### Verbose log
```shell
C:\Users\Administrator>youtube-dl -UvF "https://gofile.io/d/cUkVvF"
[debug] Command-line config: ['-UvF', 'https://gofile.io/d/cUkVvF']
[debug] Encodings: locale cp1252, fs utf-8, out utf-8, err utf-8, pref cp1252
[debug] yt-dlp version 2022.04.08 [7884ade] (win_exe)
[debug] Python version 3.8.10 (CPython 64bit) - Windows-10-10.0.19042-SP0
[debug] Checking exe version: ffmpeg -bsfs
[debug] Checking exe version: ffprobe -bsfs
[debug] exe versions: ffmpeg 5.0-full_build-www.gyan.dev (setts), ffprobe 5.0-full_build-www.gyan.dev
[debug] Optional libraries: brotli, certifi, Cryptodome, mutagen, sqlite, websockets
[debug] Proxy map: {}
Latest version: 2022.04.08, Current version: 2022.04.08
yt-dlp is up to date (2022.04.08)
[Gofile] Getting a new guest account
[debug] [Gofile] Extracting URL: https://gofile.io/d/cUkVvF
[download] Downloading playlist: cUkVvF
[Gofile] Gofile: Getting filelist
ERROR: Unable to download JSON metadata: HTTP Error 401: Unauthorized (caused by <HTTPError 401: 'Unauthorized'>); please report this issue on https://github.com/yt-dlp/yt-dlp/issues?q= , filling out the appropriate issue template. Confirm you are on the latest version using yt-dlp -U
File "yt_dlp\extractor\common.py", line 767, in _request_webpage
File "yt_dlp\YoutubeDL.py", line 3601, in urlopen
File "urllib\request.py", line 531, in open
File "urllib\request.py", line 640, in http_response
File "urllib\request.py", line 569, in error
File "urllib\request.py", line 502, in _call_chain
File "urllib\request.py", line 649, in http_error_default
urllib.error.HTTPError: HTTP Error 401: Unauthorized
[Gofile] playlist cUkVvF: Downloading 0 videos
[download] Finished downloading playlist: cUkVvF
```
--- END ISSUE ---
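The 401 can be reproduced outside of yt-dlp by issuing the same two API calls the extractor makes. The endpoint and parameter names in the sketch below are taken from the extractor source included later in this record; the most likely cause of the failure is that the server-side contract for those parameters has changed.

```python
# Rough reproduction of the extractor's request flow (parameters as sent by
# the extractor below; the gofile.io API may have changed since).
import requests


def fetch_filelist(content_id: str) -> dict:
    # The extractor first creates a guest account to obtain a token.
    account = requests.get("https://api.gofile.io/createAccount").json()
    token = account["data"]["token"]

    resp = requests.get(
        "https://api.gofile.io/getContent",
        params={
            "contentId": content_id,
            "token": token,
            "websiteToken": "websiteToken",  # literal value the old code sent
            "cache": "true",
        },
    )
    resp.raise_for_status()  # raises on the 401 seen in the log above
    return resp.json()
```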
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `yt_dlp/extractor/gofile.py`
Content:
```
1 # coding: utf-8
2 from .common import InfoExtractor
3 from ..utils import (
4 ExtractorError,
5 try_get
6 )
7
8
9 class GofileIE(InfoExtractor):
10 _VALID_URL = r'https?://(?:www\.)?gofile\.io/d/(?P<id>[^/]+)'
11 _TESTS = [{
12 'url': 'https://gofile.io/d/AMZyDw',
13 'info_dict': {
14 'id': 'AMZyDw',
15 },
16 'playlist_mincount': 2,
17 'playlist': [{
18 'info_dict': {
19 'id': 'de571ac1-5edc-42e2-8ec2-bdac83ad4a31',
20 'filesize': 928116,
21 'ext': 'mp4',
22 'title': 'nuuh'
23 }
24 }]
25 }, { # URL to test mixed file types
26 'url': 'https://gofile.io/d/avt34h',
27 'info_dict': {
28 'id': 'avt34h',
29 },
30 'playlist_mincount': 1,
31 }, { # URL to test no video/audio error
32 'url': 'https://gofile.io/d/aB03lZ',
33 'info_dict': {
34 'id': 'aB03lZ',
35 },
36 'playlist_count': 0,
37 'skip': 'No video/audio found at provided URL.',
38 }]
39 _TOKEN = None
40
41 def _real_initialize(self):
42 token = self._get_cookies('https://gofile.io/').get('accountToken')
43 if token:
44 self._TOKEN = token.value
45 return
46
47 account_data = self._download_json(
48 'https://api.gofile.io/createAccount', None, note='Getting a new guest account')
49 self._TOKEN = account_data['data']['token']
50 self._set_cookie('gofile.io', 'accountToken', self._TOKEN)
51
52 def _entries(self, file_id):
53 files = self._download_json(
54 f'https://api.gofile.io/getContent?contentId={file_id}&token={self._TOKEN}&websiteToken=websiteToken&cache=true',
55 'Gofile', note='Getting filelist')
56
57 status = files['status']
58 if status != 'ok':
59 raise ExtractorError(f'{self.IE_NAME} said: status {status}', expected=True)
60
61 found_files = False
62 for file in (try_get(files, lambda x: x['data']['contents'], dict) or {}).values():
63 file_type, file_format = file.get('mimetype').split('/', 1)
64 if file_type not in ('video', 'audio') and file_format != 'vnd.mts':
65 continue
66
67 found_files = True
68 file_url = file.get('directLink')
69 if file_url:
70 yield {
71 'id': file['id'],
72 'title': file['name'].rsplit('.', 1)[0],
73 'url': file_url,
74 'filesize': file.get('size'),
75 'release_timestamp': file.get('createTime')
76 }
77
78 if not found_files:
79 raise ExtractorError('No video/audio found at provided URL.', expected=True)
80
81 def _real_extract(self, url):
82 file_id = self._match_id(url)
83 return self.playlist_result(self._entries(file_id), playlist_id=file_id)
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/yt_dlp/extractor/gofile.py b/yt_dlp/extractor/gofile.py
--- a/yt_dlp/extractor/gofile.py
+++ b/yt_dlp/extractor/gofile.py
@@ -19,22 +19,25 @@
'id': 'de571ac1-5edc-42e2-8ec2-bdac83ad4a31',
'filesize': 928116,
'ext': 'mp4',
- 'title': 'nuuh'
+ 'title': 'nuuh',
+ 'release_timestamp': 1638338704,
+ 'release_date': '20211201',
}
}]
- }, { # URL to test mixed file types
- 'url': 'https://gofile.io/d/avt34h',
+ }, {
+ 'url': 'https://gofile.io/d/is8lKr',
'info_dict': {
- 'id': 'avt34h',
- },
- 'playlist_mincount': 1,
- }, { # URL to test no video/audio error
- 'url': 'https://gofile.io/d/aB03lZ',
- 'info_dict': {
- 'id': 'aB03lZ',
+ 'id': 'TMjXd9',
+ 'ext': 'mp4',
},
'playlist_count': 0,
'skip': 'No video/audio found at provided URL.',
+ }, {
+ 'url': 'https://gofile.io/d/TMjXd9',
+ 'info_dict': {
+ 'id': 'TMjXd9',
+ },
+ 'playlist_count': 1,
}]
_TOKEN = None
@@ -50,9 +53,11 @@
self._set_cookie('gofile.io', 'accountToken', self._TOKEN)
def _entries(self, file_id):
- files = self._download_json(
- f'https://api.gofile.io/getContent?contentId={file_id}&token={self._TOKEN}&websiteToken=websiteToken&cache=true',
- 'Gofile', note='Getting filelist')
+ files = self._download_json('https://api.gofile.io/getContent', 'Gofile', note='Getting filelist', query={
+ 'contentId': file_id,
+ 'token': self._TOKEN,
+ 'websiteToken': 12345,
+ })
status = files['status']
if status != 'ok':
@@ -65,7 +70,7 @@
continue
found_files = True
- file_url = file.get('directLink')
+ file_url = file.get('link')
if file_url:
yield {
'id': file['id'],
|
{"golden_diff": "diff --git a/yt_dlp/extractor/gofile.py b/yt_dlp/extractor/gofile.py\n--- a/yt_dlp/extractor/gofile.py\n+++ b/yt_dlp/extractor/gofile.py\n@@ -19,22 +19,25 @@\n 'id': 'de571ac1-5edc-42e2-8ec2-bdac83ad4a31',\n 'filesize': 928116,\n 'ext': 'mp4',\n- 'title': 'nuuh'\n+ 'title': 'nuuh',\n+ 'release_timestamp': 1638338704,\n+ 'release_date': '20211201',\n }\n }]\n- }, { # URL to test mixed file types\n- 'url': 'https://gofile.io/d/avt34h',\n+ }, {\n+ 'url': 'https://gofile.io/d/is8lKr',\n 'info_dict': {\n- 'id': 'avt34h',\n- },\n- 'playlist_mincount': 1,\n- }, { # URL to test no video/audio error\n- 'url': 'https://gofile.io/d/aB03lZ',\n- 'info_dict': {\n- 'id': 'aB03lZ',\n+ 'id': 'TMjXd9',\n+ 'ext': 'mp4',\n },\n 'playlist_count': 0,\n 'skip': 'No video/audio found at provided URL.',\n+ }, {\n+ 'url': 'https://gofile.io/d/TMjXd9',\n+ 'info_dict': {\n+ 'id': 'TMjXd9',\n+ },\n+ 'playlist_count': 1,\n }]\n _TOKEN = None\n \n@@ -50,9 +53,11 @@\n self._set_cookie('gofile.io', 'accountToken', self._TOKEN)\n \n def _entries(self, file_id):\n- files = self._download_json(\n- f'https://api.gofile.io/getContent?contentId={file_id}&token={self._TOKEN}&websiteToken=websiteToken&cache=true',\n- 'Gofile', note='Getting filelist')\n+ files = self._download_json('https://api.gofile.io/getContent', 'Gofile', note='Getting filelist', query={\n+ 'contentId': file_id,\n+ 'token': self._TOKEN,\n+ 'websiteToken': 12345,\n+ })\n \n status = files['status']\n if status != 'ok':\n@@ -65,7 +70,7 @@\n continue\n \n found_files = True\n- file_url = file.get('directLink')\n+ file_url = file.get('link')\n if file_url:\n yield {\n 'id': file['id'],\n", "issue": "[Gofile.io] can't download\n### Checklist\n\n- [X] I'm reporting a broken site\n- [X] I've verified that I'm running yt-dlp version **2022.04.08** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)\n- [X] I've checked that all provided URLs are alive and playable in a browser\n- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)\n- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. 
DO NOT post duplicates\n- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)\n- [ ] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required\n\n### Region\n\n_No response_\n\n### Description\n\nhello,\r\n\r\ni can't download from gofile.io, tried with cookies but fail too.\r\n\r\nURL: https://gofile.io/d/cUkVvF\n\n### Verbose log\n\n```shell\nC:\\Users\\Administrator>youtube-dl -UvF \"https://gofile.io/d/cUkVvF\"\r\n[debug] Command-line config: ['-UvF', 'https://gofile.io/d/cUkVvF']\r\n[debug] Encodings: locale cp1252, fs utf-8, out utf-8, err utf-8, pref cp1252\r\n[debug] yt-dlp version 2022.04.08 [7884ade] (win_exe)\r\n[debug] Python version 3.8.10 (CPython 64bit) - Windows-10-10.0.19042-SP0\r\n[debug] Checking exe version: ffmpeg -bsfs\r\n[debug] Checking exe version: ffprobe -bsfs\r\n[debug] exe versions: ffmpeg 5.0-full_build-www.gyan.dev (setts), ffprobe 5.0-full_build-www.gyan.dev\r\n[debug] Optional libraries: brotli, certifi, Cryptodome, mutagen, sqlite, websockets\r\n[debug] Proxy map: {}\r\nLatest version: 2022.04.08, Current version: 2022.04.08\r\nyt-dlp is up to date (2022.04.08)\r\n[Gofile] Getting a new guest account\r\n[debug] [Gofile] Extracting URL: https://gofile.io/d/cUkVvF\r\n[download] Downloading playlist: cUkVvF\r\n[Gofile] Gofile: Getting filelist\r\nERROR: Unable to download JSON metadata: HTTP Error 401: Unauthorized (caused by <HTTPError 401: 'Unauthorized'>); please report this issue on https://github.com/yt-dlp/yt-dlp/issues?q= , filling out the appropriate issue template. Confirm you are on the latest version using yt-dlp -U\r\n File \"yt_dlp\\extractor\\common.py\", line 767, in _request_webpage\r\n File \"yt_dlp\\YoutubeDL.py\", line 3601, in urlopen\r\n File \"urllib\\request.py\", line 531, in open\r\n File \"urllib\\request.py\", line 640, in http_response\r\n File \"urllib\\request.py\", line 569, in error\r\n File \"urllib\\request.py\", line 502, in _call_chain\r\n File \"urllib\\request.py\", line 649, in http_error_default\r\nurllib.error.HTTPError: HTTP Error 401: Unauthorized\r\n\r\n[Gofile] playlist cUkVvF: Downloading 0 videos\r\n[download] Finished downloading playlist: cUkVvF\n```\n\n", "before_files": [{"content": "# coding: utf-8\nfrom .common import InfoExtractor\nfrom ..utils import (\n ExtractorError,\n try_get\n)\n\n\nclass GofileIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?gofile\\.io/d/(?P<id>[^/]+)'\n _TESTS = [{\n 'url': 'https://gofile.io/d/AMZyDw',\n 'info_dict': {\n 'id': 'AMZyDw',\n },\n 'playlist_mincount': 2,\n 'playlist': [{\n 'info_dict': {\n 'id': 'de571ac1-5edc-42e2-8ec2-bdac83ad4a31',\n 'filesize': 928116,\n 'ext': 'mp4',\n 'title': 'nuuh'\n }\n }]\n }, { # URL to test mixed file types\n 'url': 'https://gofile.io/d/avt34h',\n 'info_dict': {\n 'id': 'avt34h',\n },\n 'playlist_mincount': 1,\n }, { # URL to test no video/audio error\n 'url': 'https://gofile.io/d/aB03lZ',\n 'info_dict': {\n 'id': 'aB03lZ',\n },\n 'playlist_count': 0,\n 'skip': 'No video/audio found at provided URL.',\n }]\n _TOKEN = None\n\n def _real_initialize(self):\n token = self._get_cookies('https://gofile.io/').get('accountToken')\n if token:\n self._TOKEN = token.value\n return\n\n account_data = self._download_json(\n 'https://api.gofile.io/createAccount', None, note='Getting a new guest account')\n self._TOKEN = 
account_data['data']['token']\n self._set_cookie('gofile.io', 'accountToken', self._TOKEN)\n\n def _entries(self, file_id):\n files = self._download_json(\n f'https://api.gofile.io/getContent?contentId={file_id}&token={self._TOKEN}&websiteToken=websiteToken&cache=true',\n 'Gofile', note='Getting filelist')\n\n status = files['status']\n if status != 'ok':\n raise ExtractorError(f'{self.IE_NAME} said: status {status}', expected=True)\n\n found_files = False\n for file in (try_get(files, lambda x: x['data']['contents'], dict) or {}).values():\n file_type, file_format = file.get('mimetype').split('/', 1)\n if file_type not in ('video', 'audio') and file_format != 'vnd.mts':\n continue\n\n found_files = True\n file_url = file.get('directLink')\n if file_url:\n yield {\n 'id': file['id'],\n 'title': file['name'].rsplit('.', 1)[0],\n 'url': file_url,\n 'filesize': file.get('size'),\n 'release_timestamp': file.get('createTime')\n }\n\n if not found_files:\n raise ExtractorError('No video/audio found at provided URL.', expected=True)\n\n def _real_extract(self, url):\n file_id = self._match_id(url)\n return self.playlist_result(self._entries(file_id), playlist_id=file_id)\n", "path": "yt_dlp/extractor/gofile.py"}], "after_files": [{"content": "# coding: utf-8\nfrom .common import InfoExtractor\nfrom ..utils import (\n ExtractorError,\n try_get\n)\n\n\nclass GofileIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?gofile\\.io/d/(?P<id>[^/]+)'\n _TESTS = [{\n 'url': 'https://gofile.io/d/AMZyDw',\n 'info_dict': {\n 'id': 'AMZyDw',\n },\n 'playlist_mincount': 2,\n 'playlist': [{\n 'info_dict': {\n 'id': 'de571ac1-5edc-42e2-8ec2-bdac83ad4a31',\n 'filesize': 928116,\n 'ext': 'mp4',\n 'title': 'nuuh',\n 'release_timestamp': 1638338704,\n 'release_date': '20211201',\n }\n }]\n }, {\n 'url': 'https://gofile.io/d/is8lKr',\n 'info_dict': {\n 'id': 'TMjXd9',\n 'ext': 'mp4',\n },\n 'playlist_count': 0,\n 'skip': 'No video/audio found at provided URL.',\n }, {\n 'url': 'https://gofile.io/d/TMjXd9',\n 'info_dict': {\n 'id': 'TMjXd9',\n },\n 'playlist_count': 1,\n }]\n _TOKEN = None\n\n def _real_initialize(self):\n token = self._get_cookies('https://gofile.io/').get('accountToken')\n if token:\n self._TOKEN = token.value\n return\n\n account_data = self._download_json(\n 'https://api.gofile.io/createAccount', None, note='Getting a new guest account')\n self._TOKEN = account_data['data']['token']\n self._set_cookie('gofile.io', 'accountToken', self._TOKEN)\n\n def _entries(self, file_id):\n files = self._download_json('https://api.gofile.io/getContent', 'Gofile', note='Getting filelist', query={\n 'contentId': file_id,\n 'token': self._TOKEN,\n 'websiteToken': 12345,\n })\n\n status = files['status']\n if status != 'ok':\n raise ExtractorError(f'{self.IE_NAME} said: status {status}', expected=True)\n\n found_files = False\n for file in (try_get(files, lambda x: x['data']['contents'], dict) or {}).values():\n file_type, file_format = file.get('mimetype').split('/', 1)\n if file_type not in ('video', 'audio') and file_format != 'vnd.mts':\n continue\n\n found_files = True\n file_url = file.get('link')\n if file_url:\n yield {\n 'id': file['id'],\n 'title': file['name'].rsplit('.', 1)[0],\n 'url': file_url,\n 'filesize': file.get('size'),\n 'release_timestamp': file.get('createTime')\n }\n\n if not found_files:\n raise ExtractorError('No video/audio found at provided URL.', expected=True)\n\n def _real_extract(self, url):\n file_id = self._match_id(url)\n return self.playlist_result(self._entries(file_id), 
playlist_id=file_id)\n", "path": "yt_dlp/extractor/gofile.py"}]}
| 2,142 | 648 |
gh_patches_debug_2123
|
rasdani/github-patches
|
git_diff
|
archlinux__archinstall-2278
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
no default value for uki key in configurations.json
(archinstall 2.7.0)
When using archinstall on a system with no UEFI support, the user is not prompted to choose a value for the `uki` key. However, when running archinstall with a config file that has no `uki` key defined, it errors out since there is no default value for that key.
At least that's what I understood of the problem from looking at PR (https://github.com/archlinux/archinstall/pull/1519).
Adding uki: false in the config file fixed this for me.
By the way, how is UEFI support checked? I thought it was impossible to know from the OS side.
--- END ISSUE ---
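The crash comes down to indexing the arguments dictionary directly for a key that a configuration file may legitimately omit; a defaulted lookup avoids it. A minimal, hypothetical illustration (not archinstall code):

```python
# Config loaded from a file that never mentions "uki".
arguments = {"bootloader": "grub"}

# arguments["uki"]                 # raises KeyError: 'uki'
uki = arguments.get("uki", False)  # falls back to a sensible default
print(uki)  # False
```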
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `archinstall/scripts/guided.py`
Content:
```
1 from pathlib import Path
2 from typing import Any, TYPE_CHECKING, Optional
3
4 import archinstall
5 from archinstall import info, debug
6 from archinstall import SysInfo
7 from archinstall.lib import locale
8 from archinstall.lib import disk
9 from archinstall.lib.global_menu import GlobalMenu
10 from archinstall.lib.configuration import ConfigurationOutput
11 from archinstall.lib.installer import Installer
12 from archinstall.lib.menu import Menu
13 from archinstall.lib.mirrors import use_mirrors, add_custom_mirrors
14 from archinstall.lib.models import AudioConfiguration
15 from archinstall.lib.models.bootloader import Bootloader
16 from archinstall.lib.models.network_configuration import NetworkConfiguration
17 from archinstall.lib.profile.profiles_handler import profile_handler
18
19 if TYPE_CHECKING:
20 _: Any
21
22
23 if archinstall.arguments.get('help'):
24 print("See `man archinstall` for help.")
25 exit(0)
26
27
28 def ask_user_questions():
29 """
30 First, we'll ask the user for a bunch of user input.
31 Not until we're satisfied with what we want to install
32 will we continue with the actual installation steps.
33 """
34
35 # ref: https://github.com/archlinux/archinstall/pull/831
36 # we'll set NTP to true by default since this is also
37 # the default value specified in the menu options; in
38 # case it will be changed by the user we'll also update
39 # the system immediately
40 global_menu = GlobalMenu(data_store=archinstall.arguments)
41
42 global_menu.enable('archinstall-language')
43
44 # Set which region to download packages from during the installation
45 global_menu.enable('mirror_config')
46
47 global_menu.enable('locale_config')
48
49 global_menu.enable('disk_config', mandatory=True)
50
51 # Specify disk encryption options
52 global_menu.enable('disk_encryption')
53
54 # Ask which boot-loader to use (will only ask if we're in UEFI mode, otherwise will default to GRUB)
55 global_menu.enable('bootloader')
56
57 global_menu.enable('uki')
58
59 global_menu.enable('swap')
60
61 # Get the hostname for the machine
62 global_menu.enable('hostname')
63
64 # Ask for a root password (optional, but triggers requirement for super-user if skipped)
65 global_menu.enable('!root-password', mandatory=True)
66
67 global_menu.enable('!users', mandatory=True)
68
69 # Ask for archinstall-specific profiles_bck (such as desktop environments etc)
70 global_menu.enable('profile_config')
71
72 # Ask about audio server selection if one is not already set
73 global_menu.enable('audio_config')
74
75 # Ask for preferred kernel:
76 global_menu.enable('kernels', mandatory=True)
77
78 global_menu.enable('packages')
79
80 if archinstall.arguments.get('advanced', False):
81 # Enable parallel downloads
82 global_menu.enable('parallel downloads')
83
84 # Ask or Call the helper function that asks the user to optionally configure a network.
85 global_menu.enable('network_config')
86
87 global_menu.enable('timezone')
88
89 global_menu.enable('ntp')
90
91 global_menu.enable('additional-repositories')
92
93 global_menu.enable('__separator__')
94
95 global_menu.enable('save_config')
96 global_menu.enable('install')
97 global_menu.enable('abort')
98
99 global_menu.run()
100
101
102 def perform_installation(mountpoint: Path):
103 """
104 Performs the installation steps on a block device.
105 Only requirement is that the block devices are
106 formatted and setup prior to entering this function.
107 """
108 info('Starting installation')
109 disk_config: disk.DiskLayoutConfiguration = archinstall.arguments['disk_config']
110
111 # Retrieve list of additional repositories and set boolean values appropriately
112 enable_testing = 'testing' in archinstall.arguments.get('additional-repositories', [])
113 enable_multilib = 'multilib' in archinstall.arguments.get('additional-repositories', [])
114 run_mkinitcpio = not archinstall.arguments.get('uki')
115 locale_config: locale.LocaleConfiguration = archinstall.arguments['locale_config']
116 disk_encryption: disk.DiskEncryption = archinstall.arguments.get('disk_encryption', None)
117
118 with Installer(
119 mountpoint,
120 disk_config,
121 disk_encryption=disk_encryption,
122 kernels=archinstall.arguments.get('kernels', ['linux'])
123 ) as installation:
124 # Mount all the drives to the desired mountpoint
125 if disk_config.config_type != disk.DiskLayoutType.Pre_mount:
126 installation.mount_ordered_layout()
127
128 installation.sanity_check()
129
130 if disk_config.config_type != disk.DiskLayoutType.Pre_mount:
131 if disk_encryption and disk_encryption.encryption_type != disk.EncryptionType.NoEncryption:
132 # generate encryption key files for the mounted luks devices
133 installation.generate_key_files()
134
135 # Set mirrors used by pacstrap (outside of installation)
136 if mirror_config := archinstall.arguments.get('mirror_config', None):
137 if mirror_config.mirror_regions:
138 use_mirrors(mirror_config.mirror_regions)
139 if mirror_config.custom_mirrors:
140 add_custom_mirrors(mirror_config.custom_mirrors)
141
142 installation.minimal_installation(
143 testing=enable_testing,
144 multilib=enable_multilib,
145 mkinitcpio=run_mkinitcpio,
146 hostname=archinstall.arguments.get('hostname', 'archlinux'),
147 locale_config=locale_config
148 )
149
150 if mirror_config := archinstall.arguments.get('mirror_config', None):
151 installation.set_mirrors(mirror_config) # Set the mirrors in the installation medium
152
153 if archinstall.arguments.get('swap'):
154 installation.setup_swap('zram')
155
156 if archinstall.arguments.get("bootloader") == Bootloader.Grub and SysInfo.has_uefi():
157 installation.add_additional_packages("grub")
158
159 installation.add_bootloader(
160 archinstall.arguments["bootloader"],
161 archinstall.arguments["uki"]
162 )
163
164 # If user selected to copy the current ISO network configuration
165 # Perform a copy of the config
166 network_config: Optional[NetworkConfiguration] = archinstall.arguments.get('network_config', None)
167
168 if network_config:
169 network_config.install_network_config(
170 installation,
171 archinstall.arguments.get('profile_config', None)
172 )
173
174 if users := archinstall.arguments.get('!users', None):
175 installation.create_users(users)
176
177 audio_config: Optional[AudioConfiguration] = archinstall.arguments.get('audio_config', None)
178 if audio_config:
179 audio_config.install_audio_config(installation)
180 else:
181 info("No audio server will be installed")
182
183 if archinstall.arguments.get('packages', None) and archinstall.arguments.get('packages', None)[0] != '':
184 installation.add_additional_packages(archinstall.arguments.get('packages', None))
185
186 if profile_config := archinstall.arguments.get('profile_config', None):
187 profile_handler.install_profile_config(installation, profile_config)
188
189 if timezone := archinstall.arguments.get('timezone', None):
190 installation.set_timezone(timezone)
191
192 if archinstall.arguments.get('ntp', False):
193 installation.activate_time_synchronization()
194
195 if archinstall.accessibility_tools_in_use():
196 installation.enable_espeakup()
197
198 if (root_pw := archinstall.arguments.get('!root-password', None)) and len(root_pw):
199 installation.user_set_pw('root', root_pw)
200
201 # This step must be after profile installs to allow profiles_bck to install language pre-requisites.
202 # After which, this step will set the language both for console and x11 if x11 was installed for instance.
203 installation.set_keyboard_language(locale_config.kb_layout)
204
205 if profile_config := archinstall.arguments.get('profile_config', None):
206 profile_config.profile.post_install(installation)
207
208 # If the user provided a list of services to be enabled, pass the list to the enable_service function.
209 # Note that while it's called enable_service, it can actually take a list of services and iterate it.
210 if archinstall.arguments.get('services', None):
211 installation.enable_service(archinstall.arguments.get('services', []))
212
213 # If the user provided custom commands to be run post-installation, execute them now.
214 if archinstall.arguments.get('custom-commands', None):
215 archinstall.run_custom_user_commands(archinstall.arguments['custom-commands'], installation)
216
217 installation.genfstab()
218
219 info("For post-installation tips, see https://wiki.archlinux.org/index.php/Installation_guide#Post-installation")
220
221 if not archinstall.arguments.get('silent'):
222 prompt = str(_('Would you like to chroot into the newly created installation and perform post-installation configuration?'))
223 choice = Menu(prompt, Menu.yes_no(), default_option=Menu.yes()).run()
224 if choice.value == Menu.yes():
225 try:
226 installation.drop_to_shell()
227 except:
228 pass
229
230 debug(f"Disk states after installing: {disk.disk_layouts()}")
231
232
233 if not archinstall.arguments.get('silent'):
234 ask_user_questions()
235
236 config_output = ConfigurationOutput(archinstall.arguments)
237
238 if not archinstall.arguments.get('silent'):
239 config_output.show()
240
241 config_output.save()
242
243 if archinstall.arguments.get('dry_run'):
244 exit(0)
245
246 if not archinstall.arguments.get('silent'):
247 input(str(_('Press Enter to continue.')))
248
249 fs_handler = disk.FilesystemHandler(
250 archinstall.arguments['disk_config'],
251 archinstall.arguments.get('disk_encryption', None)
252 )
253
254 fs_handler.perform_filesystem_operations()
255
256 perform_installation(archinstall.storage.get('MOUNT_POINT', Path('/mnt')))
257
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/archinstall/scripts/guided.py b/archinstall/scripts/guided.py
--- a/archinstall/scripts/guided.py
+++ b/archinstall/scripts/guided.py
@@ -158,7 +158,7 @@
installation.add_bootloader(
archinstall.arguments["bootloader"],
- archinstall.arguments["uki"]
+ archinstall.arguments.get('uki', False)
)
# If user selected to copy the current ISO network configuration
|
{"golden_diff": "diff --git a/archinstall/scripts/guided.py b/archinstall/scripts/guided.py\n--- a/archinstall/scripts/guided.py\n+++ b/archinstall/scripts/guided.py\n@@ -158,7 +158,7 @@\n \n \t\tinstallation.add_bootloader(\n \t\t\tarchinstall.arguments[\"bootloader\"],\n-\t\t\tarchinstall.arguments[\"uki\"]\n+\t\t\tarchinstall.arguments.get('uki', False)\n \t\t)\n \n \t\t# If user selected to copy the current ISO network configuration\n", "issue": "no default value for uki key in configurations.json\n(archinstall 2.7.0) \r\nWhen using archinstall in a system with no support for uefi, the user is not prompted to choose a value for the uki key. However, when running archinstall with a config file without a uki key defined, it errors out since there is no such key defined with a default value.\r\n\r\nAt least thats what I understood from the problem looking at PR(https://github.com/archlinux/archinstall/pull/1519).\r\n\r\nAdding uki: false in the config file fixed this for me.\r\n\r\nby the way, how is uefi supported checked? I though it was impossible to know from OS side\n", "before_files": [{"content": "from pathlib import Path\nfrom typing import Any, TYPE_CHECKING, Optional\n\nimport archinstall\nfrom archinstall import info, debug\nfrom archinstall import SysInfo\nfrom archinstall.lib import locale\nfrom archinstall.lib import disk\nfrom archinstall.lib.global_menu import GlobalMenu\nfrom archinstall.lib.configuration import ConfigurationOutput\nfrom archinstall.lib.installer import Installer\nfrom archinstall.lib.menu import Menu\nfrom archinstall.lib.mirrors import use_mirrors, add_custom_mirrors\nfrom archinstall.lib.models import AudioConfiguration\nfrom archinstall.lib.models.bootloader import Bootloader\nfrom archinstall.lib.models.network_configuration import NetworkConfiguration\nfrom archinstall.lib.profile.profiles_handler import profile_handler\n\nif TYPE_CHECKING:\n\t_: Any\n\n\nif archinstall.arguments.get('help'):\n\tprint(\"See `man archinstall` for help.\")\n\texit(0)\n\n\ndef ask_user_questions():\n\t\"\"\"\n\t\tFirst, we'll ask the user for a bunch of user input.\n\t\tNot until we're satisfied with what we want to install\n\t\twill we continue with the actual installation steps.\n\t\"\"\"\n\n\t# ref: https://github.com/archlinux/archinstall/pull/831\n\t# we'll set NTP to true by default since this is also\n\t# the default value specified in the menu options; in\n\t# case it will be changed by the user we'll also update\n\t# the system immediately\n\tglobal_menu = GlobalMenu(data_store=archinstall.arguments)\n\n\tglobal_menu.enable('archinstall-language')\n\n\t# Set which region to download packages from during the installation\n\tglobal_menu.enable('mirror_config')\n\n\tglobal_menu.enable('locale_config')\n\n\tglobal_menu.enable('disk_config', mandatory=True)\n\n\t# Specify disk encryption options\n\tglobal_menu.enable('disk_encryption')\n\n\t# Ask which boot-loader to use (will only ask if we're in UEFI mode, otherwise will default to GRUB)\n\tglobal_menu.enable('bootloader')\n\n\tglobal_menu.enable('uki')\n\n\tglobal_menu.enable('swap')\n\n\t# Get the hostname for the machine\n\tglobal_menu.enable('hostname')\n\n\t# Ask for a root password (optional, but triggers requirement for super-user if skipped)\n\tglobal_menu.enable('!root-password', mandatory=True)\n\n\tglobal_menu.enable('!users', mandatory=True)\n\n\t# Ask for archinstall-specific profiles_bck (such as desktop environments etc)\n\tglobal_menu.enable('profile_config')\n\n\t# Ask about audio server 
selection if one is not already set\n\tglobal_menu.enable('audio_config')\n\n\t# Ask for preferred kernel:\n\tglobal_menu.enable('kernels', mandatory=True)\n\n\tglobal_menu.enable('packages')\n\n\tif archinstall.arguments.get('advanced', False):\n\t\t# Enable parallel downloads\n\t\tglobal_menu.enable('parallel downloads')\n\n\t# Ask or Call the helper function that asks the user to optionally configure a network.\n\tglobal_menu.enable('network_config')\n\n\tglobal_menu.enable('timezone')\n\n\tglobal_menu.enable('ntp')\n\n\tglobal_menu.enable('additional-repositories')\n\n\tglobal_menu.enable('__separator__')\n\n\tglobal_menu.enable('save_config')\n\tglobal_menu.enable('install')\n\tglobal_menu.enable('abort')\n\n\tglobal_menu.run()\n\n\ndef perform_installation(mountpoint: Path):\n\t\"\"\"\n\tPerforms the installation steps on a block device.\n\tOnly requirement is that the block devices are\n\tformatted and setup prior to entering this function.\n\t\"\"\"\n\tinfo('Starting installation')\n\tdisk_config: disk.DiskLayoutConfiguration = archinstall.arguments['disk_config']\n\n\t# Retrieve list of additional repositories and set boolean values appropriately\n\tenable_testing = 'testing' in archinstall.arguments.get('additional-repositories', [])\n\tenable_multilib = 'multilib' in archinstall.arguments.get('additional-repositories', [])\n\trun_mkinitcpio = not archinstall.arguments.get('uki')\n\tlocale_config: locale.LocaleConfiguration = archinstall.arguments['locale_config']\n\tdisk_encryption: disk.DiskEncryption = archinstall.arguments.get('disk_encryption', None)\n\n\twith Installer(\n\t\tmountpoint,\n\t\tdisk_config,\n\t\tdisk_encryption=disk_encryption,\n\t\tkernels=archinstall.arguments.get('kernels', ['linux'])\n\t) as installation:\n\t\t# Mount all the drives to the desired mountpoint\n\t\tif disk_config.config_type != disk.DiskLayoutType.Pre_mount:\n\t\t\tinstallation.mount_ordered_layout()\n\n\t\tinstallation.sanity_check()\n\n\t\tif disk_config.config_type != disk.DiskLayoutType.Pre_mount:\n\t\t\tif disk_encryption and disk_encryption.encryption_type != disk.EncryptionType.NoEncryption:\n\t\t\t\t# generate encryption key files for the mounted luks devices\n\t\t\t\tinstallation.generate_key_files()\n\n\t\t# Set mirrors used by pacstrap (outside of installation)\n\t\tif mirror_config := archinstall.arguments.get('mirror_config', None):\n\t\t\tif mirror_config.mirror_regions:\n\t\t\t\tuse_mirrors(mirror_config.mirror_regions)\n\t\t\tif mirror_config.custom_mirrors:\n\t\t\t\tadd_custom_mirrors(mirror_config.custom_mirrors)\n\n\t\tinstallation.minimal_installation(\n\t\t\ttesting=enable_testing,\n\t\t\tmultilib=enable_multilib,\n\t\t\tmkinitcpio=run_mkinitcpio,\n\t\t\thostname=archinstall.arguments.get('hostname', 'archlinux'),\n\t\t\tlocale_config=locale_config\n\t\t)\n\n\t\tif mirror_config := archinstall.arguments.get('mirror_config', None):\n\t\t\tinstallation.set_mirrors(mirror_config) # Set the mirrors in the installation medium\n\n\t\tif archinstall.arguments.get('swap'):\n\t\t\tinstallation.setup_swap('zram')\n\n\t\tif archinstall.arguments.get(\"bootloader\") == Bootloader.Grub and SysInfo.has_uefi():\n\t\t\tinstallation.add_additional_packages(\"grub\")\n\n\t\tinstallation.add_bootloader(\n\t\t\tarchinstall.arguments[\"bootloader\"],\n\t\t\tarchinstall.arguments[\"uki\"]\n\t\t)\n\n\t\t# If user selected to copy the current ISO network configuration\n\t\t# Perform a copy of the config\n\t\tnetwork_config: Optional[NetworkConfiguration] = 
archinstall.arguments.get('network_config', None)\n\n\t\tif network_config:\n\t\t\tnetwork_config.install_network_config(\n\t\t\t\tinstallation,\n\t\t\t\tarchinstall.arguments.get('profile_config', None)\n\t\t\t)\n\n\t\tif users := archinstall.arguments.get('!users', None):\n\t\t\tinstallation.create_users(users)\n\n\t\taudio_config: Optional[AudioConfiguration] = archinstall.arguments.get('audio_config', None)\n\t\tif audio_config:\n\t\t\taudio_config.install_audio_config(installation)\n\t\telse:\n\t\t\tinfo(\"No audio server will be installed\")\n\n\t\tif archinstall.arguments.get('packages', None) and archinstall.arguments.get('packages', None)[0] != '':\n\t\t\tinstallation.add_additional_packages(archinstall.arguments.get('packages', None))\n\n\t\tif profile_config := archinstall.arguments.get('profile_config', None):\n\t\t\tprofile_handler.install_profile_config(installation, profile_config)\n\n\t\tif timezone := archinstall.arguments.get('timezone', None):\n\t\t\tinstallation.set_timezone(timezone)\n\n\t\tif archinstall.arguments.get('ntp', False):\n\t\t\tinstallation.activate_time_synchronization()\n\n\t\tif archinstall.accessibility_tools_in_use():\n\t\t\tinstallation.enable_espeakup()\n\n\t\tif (root_pw := archinstall.arguments.get('!root-password', None)) and len(root_pw):\n\t\t\tinstallation.user_set_pw('root', root_pw)\n\n\t\t# This step must be after profile installs to allow profiles_bck to install language pre-requisites.\n\t\t# After which, this step will set the language both for console and x11 if x11 was installed for instance.\n\t\tinstallation.set_keyboard_language(locale_config.kb_layout)\n\n\t\tif profile_config := archinstall.arguments.get('profile_config', None):\n\t\t\tprofile_config.profile.post_install(installation)\n\n\t\t# If the user provided a list of services to be enabled, pass the list to the enable_service function.\n\t\t# Note that while it's called enable_service, it can actually take a list of services and iterate it.\n\t\tif archinstall.arguments.get('services', None):\n\t\t\tinstallation.enable_service(archinstall.arguments.get('services', []))\n\n\t\t# If the user provided custom commands to be run post-installation, execute them now.\n\t\tif archinstall.arguments.get('custom-commands', None):\n\t\t\tarchinstall.run_custom_user_commands(archinstall.arguments['custom-commands'], installation)\n\n\t\tinstallation.genfstab()\n\n\t\tinfo(\"For post-installation tips, see https://wiki.archlinux.org/index.php/Installation_guide#Post-installation\")\n\n\t\tif not archinstall.arguments.get('silent'):\n\t\t\tprompt = str(_('Would you like to chroot into the newly created installation and perform post-installation configuration?'))\n\t\t\tchoice = Menu(prompt, Menu.yes_no(), default_option=Menu.yes()).run()\n\t\t\tif choice.value == Menu.yes():\n\t\t\t\ttry:\n\t\t\t\t\tinstallation.drop_to_shell()\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\n\tdebug(f\"Disk states after installing: {disk.disk_layouts()}\")\n\n\nif not archinstall.arguments.get('silent'):\n\task_user_questions()\n\nconfig_output = ConfigurationOutput(archinstall.arguments)\n\nif not archinstall.arguments.get('silent'):\n\tconfig_output.show()\n\nconfig_output.save()\n\nif archinstall.arguments.get('dry_run'):\n\texit(0)\n\nif not archinstall.arguments.get('silent'):\n\tinput(str(_('Press Enter to continue.')))\n\nfs_handler = disk.FilesystemHandler(\n\tarchinstall.arguments['disk_config'],\n\tarchinstall.arguments.get('disk_encryption', 
None)\n)\n\nfs_handler.perform_filesystem_operations()\n\nperform_installation(archinstall.storage.get('MOUNT_POINT', Path('/mnt')))\n", "path": "archinstall/scripts/guided.py"}], "after_files": [{"content": "from pathlib import Path\nfrom typing import Any, TYPE_CHECKING, Optional\n\nimport archinstall\nfrom archinstall import info, debug\nfrom archinstall import SysInfo\nfrom archinstall.lib import locale\nfrom archinstall.lib import disk\nfrom archinstall.lib.global_menu import GlobalMenu\nfrom archinstall.lib.configuration import ConfigurationOutput\nfrom archinstall.lib.installer import Installer\nfrom archinstall.lib.menu import Menu\nfrom archinstall.lib.mirrors import use_mirrors, add_custom_mirrors\nfrom archinstall.lib.models import AudioConfiguration\nfrom archinstall.lib.models.bootloader import Bootloader\nfrom archinstall.lib.models.network_configuration import NetworkConfiguration\nfrom archinstall.lib.profile.profiles_handler import profile_handler\n\nif TYPE_CHECKING:\n\t_: Any\n\n\nif archinstall.arguments.get('help'):\n\tprint(\"See `man archinstall` for help.\")\n\texit(0)\n\n\ndef ask_user_questions():\n\t\"\"\"\n\t\tFirst, we'll ask the user for a bunch of user input.\n\t\tNot until we're satisfied with what we want to install\n\t\twill we continue with the actual installation steps.\n\t\"\"\"\n\n\t# ref: https://github.com/archlinux/archinstall/pull/831\n\t# we'll set NTP to true by default since this is also\n\t# the default value specified in the menu options; in\n\t# case it will be changed by the user we'll also update\n\t# the system immediately\n\tglobal_menu = GlobalMenu(data_store=archinstall.arguments)\n\n\tglobal_menu.enable('archinstall-language')\n\n\t# Set which region to download packages from during the installation\n\tglobal_menu.enable('mirror_config')\n\n\tglobal_menu.enable('locale_config')\n\n\tglobal_menu.enable('disk_config', mandatory=True)\n\n\t# Specify disk encryption options\n\tglobal_menu.enable('disk_encryption')\n\n\t# Ask which boot-loader to use (will only ask if we're in UEFI mode, otherwise will default to GRUB)\n\tglobal_menu.enable('bootloader')\n\n\tglobal_menu.enable('uki')\n\n\tglobal_menu.enable('swap')\n\n\t# Get the hostname for the machine\n\tglobal_menu.enable('hostname')\n\n\t# Ask for a root password (optional, but triggers requirement for super-user if skipped)\n\tglobal_menu.enable('!root-password', mandatory=True)\n\n\tglobal_menu.enable('!users', mandatory=True)\n\n\t# Ask for archinstall-specific profiles_bck (such as desktop environments etc)\n\tglobal_menu.enable('profile_config')\n\n\t# Ask about audio server selection if one is not already set\n\tglobal_menu.enable('audio_config')\n\n\t# Ask for preferred kernel:\n\tglobal_menu.enable('kernels', mandatory=True)\n\n\tglobal_menu.enable('packages')\n\n\tif archinstall.arguments.get('advanced', False):\n\t\t# Enable parallel downloads\n\t\tglobal_menu.enable('parallel downloads')\n\n\t# Ask or Call the helper function that asks the user to optionally configure a network.\n\tglobal_menu.enable('network_config')\n\n\tglobal_menu.enable('timezone')\n\n\tglobal_menu.enable('ntp')\n\n\tglobal_menu.enable('additional-repositories')\n\n\tglobal_menu.enable('__separator__')\n\n\tglobal_menu.enable('save_config')\n\tglobal_menu.enable('install')\n\tglobal_menu.enable('abort')\n\n\tglobal_menu.run()\n\n\ndef perform_installation(mountpoint: Path):\n\t\"\"\"\n\tPerforms the installation steps on a block device.\n\tOnly requirement is that the block devices are\n\tformatted and 
setup prior to entering this function.\n\t\"\"\"\n\tinfo('Starting installation')\n\tdisk_config: disk.DiskLayoutConfiguration = archinstall.arguments['disk_config']\n\n\t# Retrieve list of additional repositories and set boolean values appropriately\n\tenable_testing = 'testing' in archinstall.arguments.get('additional-repositories', [])\n\tenable_multilib = 'multilib' in archinstall.arguments.get('additional-repositories', [])\n\trun_mkinitcpio = not archinstall.arguments.get('uki')\n\tlocale_config: locale.LocaleConfiguration = archinstall.arguments['locale_config']\n\tdisk_encryption: disk.DiskEncryption = archinstall.arguments.get('disk_encryption', None)\n\n\twith Installer(\n\t\tmountpoint,\n\t\tdisk_config,\n\t\tdisk_encryption=disk_encryption,\n\t\tkernels=archinstall.arguments.get('kernels', ['linux'])\n\t) as installation:\n\t\t# Mount all the drives to the desired mountpoint\n\t\tif disk_config.config_type != disk.DiskLayoutType.Pre_mount:\n\t\t\tinstallation.mount_ordered_layout()\n\n\t\tinstallation.sanity_check()\n\n\t\tif disk_config.config_type != disk.DiskLayoutType.Pre_mount:\n\t\t\tif disk_encryption and disk_encryption.encryption_type != disk.EncryptionType.NoEncryption:\n\t\t\t\t# generate encryption key files for the mounted luks devices\n\t\t\t\tinstallation.generate_key_files()\n\n\t\t# Set mirrors used by pacstrap (outside of installation)\n\t\tif mirror_config := archinstall.arguments.get('mirror_config', None):\n\t\t\tif mirror_config.mirror_regions:\n\t\t\t\tuse_mirrors(mirror_config.mirror_regions)\n\t\t\tif mirror_config.custom_mirrors:\n\t\t\t\tadd_custom_mirrors(mirror_config.custom_mirrors)\n\n\t\tinstallation.minimal_installation(\n\t\t\ttesting=enable_testing,\n\t\t\tmultilib=enable_multilib,\n\t\t\tmkinitcpio=run_mkinitcpio,\n\t\t\thostname=archinstall.arguments.get('hostname', 'archlinux'),\n\t\t\tlocale_config=locale_config\n\t\t)\n\n\t\tif mirror_config := archinstall.arguments.get('mirror_config', None):\n\t\t\tinstallation.set_mirrors(mirror_config) # Set the mirrors in the installation medium\n\n\t\tif archinstall.arguments.get('swap'):\n\t\t\tinstallation.setup_swap('zram')\n\n\t\tif archinstall.arguments.get(\"bootloader\") == Bootloader.Grub and SysInfo.has_uefi():\n\t\t\tinstallation.add_additional_packages(\"grub\")\n\n\t\tinstallation.add_bootloader(\n\t\t\tarchinstall.arguments[\"bootloader\"],\n\t\t\tarchinstall.arguments.get('uki', False)\n\t\t)\n\n\t\t# If user selected to copy the current ISO network configuration\n\t\t# Perform a copy of the config\n\t\tnetwork_config: Optional[NetworkConfiguration] = archinstall.arguments.get('network_config', None)\n\n\t\tif network_config:\n\t\t\tnetwork_config.install_network_config(\n\t\t\t\tinstallation,\n\t\t\t\tarchinstall.arguments.get('profile_config', None)\n\t\t\t)\n\n\t\tif users := archinstall.arguments.get('!users', None):\n\t\t\tinstallation.create_users(users)\n\n\t\taudio_config: Optional[AudioConfiguration] = archinstall.arguments.get('audio_config', None)\n\t\tif audio_config:\n\t\t\taudio_config.install_audio_config(installation)\n\t\telse:\n\t\t\tinfo(\"No audio server will be installed\")\n\n\t\tif archinstall.arguments.get('packages', None) and archinstall.arguments.get('packages', None)[0] != '':\n\t\t\tinstallation.add_additional_packages(archinstall.arguments.get('packages', None))\n\n\t\tif profile_config := archinstall.arguments.get('profile_config', None):\n\t\t\tprofile_handler.install_profile_config(installation, profile_config)\n\n\t\tif timezone := 
archinstall.arguments.get('timezone', None):\n\t\t\tinstallation.set_timezone(timezone)\n\n\t\tif archinstall.arguments.get('ntp', False):\n\t\t\tinstallation.activate_time_synchronization()\n\n\t\tif archinstall.accessibility_tools_in_use():\n\t\t\tinstallation.enable_espeakup()\n\n\t\tif (root_pw := archinstall.arguments.get('!root-password', None)) and len(root_pw):\n\t\t\tinstallation.user_set_pw('root', root_pw)\n\n\t\t# This step must be after profile installs to allow profiles_bck to install language pre-requisites.\n\t\t# After which, this step will set the language both for console and x11 if x11 was installed for instance.\n\t\tinstallation.set_keyboard_language(locale_config.kb_layout)\n\n\t\tif profile_config := archinstall.arguments.get('profile_config', None):\n\t\t\tprofile_config.profile.post_install(installation)\n\n\t\t# If the user provided a list of services to be enabled, pass the list to the enable_service function.\n\t\t# Note that while it's called enable_service, it can actually take a list of services and iterate it.\n\t\tif archinstall.arguments.get('services', None):\n\t\t\tinstallation.enable_service(archinstall.arguments.get('services', []))\n\n\t\t# If the user provided custom commands to be run post-installation, execute them now.\n\t\tif archinstall.arguments.get('custom-commands', None):\n\t\t\tarchinstall.run_custom_user_commands(archinstall.arguments['custom-commands'], installation)\n\n\t\tinstallation.genfstab()\n\n\t\tinfo(\"For post-installation tips, see https://wiki.archlinux.org/index.php/Installation_guide#Post-installation\")\n\n\t\tif not archinstall.arguments.get('silent'):\n\t\t\tprompt = str(_('Would you like to chroot into the newly created installation and perform post-installation configuration?'))\n\t\t\tchoice = Menu(prompt, Menu.yes_no(), default_option=Menu.yes()).run()\n\t\t\tif choice.value == Menu.yes():\n\t\t\t\ttry:\n\t\t\t\t\tinstallation.drop_to_shell()\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\n\tdebug(f\"Disk states after installing: {disk.disk_layouts()}\")\n\n\nif not archinstall.arguments.get('silent'):\n\task_user_questions()\n\nconfig_output = ConfigurationOutput(archinstall.arguments)\n\nif not archinstall.arguments.get('silent'):\n\tconfig_output.show()\n\nconfig_output.save()\n\nif archinstall.arguments.get('dry_run'):\n\texit(0)\n\nif not archinstall.arguments.get('silent'):\n\tinput(str(_('Press Enter to continue.')))\n\nfs_handler = disk.FilesystemHandler(\n\tarchinstall.arguments['disk_config'],\n\tarchinstall.arguments.get('disk_encryption', None)\n)\n\nfs_handler.perform_filesystem_operations()\n\nperform_installation(archinstall.storage.get('MOUNT_POINT', Path('/mnt')))\n", "path": "archinstall/scripts/guided.py"}]}
| 3,182 | 110 |
gh_patches_debug_22100
|
rasdani/github-patches
|
git_diff
|
microsoft__playwright-python-525
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Installation issues when using pdm
[pdm](https://github.com/frostming/pdm/) is a new tool for Python environment management.
It works very well, but when I try to install playwright, it fails to install. You can find the details here:
https://github.com/frostming/pdm/issues/269
The PDM developer points out that playwright seems to be missing some required fields in the wheels configs (see last comment in the linked issue).
I would highly appreciate if you could look into this issue. Cheers! 🍺
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright (c) Microsoft Corporation.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import glob
16 import os
17 import shutil
18 import subprocess
19 import sys
20 import zipfile
21 from pathlib import Path
22
23 import setuptools
24 from wheel.bdist_wheel import bdist_wheel as BDistWheelCommand
25
26 driver_version = "1.9.0-1614037901000"
27
28 def extractall(zip: zipfile.ZipFile, path: str) -> None:
29 for name in zip.namelist():
30 member = zip.getinfo(name)
31 extracted_path = zip.extract(member, path)
32 attr = member.external_attr >> 16
33 if attr != 0:
34 os.chmod(extracted_path, attr)
35
36
37 class PlaywrightBDistWheelCommand(BDistWheelCommand):
38 def run(self) -> None:
39 if os.path.exists("build"):
40 shutil.rmtree("build")
41 if os.path.exists("dist"):
42 shutil.rmtree("dist")
43 if os.path.exists("playwright.egg-info"):
44 shutil.rmtree("playwright.egg-info")
45 super().run()
46 os.makedirs("driver", exist_ok=True)
47 os.makedirs("playwright/driver", exist_ok=True)
48 for platform in ["mac", "linux", "win32", "win32_x64"]:
49 zip_file = f"playwright-{driver_version}-{platform}.zip"
50 if not os.path.exists("driver/" + zip_file):
51 url = "https://playwright.azureedge.net/builds/driver/"
52 url = url + "next/"
53 url = url + zip_file
54 print("Fetching ", url)
55 subprocess.check_call(
56 ["curl", "--http1.1", url, "-o", "driver/" + zip_file]
57 )
58 base_wheel_location = glob.glob("dist/*.whl")[0]
59 without_platform = base_wheel_location[:-7]
60 platform_map = {
61 "darwin": "mac",
62 "linux": "linux",
63 "win32": "win32_x64" if sys.maxsize > 2 ** 32 else "win32",
64 }
65 for platform in ["mac", "linux", "win32", "win32_x64"]:
66 zip_file = f"driver/playwright-{driver_version}-{platform}.zip"
67 with zipfile.ZipFile(zip_file, "r") as zip:
68 extractall(zip, f"driver/{platform}")
69 if platform_map[sys.platform] == platform:
70 with zipfile.ZipFile(zip_file, "r") as zip:
71 extractall(zip, "playwright/driver")
72 wheel = ""
73 if platform == "mac":
74 wheel = "macosx_10_13_x86_64.whl"
75 if platform == "linux":
76 wheel = "manylinux1_x86_64.whl"
77 if platform == "win32":
78 wheel = "win32.whl"
79 if platform == "win32_x64":
80 wheel = "win_amd64.whl"
81 wheel_location = without_platform + wheel
82 shutil.copy(base_wheel_location, wheel_location)
83 with zipfile.ZipFile(wheel_location, "a") as zip:
84 driver_root = os.path.abspath(f"driver/{platform}")
85 for dir_path, _, files in os.walk(driver_root):
86 for file in files:
87 from_path = os.path.join(dir_path, file)
88 to_path = os.path.relpath(from_path, driver_root)
89 zip.write(from_path, f"playwright/driver/{to_path}")
90 if platform == "mac":
91 # Ship mac both as 10_13 as and 11_0 universal to work across Macs.
92 universal_location = without_platform + "macosx_11_0_universal2.whl"
93 shutil.copyfile(wheel_location, universal_location)
94 with zipfile.ZipFile(universal_location, "a") as zip:
95 zip.writestr("playwright/driver/README.md", "Universal Mac package")
96
97 os.remove(base_wheel_location)
98
99
100 setuptools.setup(
101 name="playwright",
102 author="Microsoft Corporation",
103 author_email="",
104 description="A high-level API to automate web browsers",
105 long_description=Path("README.md").read_text(encoding="utf-8"),
106 long_description_content_type="text/markdown",
107 url="https://github.com/Microsoft/playwright-python",
108 packages=["playwright"],
109 include_package_data=True,
110 install_requires=[
111 "greenlet==1.0.0",
112 "pyee>=8.0.1",
113 "typing-extensions;python_version<='3.8'",
114 ],
115 classifiers=[
116 "Topic :: Software Development :: Testing",
117 "Topic :: Internet :: WWW/HTTP :: Browsers",
118 "Intended Audience :: Developers",
119 "Programming Language :: Python :: 3",
120 "Programming Language :: Python :: 3.7",
121 "Programming Language :: Python :: 3.8",
122 "Programming Language :: Python :: 3.9",
123 "License :: OSI Approved :: Apache Software License",
124 "Operating System :: OS Independent",
125 ],
126 python_requires=">=3.7",
127 cmdclass={"bdist_wheel": PlaywrightBDistWheelCommand},
128 use_scm_version={
129 "version_scheme": "post-release",
130 "write_to": "playwright/_repo_version.py",
131 "write_to_template": 'version = "{version}"\n',
132 },
133 setup_requires=["setuptools_scm", "wheel"],
134 entry_points={
135 "console_scripts": [
136 "playwright=playwright.__main__:main",
137 ],
138 },
139 )
140
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -21,10 +21,12 @@
from pathlib import Path
import setuptools
+from auditwheel.wheeltools import InWheel
from wheel.bdist_wheel import bdist_wheel as BDistWheelCommand
driver_version = "1.9.0-1614037901000"
+
def extractall(zip: zipfile.ZipFile, path: str) -> None:
for name in zip.namelist():
member = zip.getinfo(name)
@@ -95,6 +97,18 @@
zip.writestr("playwright/driver/README.md", "Universal Mac package")
os.remove(base_wheel_location)
+ for whlfile in glob.glob("dist/*.whl"):
+
+ os.makedirs("wheelhouse", exist_ok=True)
+ with InWheel(
+ in_wheel=whlfile,
+ out_wheel=os.path.join("wheelhouse", os.path.basename(whlfile)),
+ ret_self=True,
+ ):
+ print("Updating RECORD file of %s" % whlfile)
+ shutil.rmtree("dist")
+ print("Copying new wheels")
+ shutil.move("wheelhouse", "dist")
setuptools.setup(
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -21,10 +21,12 @@\n from pathlib import Path\n \n import setuptools\n+from auditwheel.wheeltools import InWheel\n from wheel.bdist_wheel import bdist_wheel as BDistWheelCommand\n \n driver_version = \"1.9.0-1614037901000\"\n \n+\n def extractall(zip: zipfile.ZipFile, path: str) -> None:\n for name in zip.namelist():\n member = zip.getinfo(name)\n@@ -95,6 +97,18 @@\n zip.writestr(\"playwright/driver/README.md\", \"Universal Mac package\")\n \n os.remove(base_wheel_location)\n+ for whlfile in glob.glob(\"dist/*.whl\"):\n+\n+ os.makedirs(\"wheelhouse\", exist_ok=True)\n+ with InWheel(\n+ in_wheel=whlfile,\n+ out_wheel=os.path.join(\"wheelhouse\", os.path.basename(whlfile)),\n+ ret_self=True,\n+ ):\n+ print(\"Updating RECORD file of %s\" % whlfile)\n+ shutil.rmtree(\"dist\")\n+ print(\"Copying new wheels\")\n+ shutil.move(\"wheelhouse\", \"dist\")\n \n \n setuptools.setup(\n", "issue": "Installation issues when using pdm\n[pdm](https://github.com/frostming/pdm/) is a new tool for Python environment managing.\r\nIt works very well, but when I try to install playwright, it fails to install. You can find the details here: \r\nhttps://github.com/frostming/pdm/issues/269\r\n\r\nThe PDM developer points out that playwright seems to be missing some required fields in the wheels configs (see last comment in the linked issue).\r\n\r\nI would highly appreciate if you could look into this issue. Cheers! \ud83c\udf7a\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport glob\nimport os\nimport shutil\nimport subprocess\nimport sys\nimport zipfile\nfrom pathlib import Path\n\nimport setuptools\nfrom wheel.bdist_wheel import bdist_wheel as BDistWheelCommand\n\ndriver_version = \"1.9.0-1614037901000\"\n\ndef extractall(zip: zipfile.ZipFile, path: str) -> None:\n for name in zip.namelist():\n member = zip.getinfo(name)\n extracted_path = zip.extract(member, path)\n attr = member.external_attr >> 16\n if attr != 0:\n os.chmod(extracted_path, attr)\n\n\nclass PlaywrightBDistWheelCommand(BDistWheelCommand):\n def run(self) -> None:\n if os.path.exists(\"build\"):\n shutil.rmtree(\"build\")\n if os.path.exists(\"dist\"):\n shutil.rmtree(\"dist\")\n if os.path.exists(\"playwright.egg-info\"):\n shutil.rmtree(\"playwright.egg-info\")\n super().run()\n os.makedirs(\"driver\", exist_ok=True)\n os.makedirs(\"playwright/driver\", exist_ok=True)\n for platform in [\"mac\", \"linux\", \"win32\", \"win32_x64\"]:\n zip_file = f\"playwright-{driver_version}-{platform}.zip\"\n if not os.path.exists(\"driver/\" + zip_file):\n url = \"https://playwright.azureedge.net/builds/driver/\"\n url = url + \"next/\"\n url = url + zip_file\n print(\"Fetching \", url)\n subprocess.check_call(\n [\"curl\", \"--http1.1\", url, \"-o\", \"driver/\" + zip_file]\n )\n base_wheel_location = glob.glob(\"dist/*.whl\")[0]\n without_platform = base_wheel_location[:-7]\n platform_map = {\n 
\"darwin\": \"mac\",\n \"linux\": \"linux\",\n \"win32\": \"win32_x64\" if sys.maxsize > 2 ** 32 else \"win32\",\n }\n for platform in [\"mac\", \"linux\", \"win32\", \"win32_x64\"]:\n zip_file = f\"driver/playwright-{driver_version}-{platform}.zip\"\n with zipfile.ZipFile(zip_file, \"r\") as zip:\n extractall(zip, f\"driver/{platform}\")\n if platform_map[sys.platform] == platform:\n with zipfile.ZipFile(zip_file, \"r\") as zip:\n extractall(zip, \"playwright/driver\")\n wheel = \"\"\n if platform == \"mac\":\n wheel = \"macosx_10_13_x86_64.whl\"\n if platform == \"linux\":\n wheel = \"manylinux1_x86_64.whl\"\n if platform == \"win32\":\n wheel = \"win32.whl\"\n if platform == \"win32_x64\":\n wheel = \"win_amd64.whl\"\n wheel_location = without_platform + wheel\n shutil.copy(base_wheel_location, wheel_location)\n with zipfile.ZipFile(wheel_location, \"a\") as zip:\n driver_root = os.path.abspath(f\"driver/{platform}\")\n for dir_path, _, files in os.walk(driver_root):\n for file in files:\n from_path = os.path.join(dir_path, file)\n to_path = os.path.relpath(from_path, driver_root)\n zip.write(from_path, f\"playwright/driver/{to_path}\")\n if platform == \"mac\":\n # Ship mac both as 10_13 as and 11_0 universal to work across Macs.\n universal_location = without_platform + \"macosx_11_0_universal2.whl\"\n shutil.copyfile(wheel_location, universal_location)\n with zipfile.ZipFile(universal_location, \"a\") as zip:\n zip.writestr(\"playwright/driver/README.md\", \"Universal Mac package\")\n\n os.remove(base_wheel_location)\n\n\nsetuptools.setup(\n name=\"playwright\",\n author=\"Microsoft Corporation\",\n author_email=\"\",\n description=\"A high-level API to automate web browsers\",\n long_description=Path(\"README.md\").read_text(encoding=\"utf-8\"),\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/Microsoft/playwright-python\",\n packages=[\"playwright\"],\n include_package_data=True,\n install_requires=[\n \"greenlet==1.0.0\",\n \"pyee>=8.0.1\",\n \"typing-extensions;python_version<='3.8'\",\n ],\n classifiers=[\n \"Topic :: Software Development :: Testing\",\n \"Topic :: Internet :: WWW/HTTP :: Browsers\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires=\">=3.7\",\n cmdclass={\"bdist_wheel\": PlaywrightBDistWheelCommand},\n use_scm_version={\n \"version_scheme\": \"post-release\",\n \"write_to\": \"playwright/_repo_version.py\",\n \"write_to_template\": 'version = \"{version}\"\\n',\n },\n setup_requires=[\"setuptools_scm\", \"wheel\"],\n entry_points={\n \"console_scripts\": [\n \"playwright=playwright.__main__:main\",\n ],\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the 
License.\n\nimport glob\nimport os\nimport shutil\nimport subprocess\nimport sys\nimport zipfile\nfrom pathlib import Path\n\nimport setuptools\nfrom auditwheel.wheeltools import InWheel\nfrom wheel.bdist_wheel import bdist_wheel as BDistWheelCommand\n\ndriver_version = \"1.9.0-1614037901000\"\n\n\ndef extractall(zip: zipfile.ZipFile, path: str) -> None:\n for name in zip.namelist():\n member = zip.getinfo(name)\n extracted_path = zip.extract(member, path)\n attr = member.external_attr >> 16\n if attr != 0:\n os.chmod(extracted_path, attr)\n\n\nclass PlaywrightBDistWheelCommand(BDistWheelCommand):\n def run(self) -> None:\n if os.path.exists(\"build\"):\n shutil.rmtree(\"build\")\n if os.path.exists(\"dist\"):\n shutil.rmtree(\"dist\")\n if os.path.exists(\"playwright.egg-info\"):\n shutil.rmtree(\"playwright.egg-info\")\n super().run()\n os.makedirs(\"driver\", exist_ok=True)\n os.makedirs(\"playwright/driver\", exist_ok=True)\n for platform in [\"mac\", \"linux\", \"win32\", \"win32_x64\"]:\n zip_file = f\"playwright-{driver_version}-{platform}.zip\"\n if not os.path.exists(\"driver/\" + zip_file):\n url = \"https://playwright.azureedge.net/builds/driver/\"\n url = url + \"next/\"\n url = url + zip_file\n print(\"Fetching \", url)\n subprocess.check_call(\n [\"curl\", \"--http1.1\", url, \"-o\", \"driver/\" + zip_file]\n )\n base_wheel_location = glob.glob(\"dist/*.whl\")[0]\n without_platform = base_wheel_location[:-7]\n platform_map = {\n \"darwin\": \"mac\",\n \"linux\": \"linux\",\n \"win32\": \"win32_x64\" if sys.maxsize > 2 ** 32 else \"win32\",\n }\n for platform in [\"mac\", \"linux\", \"win32\", \"win32_x64\"]:\n zip_file = f\"driver/playwright-{driver_version}-{platform}.zip\"\n with zipfile.ZipFile(zip_file, \"r\") as zip:\n extractall(zip, f\"driver/{platform}\")\n if platform_map[sys.platform] == platform:\n with zipfile.ZipFile(zip_file, \"r\") as zip:\n extractall(zip, \"playwright/driver\")\n wheel = \"\"\n if platform == \"mac\":\n wheel = \"macosx_10_13_x86_64.whl\"\n if platform == \"linux\":\n wheel = \"manylinux1_x86_64.whl\"\n if platform == \"win32\":\n wheel = \"win32.whl\"\n if platform == \"win32_x64\":\n wheel = \"win_amd64.whl\"\n wheel_location = without_platform + wheel\n shutil.copy(base_wheel_location, wheel_location)\n with zipfile.ZipFile(wheel_location, \"a\") as zip:\n driver_root = os.path.abspath(f\"driver/{platform}\")\n for dir_path, _, files in os.walk(driver_root):\n for file in files:\n from_path = os.path.join(dir_path, file)\n to_path = os.path.relpath(from_path, driver_root)\n zip.write(from_path, f\"playwright/driver/{to_path}\")\n if platform == \"mac\":\n # Ship mac both as 10_13 as and 11_0 universal to work across Macs.\n universal_location = without_platform + \"macosx_11_0_universal2.whl\"\n shutil.copyfile(wheel_location, universal_location)\n with zipfile.ZipFile(universal_location, \"a\") as zip:\n zip.writestr(\"playwright/driver/README.md\", \"Universal Mac package\")\n\n os.remove(base_wheel_location)\n for whlfile in glob.glob(\"dist/*.whl\"):\n\n os.makedirs(\"wheelhouse\", exist_ok=True)\n with InWheel(\n in_wheel=whlfile,\n out_wheel=os.path.join(\"wheelhouse\", os.path.basename(whlfile)),\n ret_self=True,\n ):\n print(\"Updating RECORD file of %s\" % whlfile)\n shutil.rmtree(\"dist\")\n print(\"Copying new wheels\")\n shutil.move(\"wheelhouse\", \"dist\")\n\n\nsetuptools.setup(\n name=\"playwright\",\n author=\"Microsoft Corporation\",\n author_email=\"\",\n description=\"A high-level API to automate web browsers\",\n 
long_description=Path(\"README.md\").read_text(encoding=\"utf-8\"),\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/Microsoft/playwright-python\",\n packages=[\"playwright\"],\n include_package_data=True,\n install_requires=[\n \"greenlet==1.0.0\",\n \"pyee>=8.0.1\",\n \"typing-extensions;python_version<='3.8'\",\n ],\n classifiers=[\n \"Topic :: Software Development :: Testing\",\n \"Topic :: Internet :: WWW/HTTP :: Browsers\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires=\">=3.7\",\n cmdclass={\"bdist_wheel\": PlaywrightBDistWheelCommand},\n use_scm_version={\n \"version_scheme\": \"post-release\",\n \"write_to\": \"playwright/_repo_version.py\",\n \"write_to_template\": 'version = \"{version}\"\\n',\n },\n setup_requires=[\"setuptools_scm\", \"wheel\"],\n entry_points={\n \"console_scripts\": [\n \"playwright=playwright.__main__:main\",\n ],\n },\n)\n", "path": "setup.py"}]}
| 2,000 | 283 |
gh_patches_debug_25931
|
rasdani/github-patches
|
git_diff
|
joke2k__faker-1103
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Implementation of person id number for cs_CZ - Czech (rodné číslo)
Can you implement a randomizer which will generate a proper person ID number (rodné číslo) for the Czech locale?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `faker/providers/ssn/cs_CZ/__init__.py`
Content:
```
1 from .. import Provider as BaseProvider
2
3
4 class Provider(BaseProvider):
5 vat_id_formats = (
6 'CZ########',
7 'CZ#########',
8 'CZ##########',
9 )
10
11 def vat_id(self):
12 """
13 http://ec.europa.eu/taxation_customs/vies/faq.html#item_11
14 :return: A random Czech VAT ID
15 """
16
17 return self.bothify(self.random_element(self.vat_id_formats))
18
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/faker/providers/ssn/cs_CZ/__init__.py b/faker/providers/ssn/cs_CZ/__init__.py
--- a/faker/providers/ssn/cs_CZ/__init__.py
+++ b/faker/providers/ssn/cs_CZ/__init__.py
@@ -1,3 +1,5 @@
+from math import ceil
+
from .. import Provider as BaseProvider
@@ -8,6 +10,8 @@
'CZ##########',
)
+ national_id_months = ['%.2d' % i for i in range(1, 13)] + ['%.2d' % i for i in range(51, 63)]
+
def vat_id(self):
"""
http://ec.europa.eu/taxation_customs/vies/faq.html#item_11
@@ -15,3 +19,24 @@
"""
return self.bothify(self.random_element(self.vat_id_formats))
+
+ def birth_number(self):
+ """
+ Birth Number (Czech/Slovak: rodné číslo (RČ))
+ https://en.wikipedia.org/wiki/National_identification_number#Czech_Republic_and_Slovakia
+ """
+ birthdate = self.generator.date_of_birth()
+ year = '%.2d' % (birthdate.year % 100)
+ month = self.random_element(self.national_id_months)
+ day = '%.2d' % birthdate.day
+ if birthdate.year > 1953:
+ sn = self.random_number(4, True)
+ else:
+ sn = self.random_number(3, True)
+ number = int('{}{}{}{}'.format(year, month, day, sn))
+ birth_number = str(ceil(number / 11) * 11)
+ if year == '00':
+ birth_number = '00' + birth_number
+ elif year[0] == '0':
+ birth_number = '0' + birth_number
+ return '{}/{}'.format(birth_number[:6], birth_number[6::])
|
{"golden_diff": "diff --git a/faker/providers/ssn/cs_CZ/__init__.py b/faker/providers/ssn/cs_CZ/__init__.py\n--- a/faker/providers/ssn/cs_CZ/__init__.py\n+++ b/faker/providers/ssn/cs_CZ/__init__.py\n@@ -1,3 +1,5 @@\n+from math import ceil\n+\n from .. import Provider as BaseProvider\n \n \n@@ -8,6 +10,8 @@\n 'CZ##########',\n )\n \n+ national_id_months = ['%.2d' % i for i in range(1, 13)] + ['%.2d' % i for i in range(51, 63)]\n+\n def vat_id(self):\n \"\"\"\n http://ec.europa.eu/taxation_customs/vies/faq.html#item_11\n@@ -15,3 +19,24 @@\n \"\"\"\n \n return self.bothify(self.random_element(self.vat_id_formats))\n+\n+ def birth_number(self):\n+ \"\"\"\n+ Birth Number (Czech/Slovak: rodn\u00e9 \u010d\u00edslo (R\u010c))\n+ https://en.wikipedia.org/wiki/National_identification_number#Czech_Republic_and_Slovakia\n+ \"\"\"\n+ birthdate = self.generator.date_of_birth()\n+ year = '%.2d' % (birthdate.year % 100)\n+ month = self.random_element(self.national_id_months)\n+ day = '%.2d' % birthdate.day\n+ if birthdate.year > 1953:\n+ sn = self.random_number(4, True)\n+ else:\n+ sn = self.random_number(3, True)\n+ number = int('{}{}{}{}'.format(year, month, day, sn))\n+ birth_number = str(ceil(number / 11) * 11)\n+ if year == '00':\n+ birth_number = '00' + birth_number\n+ elif year[0] == '0':\n+ birth_number = '0' + birth_number\n+ return '{}/{}'.format(birth_number[:6], birth_number[6::])\n", "issue": "Implementation of person id number for cs_CZ - Czech (rodn\u00e9 \u010d\u00edslo)\nCan you implement randomizer which will generate a proper person ID number (rodn\u00e9 \u010d\u00edslo) for Czech local?\n", "before_files": [{"content": "from .. import Provider as BaseProvider\n\n\nclass Provider(BaseProvider):\n vat_id_formats = (\n 'CZ########',\n 'CZ#########',\n 'CZ##########',\n )\n\n def vat_id(self):\n \"\"\"\n http://ec.europa.eu/taxation_customs/vies/faq.html#item_11\n :return: A random Czech VAT ID\n \"\"\"\n\n return self.bothify(self.random_element(self.vat_id_formats))\n", "path": "faker/providers/ssn/cs_CZ/__init__.py"}], "after_files": [{"content": "from math import ceil\n\nfrom .. import Provider as BaseProvider\n\n\nclass Provider(BaseProvider):\n vat_id_formats = (\n 'CZ########',\n 'CZ#########',\n 'CZ##########',\n )\n\n national_id_months = ['%.2d' % i for i in range(1, 13)] + ['%.2d' % i for i in range(51, 63)]\n\n def vat_id(self):\n \"\"\"\n http://ec.europa.eu/taxation_customs/vies/faq.html#item_11\n :return: A random Czech VAT ID\n \"\"\"\n\n return self.bothify(self.random_element(self.vat_id_formats))\n\n def birth_number(self):\n \"\"\"\n Birth Number (Czech/Slovak: rodn\u00e9 \u010d\u00edslo (R\u010c))\n https://en.wikipedia.org/wiki/National_identification_number#Czech_Republic_and_Slovakia\n \"\"\"\n birthdate = self.generator.date_of_birth()\n year = '%.2d' % (birthdate.year % 100)\n month = self.random_element(self.national_id_months)\n day = '%.2d' % birthdate.day\n if birthdate.year > 1953:\n sn = self.random_number(4, True)\n else:\n sn = self.random_number(3, True)\n number = int('{}{}{}{}'.format(year, month, day, sn))\n birth_number = str(ceil(number / 11) * 11)\n if year == '00':\n birth_number = '00' + birth_number\n elif year[0] == '0':\n birth_number = '0' + birth_number\n return '{}/{}'.format(birth_number[:6], birth_number[6::])\n", "path": "faker/providers/ssn/cs_CZ/__init__.py"}]}
| 439 | 470 |
gh_patches_debug_40763
|
rasdani/github-patches
|
git_diff
|
instadeepai__Mava-575
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[TEST] Jax Parameter Server
### What do you want to test?
Jax parameter server components
### Outline of test structure
* Unit tests
* Test components and hooks
### Definition of done
Passing checks, cover all hooks, edge cases considered
### Mandatory checklist before making a PR
* [ ] The success criteria laid down in “Definition of done” are met.
* [ ] Test code is documented - docstrings for methods and classes, static types for arguments.
* [ ] Documentation is updated - README, CONTRIBUTING, or other documentation.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mava/components/jax/updating/parameter_server.py`
Content:
```
1 # python3
2 # Copyright 2021 InstaDeep Ltd. All rights reserved.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 """Parameter server Component for Mava systems."""
17 import abc
18 import time
19 from dataclasses import dataclass
20 from typing import Callable, Optional
21
22 import numpy as np
23 from acme.jax import savers
24
25 from mava.components.jax.component import Component
26 from mava.core_jax import SystemParameterServer
27
28
29 @dataclass
30 class ParameterServerConfig:
31 checkpoint: bool = True
32 checkpoint_subpath: str = "~/mava/"
33 checkpoint_minute_interval: int = 5
34 non_blocking_sleep_seconds: int = 10
35
36
37 class ParameterServer(Component):
38 @abc.abstractmethod
39 def __init__(
40 self,
41 config: ParameterServerConfig = ParameterServerConfig(),
42 ) -> None:
43 """Mock system Component."""
44 self.config = config
45
46 @abc.abstractmethod
47 def on_parameter_server_init_start(self, server: SystemParameterServer) -> None:
48 """_summary_
49
50 Args:
51 server : _description_
52 """
53 pass
54
55 # Get
56 @abc.abstractmethod
57 def on_parameter_server_get_parameters(self, server: SystemParameterServer) -> None:
58 """_summary_"""
59 pass
60
61 # Set
62 @abc.abstractmethod
63 def on_parameter_server_set_parameters(self, server: SystemParameterServer) -> None:
64 """_summary_"""
65 pass
66
67 # Add
68 @abc.abstractmethod
69 def on_parameter_server_add_to_parameters(
70 self, server: SystemParameterServer
71 ) -> None:
72 """_summary_"""
73 pass
74
75 # Save variables using checkpointer
76 @abc.abstractmethod
77 def on_parameter_server_run_loop(self, server: SystemParameterServer) -> None:
78 """_summary_
79
80 Args:
81 server : _description_
82 """
83 pass
84
85 @staticmethod
86 def name() -> str:
87 """Component type name, e.g. 'dataset' or 'executor'."""
88 return "parameter_server"
89
90 @staticmethod
91 def config_class() -> Optional[Callable]:
92 """Config class used for Component.
93
94 Returns:
95 config class/dataclass for Component.
96 """
97 return ParameterServerConfig
98
99
100 class DefaultParameterServer(ParameterServer):
101 def __init__(
102 self,
103 config: ParameterServerConfig = ParameterServerConfig(),
104 ) -> None:
105 """Mock system Component."""
106 self.config = config
107
108 def on_parameter_server_init_start(self, server: SystemParameterServer) -> None:
109 """_summary_
110
111 Args:
112 server : _description_
113 """
114 networks = server.store.network_factory()
115
116 # # Create parameters
117 server.store.parameters = {
118 "trainer_steps": np.zeros(1, dtype=np.int32),
119 "trainer_walltime": np.zeros(1, dtype=np.float32),
120 "evaluator_steps": np.zeros(1, dtype=np.int32),
121 "evaluator_episodes": np.zeros(1, dtype=np.int32),
122 "executor_episodes": np.zeros(1, dtype=np.int32),
123 "executor_steps": np.zeros(1, dtype=np.int32),
124 }
125
126 # Network parameters
127 for net_type_key in networks.keys():
128 for agent_net_key in networks[net_type_key].keys():
129 # Ensure obs and target networks are sonnet modules
130 server.store.parameters[f"{net_type_key}-{agent_net_key}"] = networks[
131 net_type_key
132 ][agent_net_key].params
133
134 # Create the checkpointer
135 if self.config.checkpoint:
136 server.store.last_checkpoint_time = 0
137 server.store.checkpoint_minute_interval = (
138 self.config.checkpoint_minute_interval
139 )
140
141 # Only save variables that are not empty.
142 save_variables = {}
143 for key in server.store.parameters.keys():
144 var = server.store.parameters[key]
145 # Don't store empty tuple (e.g. empty observation_network) variables
146 if not (type(var) == tuple and len(var) == 0):
147 save_variables[key] = var
148 server.store.system_checkpointer = savers.Checkpointer(
149 save_variables, self.config.checkpoint_subpath, time_delta_minutes=0
150 )
151
152 # Get
153 def on_parameter_server_get_parameters(self, server: SystemParameterServer) -> None:
154 """_summary_"""
155 names = server.store._param_names
156
157 if type(names) == str:
158 get_params = server.store.parameters[names] # type: ignore
159 else:
160 get_params = {}
161 for var_key in names:
162 get_params[var_key] = server.store.parameters[var_key]
163 server.store.get_parameters = get_params
164
165 # Set
166 def on_parameter_server_set_parameters(self, server: SystemParameterServer) -> None:
167 """_summary_"""
168 params = server.store._set_params
169 names = params.keys()
170
171 if type(names) == str:
172 params = {names: params} # type: ignore
173 names = [names] # type: ignore
174
175 for var_key in names:
176 assert var_key in server.store.parameters
177 if type(server.store.parameters[var_key]) == tuple:
178 raise NotImplementedError
179 # # Loop through tuple
180 # for var_i in range(len(server.store.parameters[var_key])):
181 # server.store.parameters[var_key][var_i].assign(params[var_key][var_i])
182 else:
183 server.store.parameters[var_key] = params[var_key]
184
185 # Add
186 def on_parameter_server_add_to_parameters(
187 self, server: SystemParameterServer
188 ) -> None:
189 """_summary_"""
190 params = server.store._add_to_params
191 names = params.keys()
192
193 if type(names) == str:
194 params = {names: params} # type: ignore
195 names = [names] # type: ignore
196
197 for var_key in names:
198 assert var_key in server.store.parameters
199 server.store.parameters[var_key] += params[var_key]
200
201 # Save variables using checkpointer
202 def on_parameter_server_run_loop(self, server: SystemParameterServer) -> None:
203 """_summary_
204
205 Args:
206 server : _description_
207 """
208 if (
209 server.store.system_checkpointer
210 and server.store.last_checkpoint_time
211 + server.store.checkpoint_minute_interval * 60
212 + 1
213 < time.time()
214 ):
215 server.store.system_checkpointer.save()
216 server.store.last_checkpoint_time = time.time()
217 print("Updated variables checkpoint.")
218
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mava/components/jax/updating/parameter_server.py b/mava/components/jax/updating/parameter_server.py
--- a/mava/components/jax/updating/parameter_server.py
+++ b/mava/components/jax/updating/parameter_server.py
@@ -17,7 +17,7 @@
import abc
import time
from dataclasses import dataclass
-from typing import Callable, Optional
+from typing import Any, Callable, Dict, Optional, Sequence, Union
import numpy as np
from acme.jax import savers
@@ -134,9 +134,6 @@
# Create the checkpointer
if self.config.checkpoint:
server.store.last_checkpoint_time = 0
- server.store.checkpoint_minute_interval = (
- self.config.checkpoint_minute_interval
- )
# Only save variables that are not empty.
save_variables = {}
@@ -152,7 +149,7 @@
# Get
def on_parameter_server_get_parameters(self, server: SystemParameterServer) -> None:
"""_summary_"""
- names = server.store._param_names
+ names: Union[str, Sequence[str]] = server.store._param_names
if type(names) == str:
get_params = server.store.parameters[names] # type: ignore
@@ -165,13 +162,9 @@
# Set
def on_parameter_server_set_parameters(self, server: SystemParameterServer) -> None:
"""_summary_"""
- params = server.store._set_params
+ params: Dict[str, Any] = server.store._set_params
names = params.keys()
- if type(names) == str:
- params = {names: params} # type: ignore
- names = [names] # type: ignore
-
for var_key in names:
assert var_key in server.store.parameters
if type(server.store.parameters[var_key]) == tuple:
@@ -187,13 +180,9 @@
self, server: SystemParameterServer
) -> None:
"""_summary_"""
- params = server.store._add_to_params
+ params: Dict[str, Any] = server.store._add_to_params
names = params.keys()
- if type(names) == str:
- params = {names: params} # type: ignore
- names = [names] # type: ignore
-
for var_key in names:
assert var_key in server.store.parameters
server.store.parameters[var_key] += params[var_key]
@@ -206,9 +195,9 @@
server : _description_
"""
if (
- server.store.system_checkpointer
+ self.config.checkpoint
and server.store.last_checkpoint_time
- + server.store.checkpoint_minute_interval * 60
+ + self.config.checkpoint_minute_interval * 60
+ 1
< time.time()
):
|
{"golden_diff": "diff --git a/mava/components/jax/updating/parameter_server.py b/mava/components/jax/updating/parameter_server.py\n--- a/mava/components/jax/updating/parameter_server.py\n+++ b/mava/components/jax/updating/parameter_server.py\n@@ -17,7 +17,7 @@\n import abc\n import time\n from dataclasses import dataclass\n-from typing import Callable, Optional\n+from typing import Any, Callable, Dict, Optional, Sequence, Union\n \n import numpy as np\n from acme.jax import savers\n@@ -134,9 +134,6 @@\n # Create the checkpointer\n if self.config.checkpoint:\n server.store.last_checkpoint_time = 0\n- server.store.checkpoint_minute_interval = (\n- self.config.checkpoint_minute_interval\n- )\n \n # Only save variables that are not empty.\n save_variables = {}\n@@ -152,7 +149,7 @@\n # Get\n def on_parameter_server_get_parameters(self, server: SystemParameterServer) -> None:\n \"\"\"_summary_\"\"\"\n- names = server.store._param_names\n+ names: Union[str, Sequence[str]] = server.store._param_names\n \n if type(names) == str:\n get_params = server.store.parameters[names] # type: ignore\n@@ -165,13 +162,9 @@\n # Set\n def on_parameter_server_set_parameters(self, server: SystemParameterServer) -> None:\n \"\"\"_summary_\"\"\"\n- params = server.store._set_params\n+ params: Dict[str, Any] = server.store._set_params\n names = params.keys()\n \n- if type(names) == str:\n- params = {names: params} # type: ignore\n- names = [names] # type: ignore\n-\n for var_key in names:\n assert var_key in server.store.parameters\n if type(server.store.parameters[var_key]) == tuple:\n@@ -187,13 +180,9 @@\n self, server: SystemParameterServer\n ) -> None:\n \"\"\"_summary_\"\"\"\n- params = server.store._add_to_params\n+ params: Dict[str, Any] = server.store._add_to_params\n names = params.keys()\n \n- if type(names) == str:\n- params = {names: params} # type: ignore\n- names = [names] # type: ignore\n-\n for var_key in names:\n assert var_key in server.store.parameters\n server.store.parameters[var_key] += params[var_key]\n@@ -206,9 +195,9 @@\n server : _description_\n \"\"\"\n if (\n- server.store.system_checkpointer\n+ self.config.checkpoint\n and server.store.last_checkpoint_time\n- + server.store.checkpoint_minute_interval * 60\n+ + self.config.checkpoint_minute_interval * 60\n + 1\n < time.time()\n ):\n", "issue": "[TEST] Jax Parameter Server\n### What do you want to test?\r\nJax parameter server components\r\n\r\n### Outline of test structure\r\n* Unit tests\r\n* Test components and hooks\r\n\r\n### Definition of done\r\nPassing checks, cover all hooks, edge cases considered\r\n\r\n### Mandatory checklist before making a PR\r\n* [ ] The success criteria laid down in \u201cDefinition of done\u201d are met.\r\n* [ ] Test code is documented - docstrings for methods and classes, static types for arguments.\r\n* [ ] Documentation is updated - README, CONTRIBUTING, or other documentation.\n", "before_files": [{"content": "# python3\n# Copyright 2021 InstaDeep Ltd. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Parameter server Component for Mava systems.\"\"\"\nimport abc\nimport time\nfrom dataclasses import dataclass\nfrom typing import Callable, Optional\n\nimport numpy as np\nfrom acme.jax import savers\n\nfrom mava.components.jax.component import Component\nfrom mava.core_jax import SystemParameterServer\n\n\n@dataclass\nclass ParameterServerConfig:\n checkpoint: bool = True\n checkpoint_subpath: str = \"~/mava/\"\n checkpoint_minute_interval: int = 5\n non_blocking_sleep_seconds: int = 10\n\n\nclass ParameterServer(Component):\n @abc.abstractmethod\n def __init__(\n self,\n config: ParameterServerConfig = ParameterServerConfig(),\n ) -> None:\n \"\"\"Mock system Component.\"\"\"\n self.config = config\n\n @abc.abstractmethod\n def on_parameter_server_init_start(self, server: SystemParameterServer) -> None:\n \"\"\"_summary_\n\n Args:\n server : _description_\n \"\"\"\n pass\n\n # Get\n @abc.abstractmethod\n def on_parameter_server_get_parameters(self, server: SystemParameterServer) -> None:\n \"\"\"_summary_\"\"\"\n pass\n\n # Set\n @abc.abstractmethod\n def on_parameter_server_set_parameters(self, server: SystemParameterServer) -> None:\n \"\"\"_summary_\"\"\"\n pass\n\n # Add\n @abc.abstractmethod\n def on_parameter_server_add_to_parameters(\n self, server: SystemParameterServer\n ) -> None:\n \"\"\"_summary_\"\"\"\n pass\n\n # Save variables using checkpointer\n @abc.abstractmethod\n def on_parameter_server_run_loop(self, server: SystemParameterServer) -> None:\n \"\"\"_summary_\n\n Args:\n server : _description_\n \"\"\"\n pass\n\n @staticmethod\n def name() -> str:\n \"\"\"Component type name, e.g. 
'dataset' or 'executor'.\"\"\"\n return \"parameter_server\"\n\n @staticmethod\n def config_class() -> Optional[Callable]:\n \"\"\"Config class used for Component.\n\n Returns:\n config class/dataclass for Component.\n \"\"\"\n return ParameterServerConfig\n\n\nclass DefaultParameterServer(ParameterServer):\n def __init__(\n self,\n config: ParameterServerConfig = ParameterServerConfig(),\n ) -> None:\n \"\"\"Mock system Component.\"\"\"\n self.config = config\n\n def on_parameter_server_init_start(self, server: SystemParameterServer) -> None:\n \"\"\"_summary_\n\n Args:\n server : _description_\n \"\"\"\n networks = server.store.network_factory()\n\n # # Create parameters\n server.store.parameters = {\n \"trainer_steps\": np.zeros(1, dtype=np.int32),\n \"trainer_walltime\": np.zeros(1, dtype=np.float32),\n \"evaluator_steps\": np.zeros(1, dtype=np.int32),\n \"evaluator_episodes\": np.zeros(1, dtype=np.int32),\n \"executor_episodes\": np.zeros(1, dtype=np.int32),\n \"executor_steps\": np.zeros(1, dtype=np.int32),\n }\n\n # Network parameters\n for net_type_key in networks.keys():\n for agent_net_key in networks[net_type_key].keys():\n # Ensure obs and target networks are sonnet modules\n server.store.parameters[f\"{net_type_key}-{agent_net_key}\"] = networks[\n net_type_key\n ][agent_net_key].params\n\n # Create the checkpointer\n if self.config.checkpoint:\n server.store.last_checkpoint_time = 0\n server.store.checkpoint_minute_interval = (\n self.config.checkpoint_minute_interval\n )\n\n # Only save variables that are not empty.\n save_variables = {}\n for key in server.store.parameters.keys():\n var = server.store.parameters[key]\n # Don't store empty tuple (e.g. empty observation_network) variables\n if not (type(var) == tuple and len(var) == 0):\n save_variables[key] = var\n server.store.system_checkpointer = savers.Checkpointer(\n save_variables, self.config.checkpoint_subpath, time_delta_minutes=0\n )\n\n # Get\n def on_parameter_server_get_parameters(self, server: SystemParameterServer) -> None:\n \"\"\"_summary_\"\"\"\n names = server.store._param_names\n\n if type(names) == str:\n get_params = server.store.parameters[names] # type: ignore\n else:\n get_params = {}\n for var_key in names:\n get_params[var_key] = server.store.parameters[var_key]\n server.store.get_parameters = get_params\n\n # Set\n def on_parameter_server_set_parameters(self, server: SystemParameterServer) -> None:\n \"\"\"_summary_\"\"\"\n params = server.store._set_params\n names = params.keys()\n\n if type(names) == str:\n params = {names: params} # type: ignore\n names = [names] # type: ignore\n\n for var_key in names:\n assert var_key in server.store.parameters\n if type(server.store.parameters[var_key]) == tuple:\n raise NotImplementedError\n # # Loop through tuple\n # for var_i in range(len(server.store.parameters[var_key])):\n # server.store.parameters[var_key][var_i].assign(params[var_key][var_i])\n else:\n server.store.parameters[var_key] = params[var_key]\n\n # Add\n def on_parameter_server_add_to_parameters(\n self, server: SystemParameterServer\n ) -> None:\n \"\"\"_summary_\"\"\"\n params = server.store._add_to_params\n names = params.keys()\n\n if type(names) == str:\n params = {names: params} # type: ignore\n names = [names] # type: ignore\n\n for var_key in names:\n assert var_key in server.store.parameters\n server.store.parameters[var_key] += params[var_key]\n\n # Save variables using checkpointer\n def on_parameter_server_run_loop(self, server: SystemParameterServer) -> None:\n 
\"\"\"_summary_\n\n Args:\n server : _description_\n \"\"\"\n if (\n server.store.system_checkpointer\n and server.store.last_checkpoint_time\n + server.store.checkpoint_minute_interval * 60\n + 1\n < time.time()\n ):\n server.store.system_checkpointer.save()\n server.store.last_checkpoint_time = time.time()\n print(\"Updated variables checkpoint.\")\n", "path": "mava/components/jax/updating/parameter_server.py"}], "after_files": [{"content": "# python3\n# Copyright 2021 InstaDeep Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Parameter server Component for Mava systems.\"\"\"\nimport abc\nimport time\nfrom dataclasses import dataclass\nfrom typing import Any, Callable, Dict, Optional, Sequence, Union\n\nimport numpy as np\nfrom acme.jax import savers\n\nfrom mava.components.jax.component import Component\nfrom mava.core_jax import SystemParameterServer\n\n\n@dataclass\nclass ParameterServerConfig:\n checkpoint: bool = True\n checkpoint_subpath: str = \"~/mava/\"\n checkpoint_minute_interval: int = 5\n non_blocking_sleep_seconds: int = 10\n\n\nclass ParameterServer(Component):\n @abc.abstractmethod\n def __init__(\n self,\n config: ParameterServerConfig = ParameterServerConfig(),\n ) -> None:\n \"\"\"Mock system Component.\"\"\"\n self.config = config\n\n @abc.abstractmethod\n def on_parameter_server_init_start(self, server: SystemParameterServer) -> None:\n \"\"\"_summary_\n\n Args:\n server : _description_\n \"\"\"\n pass\n\n # Get\n @abc.abstractmethod\n def on_parameter_server_get_parameters(self, server: SystemParameterServer) -> None:\n \"\"\"_summary_\"\"\"\n pass\n\n # Set\n @abc.abstractmethod\n def on_parameter_server_set_parameters(self, server: SystemParameterServer) -> None:\n \"\"\"_summary_\"\"\"\n pass\n\n # Add\n @abc.abstractmethod\n def on_parameter_server_add_to_parameters(\n self, server: SystemParameterServer\n ) -> None:\n \"\"\"_summary_\"\"\"\n pass\n\n # Save variables using checkpointer\n @abc.abstractmethod\n def on_parameter_server_run_loop(self, server: SystemParameterServer) -> None:\n \"\"\"_summary_\n\n Args:\n server : _description_\n \"\"\"\n pass\n\n @staticmethod\n def name() -> str:\n \"\"\"Component type name, e.g. 
'dataset' or 'executor'.\"\"\"\n return \"parameter_server\"\n\n @staticmethod\n def config_class() -> Optional[Callable]:\n \"\"\"Config class used for Component.\n\n Returns:\n config class/dataclass for Component.\n \"\"\"\n return ParameterServerConfig\n\n\nclass DefaultParameterServer(ParameterServer):\n def __init__(\n self,\n config: ParameterServerConfig = ParameterServerConfig(),\n ) -> None:\n \"\"\"Mock system Component.\"\"\"\n self.config = config\n\n def on_parameter_server_init_start(self, server: SystemParameterServer) -> None:\n \"\"\"_summary_\n\n Args:\n server : _description_\n \"\"\"\n networks = server.store.network_factory()\n\n # # Create parameters\n server.store.parameters = {\n \"trainer_steps\": np.zeros(1, dtype=np.int32),\n \"trainer_walltime\": np.zeros(1, dtype=np.float32),\n \"evaluator_steps\": np.zeros(1, dtype=np.int32),\n \"evaluator_episodes\": np.zeros(1, dtype=np.int32),\n \"executor_episodes\": np.zeros(1, dtype=np.int32),\n \"executor_steps\": np.zeros(1, dtype=np.int32),\n }\n\n # Network parameters\n for net_type_key in networks.keys():\n for agent_net_key in networks[net_type_key].keys():\n # Ensure obs and target networks are sonnet modules\n server.store.parameters[f\"{net_type_key}-{agent_net_key}\"] = networks[\n net_type_key\n ][agent_net_key].params\n\n # Create the checkpointer\n if self.config.checkpoint:\n server.store.last_checkpoint_time = 0\n\n # Only save variables that are not empty.\n save_variables = {}\n for key in server.store.parameters.keys():\n var = server.store.parameters[key]\n # Don't store empty tuple (e.g. empty observation_network) variables\n if not (type(var) == tuple and len(var) == 0):\n save_variables[key] = var\n server.store.system_checkpointer = savers.Checkpointer(\n save_variables, self.config.checkpoint_subpath, time_delta_minutes=0\n )\n\n # Get\n def on_parameter_server_get_parameters(self, server: SystemParameterServer) -> None:\n \"\"\"_summary_\"\"\"\n names: Union[str, Sequence[str]] = server.store._param_names\n\n if type(names) == str:\n get_params = server.store.parameters[names] # type: ignore\n else:\n get_params = {}\n for var_key in names:\n get_params[var_key] = server.store.parameters[var_key]\n server.store.get_parameters = get_params\n\n # Set\n def on_parameter_server_set_parameters(self, server: SystemParameterServer) -> None:\n \"\"\"_summary_\"\"\"\n params: Dict[str, Any] = server.store._set_params\n names = params.keys()\n\n for var_key in names:\n assert var_key in server.store.parameters\n if type(server.store.parameters[var_key]) == tuple:\n raise NotImplementedError\n # # Loop through tuple\n # for var_i in range(len(server.store.parameters[var_key])):\n # server.store.parameters[var_key][var_i].assign(params[var_key][var_i])\n else:\n server.store.parameters[var_key] = params[var_key]\n\n # Add\n def on_parameter_server_add_to_parameters(\n self, server: SystemParameterServer\n ) -> None:\n \"\"\"_summary_\"\"\"\n params: Dict[str, Any] = server.store._add_to_params\n names = params.keys()\n\n for var_key in names:\n assert var_key in server.store.parameters\n server.store.parameters[var_key] += params[var_key]\n\n # Save variables using checkpointer\n def on_parameter_server_run_loop(self, server: SystemParameterServer) -> None:\n \"\"\"_summary_\n\n Args:\n server : _description_\n \"\"\"\n if (\n self.config.checkpoint\n and server.store.last_checkpoint_time\n + self.config.checkpoint_minute_interval * 60\n + 1\n < time.time()\n ):\n server.store.system_checkpointer.save()\n 
server.store.last_checkpoint_time = time.time()\n print(\"Updated variables checkpoint.\")\n", "path": "mava/components/jax/updating/parameter_server.py"}]}
| 2,448 | 653 |
gh_patches_debug_17777
|
rasdani/github-patches
|
git_diff
|
googleapis__google-cloud-python-5424
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Monitoring alias package is missing new service clients
https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/monitoring/google/cloud/monitoring.py is missing the new clients added to https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/monitoring/google/cloud/monitoring_v3/__init__.py
Should be a relatively easy fix.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `monitoring/google/cloud/monitoring.py`
Content:
```
1 # Copyright 2017, Google LLC All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import absolute_import
16
17 from google.cloud.monitoring_v3.query import Query
18 from google.cloud.monitoring_v3 import GroupServiceClient
19 from google.cloud.monitoring_v3 import MetricServiceClient
20 from google.cloud.monitoring_v3 import enums
21 from google.cloud.monitoring_v3 import types
22
23 __all__ = (
24 'enums',
25 'types',
26 'GroupServiceClient',
27 'Query',
28 'MetricServiceClient', )
29
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/monitoring/google/cloud/monitoring.py b/monitoring/google/cloud/monitoring.py
--- a/monitoring/google/cloud/monitoring.py
+++ b/monitoring/google/cloud/monitoring.py
@@ -15,14 +15,21 @@
from __future__ import absolute_import
from google.cloud.monitoring_v3.query import Query
+from google.cloud.monitoring_v3 import AlertPolicyServiceClient
from google.cloud.monitoring_v3 import GroupServiceClient
from google.cloud.monitoring_v3 import MetricServiceClient
+from google.cloud.monitoring_v3 import NotificationChannelServiceClient
+from google.cloud.monitoring_v3 import UptimeCheckServiceClient
from google.cloud.monitoring_v3 import enums
from google.cloud.monitoring_v3 import types
__all__ = (
'enums',
'types',
+ 'AlertPolicyServiceClient',
'GroupServiceClient',
+ 'MetricServiceClient',
+ 'NotificationChannelServiceClient',
+ 'UptimeCheckServiceClient',
'Query',
- 'MetricServiceClient', )
+)
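
(Editor's usage note, not part of this dataset row: once the alias module re-exports the extra clients as in the diff above, they can be instantiated from the flat `google.cloud.monitoring` namespace as well. A hedged sketch; it assumes application-default credentials are configured and uses only the class names introduced by the patch.)

```python
# Sketch of post-patch usage; the client classes are the ones re-exported above.
from google.cloud import monitoring

alert_client = monitoring.AlertPolicyServiceClient()
notification_client = monitoring.NotificationChannelServiceClient()
uptime_client = monitoring.UptimeCheckServiceClient()
```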
|
{"golden_diff": "diff --git a/monitoring/google/cloud/monitoring.py b/monitoring/google/cloud/monitoring.py\n--- a/monitoring/google/cloud/monitoring.py\n+++ b/monitoring/google/cloud/monitoring.py\n@@ -15,14 +15,21 @@\n from __future__ import absolute_import\n \n from google.cloud.monitoring_v3.query import Query\n+from google.cloud.monitoring_v3 import AlertPolicyServiceClient\n from google.cloud.monitoring_v3 import GroupServiceClient\n from google.cloud.monitoring_v3 import MetricServiceClient\n+from google.cloud.monitoring_v3 import NotificationChannelServiceClient\n+from google.cloud.monitoring_v3 import UptimeCheckServiceClient\n from google.cloud.monitoring_v3 import enums\n from google.cloud.monitoring_v3 import types\n \n __all__ = (\n 'enums',\n 'types',\n+ 'AlertPolicyServiceClient',\n 'GroupServiceClient',\n+ 'MetricServiceClient',\n+ 'NotificationChannelServiceClient',\n+ 'UptimeCheckServiceClient',\n 'Query',\n- 'MetricServiceClient', )\n+)\n", "issue": "Monitoring alias package is missing new service clients\nhttps://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/monitoring/google/cloud/monitoring.py is missing the new clients added to https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/monitoring/google/cloud/monitoring_v3/__init__.py\r\n\r\nShould be a relatively easy fix.\n", "before_files": [{"content": "# Copyright 2017, Google LLC All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nfrom google.cloud.monitoring_v3.query import Query\nfrom google.cloud.monitoring_v3 import GroupServiceClient\nfrom google.cloud.monitoring_v3 import MetricServiceClient\nfrom google.cloud.monitoring_v3 import enums\nfrom google.cloud.monitoring_v3 import types\n\n__all__ = (\n 'enums',\n 'types',\n 'GroupServiceClient',\n 'Query',\n 'MetricServiceClient', )\n", "path": "monitoring/google/cloud/monitoring.py"}], "after_files": [{"content": "# Copyright 2017, Google LLC All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nfrom google.cloud.monitoring_v3.query import Query\nfrom google.cloud.monitoring_v3 import AlertPolicyServiceClient\nfrom google.cloud.monitoring_v3 import GroupServiceClient\nfrom google.cloud.monitoring_v3 import MetricServiceClient\nfrom google.cloud.monitoring_v3 import NotificationChannelServiceClient\nfrom google.cloud.monitoring_v3 import UptimeCheckServiceClient\nfrom google.cloud.monitoring_v3 import 
enums\nfrom google.cloud.monitoring_v3 import types\n\n__all__ = (\n 'enums',\n 'types',\n 'AlertPolicyServiceClient',\n 'GroupServiceClient',\n 'MetricServiceClient',\n 'NotificationChannelServiceClient',\n 'UptimeCheckServiceClient',\n 'Query',\n)\n", "path": "monitoring/google/cloud/monitoring.py"}]}
| 614 | 233 |
gh_patches_debug_35214
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-center-index-6951
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[request] stb/20210818
### Package Details
* Package Name/Version: **stb/20210818**
There has been +1800 commits added to stb since Feb 2 of 2020, I greatly suggest updating it.
The above mentioned version is newly released by the upstream project and not yet available as a recipe. Please add this version.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/stb/all/conanfile.py`
Content:
```
1 from conans import ConanFile, tools
2 import os
3
4 class StbConan(ConanFile):
5 name = "stb"
6 description = "single-file public domain libraries for C/C++"
7 topics = ("conan", "stb", "single-file")
8 url = "https://github.com/conan-io/conan-center-index"
9 homepage = "https://github.com/nothings/stb"
10 license = ("Unlicense", "MIT")
11 no_copy_source = True
12 _source_subfolder = "source_subfolder"
13
14 def source(self):
15 commit = os.path.splitext(os.path.basename(self.conan_data["sources"][self.version]["url"]))[0]
16 tools.get(**self.conan_data["sources"][self.version])
17 extracted_dir = self.name + "-" + commit
18 os.rename(extracted_dir, self._source_subfolder)
19
20 def package(self):
21 self.copy("LICENSE", src=self._source_subfolder, dst="licenses")
22 self.copy("*.h", src=self._source_subfolder, dst="include")
23 self.copy("stb_vorbis.c", src=self._source_subfolder, dst="include")
24 tools.rmdir(os.path.join(self.package_folder, "include", "tests"))
25
26 def package_id(self):
27 self.info.header_only()
28
29 def package_info(self):
30 self.cpp_info.defines.append('STB_TEXTEDIT_KEYTYPE=unsigned')
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/recipes/stb/all/conanfile.py b/recipes/stb/all/conanfile.py
--- a/recipes/stb/all/conanfile.py
+++ b/recipes/stb/all/conanfile.py
@@ -1,27 +1,53 @@
from conans import ConanFile, tools
import os
+required_conan_version = ">=1.33.0"
+
+
class StbConan(ConanFile):
name = "stb"
description = "single-file public domain libraries for C/C++"
- topics = ("conan", "stb", "single-file")
+ topics = ("stb", "single-file")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/nothings/stb"
license = ("Unlicense", "MIT")
no_copy_source = True
- _source_subfolder = "source_subfolder"
+
+ options = {
+ "with_deprecated": [True, False]
+ }
+
+ default_options = {
+ "with_deprecated": True
+ }
+
+ @property
+ def _source_subfolder(self):
+ return "source_subfolder"
+
+ @property
+ def _version(self):
+ # HACK: Used to circumvent the incompatibility
+ # of the format cci.YYYYMMDD in tools.Version
+ return str(self.version)[4:]
+
+ def config_options(self):
+ if tools.Version(self._version) < "20210713":
+ del self.options.with_deprecated
def source(self):
- commit = os.path.splitext(os.path.basename(self.conan_data["sources"][self.version]["url"]))[0]
- tools.get(**self.conan_data["sources"][self.version])
- extracted_dir = self.name + "-" + commit
- os.rename(extracted_dir, self._source_subfolder)
+ tools.get(**self.conan_data["sources"][self.version], strip_root=True, destination=self._source_subfolder)
def package(self):
self.copy("LICENSE", src=self._source_subfolder, dst="licenses")
self.copy("*.h", src=self._source_subfolder, dst="include")
self.copy("stb_vorbis.c", src=self._source_subfolder, dst="include")
tools.rmdir(os.path.join(self.package_folder, "include", "tests"))
+ if tools.Version(self._version) >= "20210713":
+ tools.rmdir(os.path.join(self.package_folder, "include", "deprecated"))
+ if self.options.get_safe("with_deprecated", False):
+ self.copy("*.h", src=os.path.join(self._source_subfolder, "deprecated"), dst="include")
+ self.copy("stb_image.c", src=os.path.join(self._source_subfolder, "deprecated"), dst="include")
def package_id(self):
self.info.header_only()
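
(Editor's aside on the `_version` property in the diff above, not part of the recipe: the inline comment notes that `tools.Version` cannot digest the `cci.YYYYMMDD` version scheme, so the recipe slices off the 4-character `cci.` prefix and compares only the date part. A minimal, self-contained sketch of that idea with an assumed version string.)

```python
# Assumed example of the cci.<date> naming convention used on conan-center.
version = "cci.20210818"
date_part = version[4:]        # -> "20210818"
# "20210818" can then be fed to tools.Version and compared against "20210713"
# to decide whether the deprecated headers are present in this snapshot.
assert date_part == "20210818"
```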
|
{"golden_diff": "diff --git a/recipes/stb/all/conanfile.py b/recipes/stb/all/conanfile.py\n--- a/recipes/stb/all/conanfile.py\n+++ b/recipes/stb/all/conanfile.py\n@@ -1,27 +1,53 @@\n from conans import ConanFile, tools\n import os\n \n+required_conan_version = \">=1.33.0\"\n+\n+\n class StbConan(ConanFile):\n name = \"stb\"\n description = \"single-file public domain libraries for C/C++\"\n- topics = (\"conan\", \"stb\", \"single-file\")\n+ topics = (\"stb\", \"single-file\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/nothings/stb\"\n license = (\"Unlicense\", \"MIT\")\n no_copy_source = True\n- _source_subfolder = \"source_subfolder\"\n+\n+ options = {\n+ \"with_deprecated\": [True, False]\n+ }\n+\n+ default_options = {\n+ \"with_deprecated\": True\n+ }\n+\n+ @property\n+ def _source_subfolder(self):\n+ return \"source_subfolder\"\n+\n+ @property\n+ def _version(self):\n+ # HACK: Used to circumvent the incompatibility\n+ # of the format cci.YYYYMMDD in tools.Version\n+ return str(self.version)[4:]\n+\n+ def config_options(self):\n+ if tools.Version(self._version) < \"20210713\":\n+ del self.options.with_deprecated\n \n def source(self):\n- commit = os.path.splitext(os.path.basename(self.conan_data[\"sources\"][self.version][\"url\"]))[0]\n- tools.get(**self.conan_data[\"sources\"][self.version])\n- extracted_dir = self.name + \"-\" + commit\n- os.rename(extracted_dir, self._source_subfolder)\n+ tools.get(**self.conan_data[\"sources\"][self.version], strip_root=True, destination=self._source_subfolder)\n \n def package(self):\n self.copy(\"LICENSE\", src=self._source_subfolder, dst=\"licenses\")\n self.copy(\"*.h\", src=self._source_subfolder, dst=\"include\")\n self.copy(\"stb_vorbis.c\", src=self._source_subfolder, dst=\"include\")\n tools.rmdir(os.path.join(self.package_folder, \"include\", \"tests\"))\n+ if tools.Version(self._version) >= \"20210713\":\n+ tools.rmdir(os.path.join(self.package_folder, \"include\", \"deprecated\"))\n+ if self.options.get_safe(\"with_deprecated\", False):\n+ self.copy(\"*.h\", src=os.path.join(self._source_subfolder, \"deprecated\"), dst=\"include\")\n+ self.copy(\"stb_image.c\", src=os.path.join(self._source_subfolder, \"deprecated\"), dst=\"include\")\n \n def package_id(self):\n self.info.header_only()\n", "issue": "[request] stb/20210818\n### Package Details\r\n * Package Name/Version: **stb/20210818**\r\n\r\nThere has been +1800 commits added to stb since Feb 2 of 2020, I greatly suggest updating it.\r\n\r\nThe above mentioned version is newly released by the upstream project and not yet available as a recipe. 
Please add this version.\r\n\n", "before_files": [{"content": "from conans import ConanFile, tools\nimport os\n\nclass StbConan(ConanFile):\n name = \"stb\"\n description = \"single-file public domain libraries for C/C++\"\n topics = (\"conan\", \"stb\", \"single-file\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/nothings/stb\"\n license = (\"Unlicense\", \"MIT\")\n no_copy_source = True\n _source_subfolder = \"source_subfolder\"\n\n def source(self):\n commit = os.path.splitext(os.path.basename(self.conan_data[\"sources\"][self.version][\"url\"]))[0]\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = self.name + \"-\" + commit\n os.rename(extracted_dir, self._source_subfolder)\n\n def package(self):\n self.copy(\"LICENSE\", src=self._source_subfolder, dst=\"licenses\")\n self.copy(\"*.h\", src=self._source_subfolder, dst=\"include\")\n self.copy(\"stb_vorbis.c\", src=self._source_subfolder, dst=\"include\")\n tools.rmdir(os.path.join(self.package_folder, \"include\", \"tests\"))\n\n def package_id(self):\n self.info.header_only()\n \n def package_info(self):\n self.cpp_info.defines.append('STB_TEXTEDIT_KEYTYPE=unsigned')\n", "path": "recipes/stb/all/conanfile.py"}], "after_files": [{"content": "from conans import ConanFile, tools\nimport os\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass StbConan(ConanFile):\n name = \"stb\"\n description = \"single-file public domain libraries for C/C++\"\n topics = (\"stb\", \"single-file\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/nothings/stb\"\n license = (\"Unlicense\", \"MIT\")\n no_copy_source = True\n\n options = {\n \"with_deprecated\": [True, False]\n }\n\n default_options = {\n \"with_deprecated\": True\n }\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _version(self):\n # HACK: Used to circumvent the incompatibility\n # of the format cci.YYYYMMDD in tools.Version\n return str(self.version)[4:]\n\n def config_options(self):\n if tools.Version(self._version) < \"20210713\":\n del self.options.with_deprecated\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version], strip_root=True, destination=self._source_subfolder)\n\n def package(self):\n self.copy(\"LICENSE\", src=self._source_subfolder, dst=\"licenses\")\n self.copy(\"*.h\", src=self._source_subfolder, dst=\"include\")\n self.copy(\"stb_vorbis.c\", src=self._source_subfolder, dst=\"include\")\n tools.rmdir(os.path.join(self.package_folder, \"include\", \"tests\"))\n if tools.Version(self._version) >= \"20210713\":\n tools.rmdir(os.path.join(self.package_folder, \"include\", \"deprecated\"))\n if self.options.get_safe(\"with_deprecated\", False):\n self.copy(\"*.h\", src=os.path.join(self._source_subfolder, \"deprecated\"), dst=\"include\")\n self.copy(\"stb_image.c\", src=os.path.join(self._source_subfolder, \"deprecated\"), dst=\"include\")\n\n def package_id(self):\n self.info.header_only()\n \n def package_info(self):\n self.cpp_info.defines.append('STB_TEXTEDIT_KEYTYPE=unsigned')\n", "path": "recipes/stb/all/conanfile.py"}]}
| 712 | 653 |
gh_patches_debug_15650
|
rasdani/github-patches
|
git_diff
|
evennia__evennia-1733
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Disabling webclient does not actually disable it
#### Steps to reproduce the issue / Reasons for adding feature:
1. Set WEBCLIENT_ENABLED to False
2. Link disappears from index page
3. Go to http://example.com/webclient
4. Webclient loads.
#### Error output / Expected result of feature
Setting WEBCLIENT_ENABLED to False should disable it, not hide it.
#### Extra information, such as Evennia revision/repo/branch, operating system and ideas for how to solve / implement:
The status check needs to happen in urls.py, not just on the navbar template. If disabled, the url for the webclient should not be added to the list of urlpatterns.
--- END ISSUE ---
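
(Editor's illustration, separate from the issue text above: one way to realise the reporter's suggestion is to register the webclient URL only when the setting is enabled. The sketch below assumes a generic Django 1.x-style `urls.py`; the module path, regex and route name are placeholders rather than Evennia's real configuration, and the patch recorded later in this entry instead raises `Http404` inside the view.)

```python
# Hypothetical urls.py fragment; names and paths are assumptions for illustration.
from django.conf import settings
from django.conf.urls import url

from evennia.web.webclient import views as webclient_views

urlpatterns = [
    # ... the rest of the site's patterns ...
]

if settings.WEBCLIENT_ENABLED:
    # Only expose /webclient when the feature is switched on.
    urlpatterns.append(
        url(r"^webclient/?$", webclient_views.webclient, name="webclient_index")
    )
```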
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evennia/web/webclient/views.py`
Content:
```
1
2 """
3 This contains a simple view for rendering the webclient
4 page and serve it eventual static content.
5
6 """
7 from __future__ import print_function
8 from django.shortcuts import render
9 from django.contrib.auth import login, authenticate
10
11 from evennia.accounts.models import AccountDB
12 from evennia.utils import logger
13
14
15 def webclient(request):
16 """
17 Webclient page template loading.
18
19 """
20 # auto-login is now handled by evennia.web.utils.middleware
21
22 # make sure to store the browser session's hash so the webclient can get to it!
23 pagevars = {'browser_sessid': request.session.session_key}
24
25 return render(request, 'webclient.html', pagevars)
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/evennia/web/webclient/views.py b/evennia/web/webclient/views.py
--- a/evennia/web/webclient/views.py
+++ b/evennia/web/webclient/views.py
@@ -5,6 +5,8 @@
"""
from __future__ import print_function
+from django.conf import settings
+from django.http import Http404
from django.shortcuts import render
from django.contrib.auth import login, authenticate
@@ -19,6 +21,10 @@
"""
# auto-login is now handled by evennia.web.utils.middleware
+ # check if webclient should be enabled
+ if not settings.WEBCLIENT_ENABLED:
+ raise Http404
+
# make sure to store the browser session's hash so the webclient can get to it!
pagevars = {'browser_sessid': request.session.session_key}
|
{"golden_diff": "diff --git a/evennia/web/webclient/views.py b/evennia/web/webclient/views.py\n--- a/evennia/web/webclient/views.py\n+++ b/evennia/web/webclient/views.py\n@@ -5,6 +5,8 @@\n \n \"\"\"\n from __future__ import print_function\n+from django.conf import settings\n+from django.http import Http404\n from django.shortcuts import render\n from django.contrib.auth import login, authenticate\n \n@@ -19,6 +21,10 @@\n \"\"\"\n # auto-login is now handled by evennia.web.utils.middleware\n \n+ # check if webclient should be enabled\n+ if not settings.WEBCLIENT_ENABLED:\n+ raise Http404\n+ \n # make sure to store the browser session's hash so the webclient can get to it!\n pagevars = {'browser_sessid': request.session.session_key}\n", "issue": "Disabling webclient does not actually disable it\n#### Steps to reproduce the issue / Reasons for adding feature:\r\n\r\n1. Set WEBCLIENT_ENABLED to False\r\n2. Link disappears from index page\r\n3. Go to http://example.com/webclient\r\n4. Webclient loads.\r\n\r\n#### Error output / Expected result of feature\r\nSetting WEBCLIENT_ENABLED to False should disable it, not hide it.\r\n\r\n#### Extra information, such as Evennia revision/repo/branch, operating system and ideas for how to solve / implement:\r\nThe status check needs to happen in urls.py, not just on the navbar template. If disabled, the url for the webclient should not be added to the list of urlpatterns.\n", "before_files": [{"content": "\n\"\"\"\nThis contains a simple view for rendering the webclient\npage and serve it eventual static content.\n\n\"\"\"\nfrom __future__ import print_function\nfrom django.shortcuts import render\nfrom django.contrib.auth import login, authenticate\n\nfrom evennia.accounts.models import AccountDB\nfrom evennia.utils import logger\n\n\ndef webclient(request):\n \"\"\"\n Webclient page template loading.\n\n \"\"\"\n # auto-login is now handled by evennia.web.utils.middleware\n \n # make sure to store the browser session's hash so the webclient can get to it!\n pagevars = {'browser_sessid': request.session.session_key}\n\n return render(request, 'webclient.html', pagevars)\n", "path": "evennia/web/webclient/views.py"}], "after_files": [{"content": "\n\"\"\"\nThis contains a simple view for rendering the webclient\npage and serve it eventual static content.\n\n\"\"\"\nfrom __future__ import print_function\nfrom django.conf import settings\nfrom django.http import Http404\nfrom django.shortcuts import render\nfrom django.contrib.auth import login, authenticate\n\nfrom evennia.accounts.models import AccountDB\nfrom evennia.utils import logger\n\n\ndef webclient(request):\n \"\"\"\n Webclient page template loading.\n\n \"\"\"\n # auto-login is now handled by evennia.web.utils.middleware\n \n # check if webclient should be enabled\n if not settings.WEBCLIENT_ENABLED:\n raise Http404\n \n # make sure to store the browser session's hash so the webclient can get to it!\n pagevars = {'browser_sessid': request.session.session_key}\n\n return render(request, 'webclient.html', pagevars)\n", "path": "evennia/web/webclient/views.py"}]}
| 589 | 190 |
gh_patches_debug_43232
|
rasdani/github-patches
|
git_diff
|
chainer__chainer-2204
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove trigger option of snapshot and snapshot_object
They have the same functionality as the trigger argument of Trainer.extend and are redundant. I think they confuse users and they might misunderstand the trigger feature, and so they should be removed in the next major update.
--- END ISSUE ---
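
(Editor's illustration, separate from the issue text: the redundancy described above exists because `Trainer.extend` already takes a `trigger` argument. A hedged sketch of the idiom that remains after the removal; `trainer` and `model` are assumed to exist, and the intervals are arbitrary examples.)

```python
# Pass the snapshot interval to Trainer.extend rather than to the extension itself.
from chainer.training import extensions

trainer.extend(extensions.snapshot(), trigger=(1000, 'iteration'))
trainer.extend(
    extensions.snapshot_object(model, 'model_iter_{.updater.iteration}'),
    trigger=(1, 'epoch'))
```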
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/training/extensions/_snapshot.py`
Content:
```
1 import os
2 import shutil
3 import tempfile
4
5 from chainer.serializers import npz
6 from chainer.training import extension
7
8
9 def snapshot_object(target, filename, savefun=npz.save_npz,
10 trigger=(1, 'epoch')):
11 """Returns a trainer extension to take snapshots of a given object.
12
13 This extension serializes the given object and saves it to the output
14 directory.
15
16 This extension is called once for each epoch by default. The default
17 priority is -100, which is lower than that of most built-in extensions.
18
19 Args:
20 target: Object to serialize.
21 filename (str): Name of the file into which the object is serialized.
22 It can be a format string, where the trainer object is passed to
23 the :meth:`str.format` method. For example,
24 ``'snapshot_{.updater.iteration}'`` is converted to
25 ``'snapshot_10000'`` at the 10,000th iteration.
26 savefun: Function to save the object. It takes two arguments: the
27 output file path and the object to serialize.
28 trigger: Trigger that decides when to take snapshot. It can be either
29 an already built trigger object (i.e., a callable object that
30 accepts a trainer object and returns a bool value), or a tuple in
31 the form ``<int>, 'epoch'`` or ``<int>, 'iteration'``. In latter
32 case, the tuple is passed to IntervalTrigger.
33
34 Returns:
35 An extension function.
36
37 """
38 @extension.make_extension(trigger=trigger, priority=-100)
39 def snapshot_object(trainer):
40 _snapshot_object(trainer, target, filename.format(trainer), savefun)
41
42 return snapshot_object
43
44
45 def snapshot(savefun=npz.save_npz,
46 filename='snapshot_iter_{.updater.iteration}',
47 trigger=(1, 'epoch')):
48 """Returns a trainer extension to take snapshots of the trainer.
49
50 This extension serializes the trainer object and saves it to the output
51 directory. It is used to support resuming the training loop from the saved
52 state.
53
54 This extension is called once for each epoch by default. The default
55 priority is -100, which is lower than that of most built-in extensions.
56
57 .. note::
58 This extension first writes the serialized object to a temporary file
59 and then rename it to the target file name. Thus, if the program stops
60 right before the renaming, the temporary file might be left in the
61 output directory.
62
63 Args:
64 savefun: Function to save the trainer. It takes two arguments: the
65 output file path and the trainer object.
66 filename (str): Name of the file into which the trainer is serialized.
67 It can be a format string, where the trainer object is passed to
68 the :meth:`str.format` method.
69 trigger: Trigger that decides when to take snapshot. It can be either
70 an already built trigger object (i.e., a callable object that
71 accepts a trainer object and returns a bool value), or a tuple in
72 the form ``<int>, 'epoch'`` or ``<int>, 'iteration'``. In latter
73 case, the tuple is passed to IntervalTrigger.
74
75 """
76 @extension.make_extension(trigger=trigger, priority=-100)
77 def snapshot(trainer):
78 _snapshot_object(trainer, trainer, filename.format(trainer), savefun)
79
80 return snapshot
81
82
83 def _snapshot_object(trainer, target, filename, savefun):
84 fn = filename.format(trainer)
85 prefix = 'tmp' + fn
86 fd, tmppath = tempfile.mkstemp(prefix=prefix, dir=trainer.out)
87 try:
88 savefun(tmppath, target)
89 except Exception:
90 os.close(fd)
91 os.remove(tmppath)
92 raise
93 os.close(fd)
94 shutil.move(tmppath, os.path.join(trainer.out, fn))
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/chainer/training/extensions/_snapshot.py b/chainer/training/extensions/_snapshot.py
--- a/chainer/training/extensions/_snapshot.py
+++ b/chainer/training/extensions/_snapshot.py
@@ -6,15 +6,19 @@
from chainer.training import extension
-def snapshot_object(target, filename, savefun=npz.save_npz,
- trigger=(1, 'epoch')):
+def snapshot_object(target, filename, savefun=npz.save_npz):
"""Returns a trainer extension to take snapshots of a given object.
This extension serializes the given object and saves it to the output
directory.
- This extension is called once for each epoch by default. The default
- priority is -100, which is lower than that of most built-in extensions.
+ This extension is called once per epoch by default. To take a
+ snapshot at a different interval, a trigger object specifying the
+ required interval can be passed along with this extension
+ to the `extend()` method of the trainer.
+
+ The default priority is -100, which is lower than that of most
+ built-in extensions.
Args:
target: Object to serialize.
@@ -25,17 +29,12 @@
``'snapshot_10000'`` at the 10,000th iteration.
savefun: Function to save the object. It takes two arguments: the
output file path and the object to serialize.
- trigger: Trigger that decides when to take snapshot. It can be either
- an already built trigger object (i.e., a callable object that
- accepts a trainer object and returns a bool value), or a tuple in
- the form ``<int>, 'epoch'`` or ``<int>, 'iteration'``. In latter
- case, the tuple is passed to IntervalTrigger.
Returns:
An extension function.
"""
- @extension.make_extension(trigger=trigger, priority=-100)
+ @extension.make_extension(trigger=(1, 'epoch'), priority=-100)
def snapshot_object(trainer):
_snapshot_object(trainer, target, filename.format(trainer), savefun)
@@ -43,16 +42,20 @@
def snapshot(savefun=npz.save_npz,
- filename='snapshot_iter_{.updater.iteration}',
- trigger=(1, 'epoch')):
+ filename='snapshot_iter_{.updater.iteration}'):
"""Returns a trainer extension to take snapshots of the trainer.
This extension serializes the trainer object and saves it to the output
directory. It is used to support resuming the training loop from the saved
state.
- This extension is called once for each epoch by default. The default
- priority is -100, which is lower than that of most built-in extensions.
+ This extension is called once per epoch by default. To take a
+ snapshot at a different interval, a trigger object specifying the
+ required interval can be passed along with this extension
+ to the `extend()` method of the trainer.
+
+ The default priority is -100, which is lower than that of most
+ built-in extensions.
.. note::
This extension first writes the serialized object to a temporary file
@@ -66,14 +69,9 @@
filename (str): Name of the file into which the trainer is serialized.
It can be a format string, where the trainer object is passed to
the :meth:`str.format` method.
- trigger: Trigger that decides when to take snapshot. It can be either
- an already built trigger object (i.e., a callable object that
- accepts a trainer object and returns a bool value), or a tuple in
- the form ``<int>, 'epoch'`` or ``<int>, 'iteration'``. In latter
- case, the tuple is passed to IntervalTrigger.
"""
- @extension.make_extension(trigger=trigger, priority=-100)
+ @extension.make_extension(trigger=(1, 'epoch'), priority=-100)
def snapshot(trainer):
_snapshot_object(trainer, trainer, filename.format(trainer), savefun)
|
{"golden_diff": "diff --git a/chainer/training/extensions/_snapshot.py b/chainer/training/extensions/_snapshot.py\n--- a/chainer/training/extensions/_snapshot.py\n+++ b/chainer/training/extensions/_snapshot.py\n@@ -6,15 +6,19 @@\n from chainer.training import extension\n \n \n-def snapshot_object(target, filename, savefun=npz.save_npz,\n- trigger=(1, 'epoch')):\n+def snapshot_object(target, filename, savefun=npz.save_npz):\n \"\"\"Returns a trainer extension to take snapshots of a given object.\n \n This extension serializes the given object and saves it to the output\n directory.\n \n- This extension is called once for each epoch by default. The default\n- priority is -100, which is lower than that of most built-in extensions.\n+ This extension is called once per epoch by default. To take a\n+ snapshot at a different interval, a trigger object specifying the\n+ required interval can be passed along with this extension\n+ to the `extend()` method of the trainer.\n+\n+ The default priority is -100, which is lower than that of most\n+ built-in extensions.\n \n Args:\n target: Object to serialize.\n@@ -25,17 +29,12 @@\n ``'snapshot_10000'`` at the 10,000th iteration.\n savefun: Function to save the object. It takes two arguments: the\n output file path and the object to serialize.\n- trigger: Trigger that decides when to take snapshot. It can be either\n- an already built trigger object (i.e., a callable object that\n- accepts a trainer object and returns a bool value), or a tuple in\n- the form ``<int>, 'epoch'`` or ``<int>, 'iteration'``. In latter\n- case, the tuple is passed to IntervalTrigger.\n \n Returns:\n An extension function.\n \n \"\"\"\n- @extension.make_extension(trigger=trigger, priority=-100)\n+ @extension.make_extension(trigger=(1, 'epoch'), priority=-100)\n def snapshot_object(trainer):\n _snapshot_object(trainer, target, filename.format(trainer), savefun)\n \n@@ -43,16 +42,20 @@\n \n \n def snapshot(savefun=npz.save_npz,\n- filename='snapshot_iter_{.updater.iteration}',\n- trigger=(1, 'epoch')):\n+ filename='snapshot_iter_{.updater.iteration}'):\n \"\"\"Returns a trainer extension to take snapshots of the trainer.\n \n This extension serializes the trainer object and saves it to the output\n directory. It is used to support resuming the training loop from the saved\n state.\n \n- This extension is called once for each epoch by default. The default\n- priority is -100, which is lower than that of most built-in extensions.\n+ This extension is called once per epoch by default. To take a\n+ snapshot at a different interval, a trigger object specifying the\n+ required interval can be passed along with this extension\n+ to the `extend()` method of the trainer.\n+\n+ The default priority is -100, which is lower than that of most\n+ built-in extensions.\n \n .. note::\n This extension first writes the serialized object to a temporary file\n@@ -66,14 +69,9 @@\n filename (str): Name of the file into which the trainer is serialized.\n It can be a format string, where the trainer object is passed to\n the :meth:`str.format` method.\n- trigger: Trigger that decides when to take snapshot. It can be either\n- an already built trigger object (i.e., a callable object that\n- accepts a trainer object and returns a bool value), or a tuple in\n- the form ``<int>, 'epoch'`` or ``<int>, 'iteration'``. 
In latter\n- case, the tuple is passed to IntervalTrigger.\n \n \"\"\"\n- @extension.make_extension(trigger=trigger, priority=-100)\n+ @extension.make_extension(trigger=(1, 'epoch'), priority=-100)\n def snapshot(trainer):\n _snapshot_object(trainer, trainer, filename.format(trainer), savefun)\n", "issue": "Remove trigger option of snapshot and snapshot_object\nThey have the same functionality as the trigger argument of Trainer.extend and are redundant. I think they confuse users and they might misunderstand the trigger feature, and so they should be removed in the next major update.\n", "before_files": [{"content": "import os\nimport shutil\nimport tempfile\n\nfrom chainer.serializers import npz\nfrom chainer.training import extension\n\n\ndef snapshot_object(target, filename, savefun=npz.save_npz,\n trigger=(1, 'epoch')):\n \"\"\"Returns a trainer extension to take snapshots of a given object.\n\n This extension serializes the given object and saves it to the output\n directory.\n\n This extension is called once for each epoch by default. The default\n priority is -100, which is lower than that of most built-in extensions.\n\n Args:\n target: Object to serialize.\n filename (str): Name of the file into which the object is serialized.\n It can be a format string, where the trainer object is passed to\n the :meth:`str.format` method. For example,\n ``'snapshot_{.updater.iteration}'`` is converted to\n ``'snapshot_10000'`` at the 10,000th iteration.\n savefun: Function to save the object. It takes two arguments: the\n output file path and the object to serialize.\n trigger: Trigger that decides when to take snapshot. It can be either\n an already built trigger object (i.e., a callable object that\n accepts a trainer object and returns a bool value), or a tuple in\n the form ``<int>, 'epoch'`` or ``<int>, 'iteration'``. In latter\n case, the tuple is passed to IntervalTrigger.\n\n Returns:\n An extension function.\n\n \"\"\"\n @extension.make_extension(trigger=trigger, priority=-100)\n def snapshot_object(trainer):\n _snapshot_object(trainer, target, filename.format(trainer), savefun)\n\n return snapshot_object\n\n\ndef snapshot(savefun=npz.save_npz,\n filename='snapshot_iter_{.updater.iteration}',\n trigger=(1, 'epoch')):\n \"\"\"Returns a trainer extension to take snapshots of the trainer.\n\n This extension serializes the trainer object and saves it to the output\n directory. It is used to support resuming the training loop from the saved\n state.\n\n This extension is called once for each epoch by default. The default\n priority is -100, which is lower than that of most built-in extensions.\n\n .. note::\n This extension first writes the serialized object to a temporary file\n and then rename it to the target file name. Thus, if the program stops\n right before the renaming, the temporary file might be left in the\n output directory.\n\n Args:\n savefun: Function to save the trainer. It takes two arguments: the\n output file path and the trainer object.\n filename (str): Name of the file into which the trainer is serialized.\n It can be a format string, where the trainer object is passed to\n the :meth:`str.format` method.\n trigger: Trigger that decides when to take snapshot. It can be either\n an already built trigger object (i.e., a callable object that\n accepts a trainer object and returns a bool value), or a tuple in\n the form ``<int>, 'epoch'`` or ``<int>, 'iteration'``. 
In latter\n case, the tuple is passed to IntervalTrigger.\n\n \"\"\"\n @extension.make_extension(trigger=trigger, priority=-100)\n def snapshot(trainer):\n _snapshot_object(trainer, trainer, filename.format(trainer), savefun)\n\n return snapshot\n\n\ndef _snapshot_object(trainer, target, filename, savefun):\n fn = filename.format(trainer)\n prefix = 'tmp' + fn\n fd, tmppath = tempfile.mkstemp(prefix=prefix, dir=trainer.out)\n try:\n savefun(tmppath, target)\n except Exception:\n os.close(fd)\n os.remove(tmppath)\n raise\n os.close(fd)\n shutil.move(tmppath, os.path.join(trainer.out, fn))\n", "path": "chainer/training/extensions/_snapshot.py"}], "after_files": [{"content": "import os\nimport shutil\nimport tempfile\n\nfrom chainer.serializers import npz\nfrom chainer.training import extension\n\n\ndef snapshot_object(target, filename, savefun=npz.save_npz):\n \"\"\"Returns a trainer extension to take snapshots of a given object.\n\n This extension serializes the given object and saves it to the output\n directory.\n\n This extension is called once per epoch by default. To take a\n snapshot at a different interval, a trigger object specifying the\n required interval can be passed along with this extension\n to the `extend()` method of the trainer.\n\n The default priority is -100, which is lower than that of most\n built-in extensions.\n\n Args:\n target: Object to serialize.\n filename (str): Name of the file into which the object is serialized.\n It can be a format string, where the trainer object is passed to\n the :meth:`str.format` method. For example,\n ``'snapshot_{.updater.iteration}'`` is converted to\n ``'snapshot_10000'`` at the 10,000th iteration.\n savefun: Function to save the object. It takes two arguments: the\n output file path and the object to serialize.\n\n Returns:\n An extension function.\n\n \"\"\"\n @extension.make_extension(trigger=(1, 'epoch'), priority=-100)\n def snapshot_object(trainer):\n _snapshot_object(trainer, target, filename.format(trainer), savefun)\n\n return snapshot_object\n\n\ndef snapshot(savefun=npz.save_npz,\n filename='snapshot_iter_{.updater.iteration}'):\n \"\"\"Returns a trainer extension to take snapshots of the trainer.\n\n This extension serializes the trainer object and saves it to the output\n directory. It is used to support resuming the training loop from the saved\n state.\n\n This extension is called once per epoch by default. To take a\n snapshot at a different interval, a trigger object specifying the\n required interval can be passed along with this extension\n to the `extend()` method of the trainer.\n\n The default priority is -100, which is lower than that of most\n built-in extensions.\n\n .. note::\n This extension first writes the serialized object to a temporary file\n and then rename it to the target file name. Thus, if the program stops\n right before the renaming, the temporary file might be left in the\n output directory.\n\n Args:\n savefun: Function to save the trainer. 
It takes two arguments: the\n output file path and the trainer object.\n filename (str): Name of the file into which the trainer is serialized.\n It can be a format string, where the trainer object is passed to\n the :meth:`str.format` method.\n\n \"\"\"\n @extension.make_extension(trigger=(1, 'epoch'), priority=-100)\n def snapshot(trainer):\n _snapshot_object(trainer, trainer, filename.format(trainer), savefun)\n\n return snapshot\n\n\ndef _snapshot_object(trainer, target, filename, savefun):\n fn = filename.format(trainer)\n prefix = 'tmp' + fn\n fd, tmppath = tempfile.mkstemp(prefix=prefix, dir=trainer.out)\n try:\n savefun(tmppath, target)\n except Exception:\n os.close(fd)\n os.remove(tmppath)\n raise\n os.close(fd)\n shutil.move(tmppath, os.path.join(trainer.out, fn))\n", "path": "chainer/training/extensions/_snapshot.py"}]}
| 1,350 | 937 |
gh_patches_debug_27650
|
rasdani/github-patches
|
git_diff
|
biolab__orange3-4217
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
2 x Transpose + Preprocess loses information
**Describe the bug**
Second transpose cannot retrieve the domain after Preprocess.
**To Reproduce**
Steps to reproduce the behavior:
1. File (brown-selected).
2. Transpose.
3. Preprocesss (say Normalize).
4. Transpose.
**Orange version:**
3.24.dev
**Expected behavior**
Second Transpose puts columns names into a string variable.
**Screenshots**
<img width="1232" alt="Screen Shot 2019-11-14 at 09 33 02" src="https://user-images.githubusercontent.com/12524972/68839832-c910d600-06c1-11ea-9286-5bf033a9802f.png">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Orange/preprocess/normalize.py`
Content:
```
1 import numpy as np
2
3 from Orange.data import ContinuousVariable, Domain
4 from Orange.statistics import distribution
5 from Orange.util import Reprable
6 from .preprocess import Normalize
7 from .transformation import Normalizer as Norm
8 __all__ = ["Normalizer"]
9
10
11 class Normalizer(Reprable):
12 def __init__(self,
13 zero_based=True,
14 norm_type=Normalize.NormalizeBySD,
15 transform_class=False,
16 center=True,
17 normalize_datetime=False):
18 self.zero_based = zero_based
19 self.norm_type = norm_type
20 self.transform_class = transform_class
21 self.center = center
22 self.normalize_datetime = normalize_datetime
23
24 def __call__(self, data):
25 dists = distribution.get_distributions(data)
26 new_attrs = [self.normalize(dists[i], var) for
27 (i, var) in enumerate(data.domain.attributes)]
28
29 new_class_vars = data.domain.class_vars
30 if self.transform_class:
31 attr_len = len(data.domain.attributes)
32 new_class_vars = [self.normalize(dists[i + attr_len], var) for
33 (i, var) in enumerate(data.domain.class_vars)]
34
35 domain = Domain(new_attrs, new_class_vars, data.domain.metas)
36 return data.transform(domain)
37
38 def normalize(self, dist, var):
39 if not var.is_continuous or (var.is_time and not self.normalize_datetime):
40 return var
41 elif self.norm_type == Normalize.NormalizeBySD:
42 return self.normalize_by_sd(dist, var)
43 elif self.norm_type == Normalize.NormalizeBySpan:
44 return self.normalize_by_span(dist, var)
45
46 def normalize_by_sd(self, dist, var):
47 avg, sd = (dist.mean(), dist.standard_deviation()) if dist.size else (0, 1)
48 if sd == 0:
49 sd = 1
50 if self.center:
51 compute_val = Norm(var, avg, 1 / sd)
52 else:
53 compute_val = Norm(var, 0, 1 / sd)
54
55 return ContinuousVariable(
56 var.name,
57 compute_value=compute_val,
58 sparse=var.sparse,
59 )
60
61 def normalize_by_span(self, dist, var):
62 dma, dmi = (dist.max(), dist.min()) if dist.shape[1] else (np.nan, np.nan)
63 diff = dma - dmi
64 if diff < 1e-15:
65 diff = 1
66 if self.zero_based:
67 return ContinuousVariable(
68 var.name,
69 compute_value=Norm(var, dmi, 1 / diff),
70 sparse=var.sparse)
71 else:
72 return ContinuousVariable(
73 var.name,
74 compute_value=Norm(var, (dma + dmi) / 2, 2 / diff),
75 sparse=var.sparse)
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/Orange/preprocess/normalize.py b/Orange/preprocess/normalize.py
--- a/Orange/preprocess/normalize.py
+++ b/Orange/preprocess/normalize.py
@@ -1,6 +1,6 @@
import numpy as np
-from Orange.data import ContinuousVariable, Domain
+from Orange.data import Domain
from Orange.statistics import distribution
from Orange.util import Reprable
from .preprocess import Normalize
@@ -51,12 +51,7 @@
compute_val = Norm(var, avg, 1 / sd)
else:
compute_val = Norm(var, 0, 1 / sd)
-
- return ContinuousVariable(
- var.name,
- compute_value=compute_val,
- sparse=var.sparse,
- )
+ return var.copy(compute_value=compute_val)
def normalize_by_span(self, dist, var):
dma, dmi = (dist.max(), dist.min()) if dist.shape[1] else (np.nan, np.nan)
@@ -64,12 +59,7 @@
if diff < 1e-15:
diff = 1
if self.zero_based:
- return ContinuousVariable(
- var.name,
- compute_value=Norm(var, dmi, 1 / diff),
- sparse=var.sparse)
+ compute_val = Norm(var, dmi, 1 / diff)
else:
- return ContinuousVariable(
- var.name,
- compute_value=Norm(var, (dma + dmi) / 2, 2 / diff),
- sparse=var.sparse)
+ compute_val = Norm(var, (dma + dmi) / 2, 2 / diff)
+ return var.copy(compute_value=compute_val)
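
(Editor's note on the diff above, not part of the dataset row: constructing a fresh `ContinuousVariable` starts with an empty `attributes` mapping, so any per-variable metadata the original carried, presumably including what Transpose relies on to restore column names, is lost, while `Variable.copy` is expected to carry it over. A small hedged sketch; the label key is an invented stand-in.)

```python
# Illustrative only; "label" is an assumed stand-in for Transpose's bookkeeping.
from Orange.data import ContinuousVariable

var = ContinuousVariable("x")
var.attributes["label"] = "original column name"

rebuilt = ContinuousVariable(var.name)   # fresh variable: attributes == {}
copied = var.copy(compute_value=None)    # copy: attributes expected to carry over
print(rebuilt.attributes, copied.attributes)
```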
|
{"golden_diff": "diff --git a/Orange/preprocess/normalize.py b/Orange/preprocess/normalize.py\n--- a/Orange/preprocess/normalize.py\n+++ b/Orange/preprocess/normalize.py\n@@ -1,6 +1,6 @@\n import numpy as np\n \n-from Orange.data import ContinuousVariable, Domain\n+from Orange.data import Domain\n from Orange.statistics import distribution\n from Orange.util import Reprable\n from .preprocess import Normalize\n@@ -51,12 +51,7 @@\n compute_val = Norm(var, avg, 1 / sd)\n else:\n compute_val = Norm(var, 0, 1 / sd)\n-\n- return ContinuousVariable(\n- var.name,\n- compute_value=compute_val,\n- sparse=var.sparse,\n- )\n+ return var.copy(compute_value=compute_val)\n \n def normalize_by_span(self, dist, var):\n dma, dmi = (dist.max(), dist.min()) if dist.shape[1] else (np.nan, np.nan)\n@@ -64,12 +59,7 @@\n if diff < 1e-15:\n diff = 1\n if self.zero_based:\n- return ContinuousVariable(\n- var.name,\n- compute_value=Norm(var, dmi, 1 / diff),\n- sparse=var.sparse)\n+ compute_val = Norm(var, dmi, 1 / diff)\n else:\n- return ContinuousVariable(\n- var.name,\n- compute_value=Norm(var, (dma + dmi) / 2, 2 / diff),\n- sparse=var.sparse)\n+ compute_val = Norm(var, (dma + dmi) / 2, 2 / diff)\n+ return var.copy(compute_value=compute_val)\n", "issue": "2 x Transpose + Preprocess loses information\n**Describe the bug**\r\nSecond transpose cannot retrieve the domain after Preprocess.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. File (brown-selected).\r\n2. Transpose.\r\n3. Preprocesss (say Normalize).\r\n4. Transpose.\r\n\r\n**Orange version:**\r\n3.24.dev\r\n\r\n**Expected behavior**\r\nSecond Transpose puts columns names into a string variable.\r\n\r\n**Screenshots**\r\n<img width=\"1232\" alt=\"Screen Shot 2019-11-14 at 09 33 02\" src=\"https://user-images.githubusercontent.com/12524972/68839832-c910d600-06c1-11ea-9286-5bf033a9802f.png\">\r\n\r\n\n", "before_files": [{"content": "import numpy as np\n\nfrom Orange.data import ContinuousVariable, Domain\nfrom Orange.statistics import distribution\nfrom Orange.util import Reprable\nfrom .preprocess import Normalize\nfrom .transformation import Normalizer as Norm\n__all__ = [\"Normalizer\"]\n\n\nclass Normalizer(Reprable):\n def __init__(self,\n zero_based=True,\n norm_type=Normalize.NormalizeBySD,\n transform_class=False,\n center=True,\n normalize_datetime=False):\n self.zero_based = zero_based\n self.norm_type = norm_type\n self.transform_class = transform_class\n self.center = center\n self.normalize_datetime = normalize_datetime\n\n def __call__(self, data):\n dists = distribution.get_distributions(data)\n new_attrs = [self.normalize(dists[i], var) for\n (i, var) in enumerate(data.domain.attributes)]\n\n new_class_vars = data.domain.class_vars\n if self.transform_class:\n attr_len = len(data.domain.attributes)\n new_class_vars = [self.normalize(dists[i + attr_len], var) for\n (i, var) in enumerate(data.domain.class_vars)]\n\n domain = Domain(new_attrs, new_class_vars, data.domain.metas)\n return data.transform(domain)\n\n def normalize(self, dist, var):\n if not var.is_continuous or (var.is_time and not self.normalize_datetime):\n return var\n elif self.norm_type == Normalize.NormalizeBySD:\n return self.normalize_by_sd(dist, var)\n elif self.norm_type == Normalize.NormalizeBySpan:\n return self.normalize_by_span(dist, var)\n\n def normalize_by_sd(self, dist, var):\n avg, sd = (dist.mean(), dist.standard_deviation()) if dist.size else (0, 1)\n if sd == 0:\n sd = 1\n if self.center:\n compute_val = Norm(var, avg, 1 / sd)\n else:\n 
compute_val = Norm(var, 0, 1 / sd)\n\n return ContinuousVariable(\n var.name,\n compute_value=compute_val,\n sparse=var.sparse,\n )\n\n def normalize_by_span(self, dist, var):\n dma, dmi = (dist.max(), dist.min()) if dist.shape[1] else (np.nan, np.nan)\n diff = dma - dmi\n if diff < 1e-15:\n diff = 1\n if self.zero_based:\n return ContinuousVariable(\n var.name,\n compute_value=Norm(var, dmi, 1 / diff),\n sparse=var.sparse)\n else:\n return ContinuousVariable(\n var.name,\n compute_value=Norm(var, (dma + dmi) / 2, 2 / diff),\n sparse=var.sparse)\n", "path": "Orange/preprocess/normalize.py"}], "after_files": [{"content": "import numpy as np\n\nfrom Orange.data import Domain\nfrom Orange.statistics import distribution\nfrom Orange.util import Reprable\nfrom .preprocess import Normalize\nfrom .transformation import Normalizer as Norm\n__all__ = [\"Normalizer\"]\n\n\nclass Normalizer(Reprable):\n def __init__(self,\n zero_based=True,\n norm_type=Normalize.NormalizeBySD,\n transform_class=False,\n center=True,\n normalize_datetime=False):\n self.zero_based = zero_based\n self.norm_type = norm_type\n self.transform_class = transform_class\n self.center = center\n self.normalize_datetime = normalize_datetime\n\n def __call__(self, data):\n dists = distribution.get_distributions(data)\n new_attrs = [self.normalize(dists[i], var) for\n (i, var) in enumerate(data.domain.attributes)]\n\n new_class_vars = data.domain.class_vars\n if self.transform_class:\n attr_len = len(data.domain.attributes)\n new_class_vars = [self.normalize(dists[i + attr_len], var) for\n (i, var) in enumerate(data.domain.class_vars)]\n\n domain = Domain(new_attrs, new_class_vars, data.domain.metas)\n return data.transform(domain)\n\n def normalize(self, dist, var):\n if not var.is_continuous or (var.is_time and not self.normalize_datetime):\n return var\n elif self.norm_type == Normalize.NormalizeBySD:\n return self.normalize_by_sd(dist, var)\n elif self.norm_type == Normalize.NormalizeBySpan:\n return self.normalize_by_span(dist, var)\n\n def normalize_by_sd(self, dist, var):\n avg, sd = (dist.mean(), dist.standard_deviation()) if dist.size else (0, 1)\n if sd == 0:\n sd = 1\n if self.center:\n compute_val = Norm(var, avg, 1 / sd)\n else:\n compute_val = Norm(var, 0, 1 / sd)\n return var.copy(compute_value=compute_val)\n\n def normalize_by_span(self, dist, var):\n dma, dmi = (dist.max(), dist.min()) if dist.shape[1] else (np.nan, np.nan)\n diff = dma - dmi\n if diff < 1e-15:\n diff = 1\n if self.zero_based:\n compute_val = Norm(var, dmi, 1 / diff)\n else:\n compute_val = Norm(var, (dma + dmi) / 2, 2 / diff)\n return var.copy(compute_value=compute_val)\n", "path": "Orange/preprocess/normalize.py"}]}
| 1,182 | 383 |
gh_patches_debug_20707
|
rasdani/github-patches
|
git_diff
|
certbot__certbot-5670
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
KeyError when using --manual-cleanup-hook with wildcard certificate
## My operating system is (include version):
Ubuntu 16.04
## I installed Certbot with (certbot-auto, OS package manager, pip, etc):
Git - commit `31805c5`
## I ran this command and it produced this output:
```bash
$ certbot --noninteractive --manual --manual-public-ip-logging-ok --preferred-challenges dns --manual-auth-hook "/srv/letsencrypt-lexicon.sh auth" --manual-cleanup-hook "/srv/letsencrypt-lexicon.sh cleanup" --server https://acme-staging-v02.api.letsencrypt.org/directory --cert-name xenial.alberon.co.uk -d xenial.alberon.co.uk,*.xenial.alberon.co.uk -m [email protected] --agree-tos
...
Traceback (most recent call last):
File "/srv/certbot/venv/bin/certbot", line 11, in <module>
load_entry_point('certbot', 'console_scripts', 'certbot')()
File "/srv/certbot/certbot/main.py", line 1266, in main
return config.func(config, plugins)
File "/srv/certbot/certbot/main.py", line 1157, in certonly
lineage = _get_and_save_cert(le_client, config, domains, certname, lineage)
File "/srv/certbot/certbot/main.py", line 118, in _get_and_save_cert
lineage = le_client.obtain_and_enroll_certificate(domains, certname)
File "/srv/certbot/certbot/client.py", line 349, in obtain_and_enroll_certificate
cert, chain, key, _ = self.obtain_certificate(domains)
File "/srv/certbot/certbot/client.py", line 293, in obtain_certificate
orderr = self._get_order_and_authorizations(csr.data, self.config.allow_subset_of_names)
File "/srv/certbot/certbot/client.py", line 329, in _get_order_and_authorizations
authzr = self.auth_handler.handle_authorizations(orderr, best_effort)
File "/srv/certbot/certbot/auth_handler.py", line 82, in handle_authorizations
self._respond(resp, best_effort)
File "/srv/certbot/certbot/auth_handler.py", line 159, in _respond
self._cleanup_challenges(active_achalls)
File "/srv/certbot/certbot/auth_handler.py", line 304, in _cleanup_challenges
self.auth.cleanup(achalls)
File "/srv/certbot/certbot/plugins/manual.py", line 218, in cleanup
env = self.env.pop(achall.domain)
KeyError: u'xenial.alberon.co.uk'
```
## Certbot's behavior differed from what I expected because:
It crashed.
This only seems to happen when using `--manual-cleanup-hook` *and* including both `xenial.alberon.co.uk` and `*.xenial.alberon.co.uk`.
I think `self.env.pop(achall.domain)` gets called twice, both times with `achall.domain = 'xenial.alberon.co.uk'`, and the second time it's already been removed. I would guess the key needs to be changed to something else now that one domain may require two separate challenges.
## Here is a Certbot log showing the issue (if available):
https://gist.github.com/davejamesmiller/2a8c2f7a03cac9edd573c767a8fbedec
## Here is the relevant nginx server block or Apache virtualhost for the domain I am configuring:
N/A
--- END ISSUE ---
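
(Editor's sketch, separate from the report and not necessarily the fix Certbot shipped: the reporter's analysis, that a name plus its wildcard now produces two challenges whose cleanups both pop the same key, suggests either tolerating a missing key or keying the saved environment by more than the bare domain. The snippet below is a self-contained illustration of the collision and of the tolerant variant; the richer key mentioned in the final comment is an assumption about `manual.py`'s internals.)

```python
# Two cleanups target the same key; a default on the second pop avoids the
# KeyError seen in the traceback above.
env = {"xenial.alberon.co.uk": {"CERTBOT_AUTH_OUTPUT": ""}}

env.pop("xenial.alberon.co.uk")           # first challenge's cleanup succeeds
env.pop("xenial.alberon.co.uk", None)     # second cleanup: default, no KeyError

# Alternative (assumed): key per challenge, e.g. (achall.domain, achall.chall.typ),
# in both perform() and cleanup(), so the two authorizations no longer collide.
```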
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `certbot/plugins/manual.py`
Content:
```
1 """Manual authenticator plugin"""
2 import os
3
4 import zope.component
5 import zope.interface
6
7 from acme import challenges
8
9 from certbot import interfaces
10 from certbot import errors
11 from certbot import hooks
12 from certbot import reverter
13 from certbot.plugins import common
14
15
16 class ManualTlsSni01(common.TLSSNI01):
17 """TLS-SNI-01 authenticator for the Manual plugin
18
19 :ivar configurator: Authenticator object
20 :type configurator: :class:`~certbot.plugins.manual.Authenticator`
21
22 :ivar list achalls: Annotated
23 class:`~certbot.achallenges.KeyAuthorizationAnnotatedChallenge`
24 challenges
25
26 :param list indices: Meant to hold indices of challenges in a
27 larger array. NginxTlsSni01 is capable of solving many challenges
28 at once which causes an indexing issue within NginxConfigurator
29 who must return all responses in order. Imagine NginxConfigurator
30 maintaining state about where all of the http-01 Challenges,
31 TLS-SNI-01 Challenges belong in the response array. This is an
32 optional utility.
33
34 :param str challenge_conf: location of the challenge config file
35 """
36
37 def perform(self):
38 """Create the SSL certificates and private keys"""
39
40 for achall in self.achalls:
41 self._setup_challenge_cert(achall)
42
43
44 @zope.interface.implementer(interfaces.IAuthenticator)
45 @zope.interface.provider(interfaces.IPluginFactory)
46 class Authenticator(common.Plugin):
47 """Manual authenticator
48
49 This plugin allows the user to perform the domain validation
50 challenge(s) themselves. This either be done manually by the user or
51 through shell scripts provided to Certbot.
52
53 """
54
55 description = 'Manual configuration or run your own shell scripts'
56 hidden = True
57 long_description = (
58 'Authenticate through manual configuration or custom shell scripts. '
59 'When using shell scripts, an authenticator script must be provided. '
60 'The environment variables available to this script depend on the '
61 'type of challenge. $CERTBOT_DOMAIN will always contain the domain '
62 'being authenticated. For HTTP-01 and DNS-01, $CERTBOT_VALIDATION '
63 'is the validation string, and $CERTBOT_TOKEN is the filename of the '
64 'resource requested when performing an HTTP-01 challenge. When '
65 'performing a TLS-SNI-01 challenge, $CERTBOT_SNI_DOMAIN will contain '
66 'the SNI name for which the ACME server expects to be presented with '
67 'the self-signed certificate located at $CERTBOT_CERT_PATH. The '
68 'secret key needed to complete the TLS handshake is located at '
69 '$CERTBOT_KEY_PATH. An additional cleanup script can also be '
70 'provided and can use the additional variable $CERTBOT_AUTH_OUTPUT '
71 'which contains the stdout output from the auth script.')
72 _DNS_INSTRUCTIONS = """\
73 Please deploy a DNS TXT record under the name
74 {domain} with the following value:
75
76 {validation}
77
78 Before continuing, verify the record is deployed."""
79 _HTTP_INSTRUCTIONS = """\
80 Create a file containing just this data:
81
82 {validation}
83
84 And make it available on your web server at this URL:
85
86 {uri}
87 """
88 _TLSSNI_INSTRUCTIONS = """\
89 Configure the service listening on port {port} to present the certificate
90 {cert}
91 using the secret key
92 {key}
93 when it receives a TLS ClientHello with the SNI extension set to
94 {sni_domain}
95 """
96
97 def __init__(self, *args, **kwargs):
98 super(Authenticator, self).__init__(*args, **kwargs)
99 self.reverter = reverter.Reverter(self.config)
100 self.reverter.recovery_routine()
101 self.env = dict()
102 self.tls_sni_01 = None
103
104 @classmethod
105 def add_parser_arguments(cls, add):
106 add('auth-hook',
107 help='Path or command to execute for the authentication script')
108 add('cleanup-hook',
109 help='Path or command to execute for the cleanup script')
110 add('public-ip-logging-ok', action='store_true',
111 help='Automatically allows public IP logging (default: Ask)')
112
113 def prepare(self): # pylint: disable=missing-docstring
114 if self.config.noninteractive_mode and not self.conf('auth-hook'):
115 raise errors.PluginError(
116 'An authentication script must be provided with --{0} when '
117 'using the manual plugin non-interactively.'.format(
118 self.option_name('auth-hook')))
119 self._validate_hooks()
120
121 def _validate_hooks(self):
122 if self.config.validate_hooks:
123 for name in ('auth-hook', 'cleanup-hook'):
124 hook = self.conf(name)
125 if hook is not None:
126 hook_prefix = self.option_name(name)[:-len('-hook')]
127 hooks.validate_hook(hook, hook_prefix)
128
129 def more_info(self): # pylint: disable=missing-docstring,no-self-use
130 return (
131 'This plugin allows the user to customize setup for domain '
132 'validation challenges either through shell scripts provided by '
133 'the user or by performing the setup manually.')
134
135 def get_chall_pref(self, domain):
136 # pylint: disable=missing-docstring,no-self-use,unused-argument
137 return [challenges.HTTP01, challenges.DNS01, challenges.TLSSNI01]
138
139 def perform(self, achalls): # pylint: disable=missing-docstring
140 self._verify_ip_logging_ok()
141 if self.conf('auth-hook'):
142 perform_achall = self._perform_achall_with_script
143 else:
144 perform_achall = self._perform_achall_manually
145
146 responses = []
147 for achall in achalls:
148 if isinstance(achall.chall, challenges.TLSSNI01):
149 # Make a new ManualTlsSni01 instance for each challenge
150 # because the manual plugin deals with one challenge at a time.
151 self.tls_sni_01 = ManualTlsSni01(self)
152 self.tls_sni_01.add_chall(achall)
153 self.tls_sni_01.perform()
154 perform_achall(achall)
155 responses.append(achall.response(achall.account_key))
156 return responses
157
158 def _verify_ip_logging_ok(self):
159 if not self.conf('public-ip-logging-ok'):
160 cli_flag = '--{0}'.format(self.option_name('public-ip-logging-ok'))
161 msg = ('NOTE: The IP of this machine will be publicly logged as '
162 "having requested this certificate. If you're running "
163 'certbot in manual mode on a machine that is not your '
164 "server, please ensure you're okay with that.\n\n"
165 'Are you OK with your IP being logged?')
166 display = zope.component.getUtility(interfaces.IDisplay)
167 if display.yesno(msg, cli_flag=cli_flag, force_interactive=True):
168 setattr(self.config, self.dest('public-ip-logging-ok'), True)
169 else:
170 raise errors.PluginError('Must agree to IP logging to proceed')
171
172 def _perform_achall_with_script(self, achall):
173 env = dict(CERTBOT_DOMAIN=achall.domain,
174 CERTBOT_VALIDATION=achall.validation(achall.account_key))
175 if isinstance(achall.chall, challenges.HTTP01):
176 env['CERTBOT_TOKEN'] = achall.chall.encode('token')
177 else:
178 os.environ.pop('CERTBOT_TOKEN', None)
179 if isinstance(achall.chall, challenges.TLSSNI01):
180 env['CERTBOT_CERT_PATH'] = self.tls_sni_01.get_cert_path(achall)
181 env['CERTBOT_KEY_PATH'] = self.tls_sni_01.get_key_path(achall)
182 env['CERTBOT_SNI_DOMAIN'] = self.tls_sni_01.get_z_domain(achall)
183 os.environ.pop('CERTBOT_VALIDATION', None)
184 env.pop('CERTBOT_VALIDATION')
185 else:
186 os.environ.pop('CERTBOT_CERT_PATH', None)
187 os.environ.pop('CERTBOT_KEY_PATH', None)
188 os.environ.pop('CERTBOT_SNI_DOMAIN', None)
189 os.environ.update(env)
190 _, out = hooks.execute(self.conf('auth-hook'))
191 env['CERTBOT_AUTH_OUTPUT'] = out.strip()
192 self.env[achall.domain] = env
193
194 def _perform_achall_manually(self, achall):
195 validation = achall.validation(achall.account_key)
196 if isinstance(achall.chall, challenges.HTTP01):
197 msg = self._HTTP_INSTRUCTIONS.format(
198 achall=achall, encoded_token=achall.chall.encode('token'),
199 port=self.config.http01_port,
200 uri=achall.chall.uri(achall.domain), validation=validation)
201 elif isinstance(achall.chall, challenges.DNS01):
202 msg = self._DNS_INSTRUCTIONS.format(
203 domain=achall.validation_domain_name(achall.domain),
204 validation=validation)
205 else:
206 assert isinstance(achall.chall, challenges.TLSSNI01)
207 msg = self._TLSSNI_INSTRUCTIONS.format(
208 cert=self.tls_sni_01.get_cert_path(achall),
209 key=self.tls_sni_01.get_key_path(achall),
210 port=self.config.tls_sni_01_port,
211 sni_domain=self.tls_sni_01.get_z_domain(achall))
212 display = zope.component.getUtility(interfaces.IDisplay)
213 display.notification(msg, wrap=False, force_interactive=True)
214
215 def cleanup(self, achalls): # pylint: disable=missing-docstring
216 if self.conf('cleanup-hook'):
217 for achall in achalls:
218 env = self.env.pop(achall.domain)
219 if 'CERTBOT_TOKEN' not in env:
220 os.environ.pop('CERTBOT_TOKEN', None)
221 os.environ.update(env)
222 hooks.execute(self.conf('cleanup-hook'))
223 self.reverter.recovery_routine()
224
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/certbot/plugins/manual.py b/certbot/plugins/manual.py
--- a/certbot/plugins/manual.py
+++ b/certbot/plugins/manual.py
@@ -189,7 +189,7 @@
os.environ.update(env)
_, out = hooks.execute(self.conf('auth-hook'))
env['CERTBOT_AUTH_OUTPUT'] = out.strip()
- self.env[achall.domain] = env
+ self.env[achall] = env
def _perform_achall_manually(self, achall):
validation = achall.validation(achall.account_key)
@@ -215,7 +215,7 @@
def cleanup(self, achalls): # pylint: disable=missing-docstring
if self.conf('cleanup-hook'):
for achall in achalls:
- env = self.env.pop(achall.domain)
+ env = self.env.pop(achall)
if 'CERTBOT_TOKEN' not in env:
os.environ.pop('CERTBOT_TOKEN', None)
os.environ.update(env)
|
{"golden_diff": "diff --git a/certbot/plugins/manual.py b/certbot/plugins/manual.py\n--- a/certbot/plugins/manual.py\n+++ b/certbot/plugins/manual.py\n@@ -189,7 +189,7 @@\n os.environ.update(env)\n _, out = hooks.execute(self.conf('auth-hook'))\n env['CERTBOT_AUTH_OUTPUT'] = out.strip()\n- self.env[achall.domain] = env\n+ self.env[achall] = env\n \n def _perform_achall_manually(self, achall):\n validation = achall.validation(achall.account_key)\n@@ -215,7 +215,7 @@\n def cleanup(self, achalls): # pylint: disable=missing-docstring\n if self.conf('cleanup-hook'):\n for achall in achalls:\n- env = self.env.pop(achall.domain)\n+ env = self.env.pop(achall)\n if 'CERTBOT_TOKEN' not in env:\n os.environ.pop('CERTBOT_TOKEN', None)\n os.environ.update(env)\n", "issue": "KeyError when using --manual-cleanup-hook with wildcard certificate\n## My operating system is (include version):\r\nUbuntu 16.04\r\n\r\n## I installed Certbot with (certbot-auto, OS package manager, pip, etc):\r\nGit - commit `31805c5`\r\n\r\n## I ran this command and it produced this output:\r\n```bash\r\n$ certbot --noninteractive --manual --manual-public-ip-logging-ok --preferred-challenges dns --manual-auth-hook \"/srv/letsencrypt-lexicon.sh auth\" --manual-cleanup-hook \"/srv/letsencrypt-lexicon.sh cleanup\" --server https://acme-staging-v02.api.letsencrypt.org/directory --cert-name xenial.alberon.co.uk -d xenial.alberon.co.uk,*.xenial.alberon.co.uk -m [email protected] --agree-tos\r\n...\r\nTraceback (most recent call last):\r\n File \"/srv/certbot/venv/bin/certbot\", line 11, in <module>\r\n load_entry_point('certbot', 'console_scripts', 'certbot')()\r\n File \"/srv/certbot/certbot/main.py\", line 1266, in main\r\n return config.func(config, plugins)\r\n File \"/srv/certbot/certbot/main.py\", line 1157, in certonly\r\n lineage = _get_and_save_cert(le_client, config, domains, certname, lineage)\r\n File \"/srv/certbot/certbot/main.py\", line 118, in _get_and_save_cert\r\n lineage = le_client.obtain_and_enroll_certificate(domains, certname)\r\n File \"/srv/certbot/certbot/client.py\", line 349, in obtain_and_enroll_certificate\r\n cert, chain, key, _ = self.obtain_certificate(domains)\r\n File \"/srv/certbot/certbot/client.py\", line 293, in obtain_certificate\r\n orderr = self._get_order_and_authorizations(csr.data, self.config.allow_subset_of_names)\r\n File \"/srv/certbot/certbot/client.py\", line 329, in _get_order_and_authorizations\r\n authzr = self.auth_handler.handle_authorizations(orderr, best_effort)\r\n File \"/srv/certbot/certbot/auth_handler.py\", line 82, in handle_authorizations\r\n self._respond(resp, best_effort)\r\n File \"/srv/certbot/certbot/auth_handler.py\", line 159, in _respond\r\n self._cleanup_challenges(active_achalls)\r\n File \"/srv/certbot/certbot/auth_handler.py\", line 304, in _cleanup_challenges\r\n self.auth.cleanup(achalls)\r\n File \"/srv/certbot/certbot/plugins/manual.py\", line 218, in cleanup\r\n env = self.env.pop(achall.domain)\r\nKeyError: u'xenial.alberon.co.uk'\r\n```\r\n\r\n## Certbot's behavior differed from what I expected because:\r\nIt crashed.\r\n\r\nThis only seems to happen when using `--manual-cleanup-hook` *and* including both `xenial.alberon.co.uk` and `*.xenial.alberon.co.uk`.\r\n\r\nI think `self.env.pop(achall.domain)` gets called twice, both times with `achall.domain = 'xenial.alberon.co.uk'`, and the second time it's already been removed. 
I would guess the key needs to be changed to something else now that one domain may require two separate challenges.\r\n\r\n## Here is a Certbot log showing the issue (if available):\r\nhttps://gist.github.com/davejamesmiller/2a8c2f7a03cac9edd573c767a8fbedec\r\n\r\n## Here is the relevant nginx server block or Apache virtualhost for the domain I am configuring:\r\nN/A\n", "before_files": [{"content": "\"\"\"Manual authenticator plugin\"\"\"\nimport os\n\nimport zope.component\nimport zope.interface\n\nfrom acme import challenges\n\nfrom certbot import interfaces\nfrom certbot import errors\nfrom certbot import hooks\nfrom certbot import reverter\nfrom certbot.plugins import common\n\n\nclass ManualTlsSni01(common.TLSSNI01):\n \"\"\"TLS-SNI-01 authenticator for the Manual plugin\n\n :ivar configurator: Authenticator object\n :type configurator: :class:`~certbot.plugins.manual.Authenticator`\n\n :ivar list achalls: Annotated\n class:`~certbot.achallenges.KeyAuthorizationAnnotatedChallenge`\n challenges\n\n :param list indices: Meant to hold indices of challenges in a\n larger array. NginxTlsSni01 is capable of solving many challenges\n at once which causes an indexing issue within NginxConfigurator\n who must return all responses in order. Imagine NginxConfigurator\n maintaining state about where all of the http-01 Challenges,\n TLS-SNI-01 Challenges belong in the response array. This is an\n optional utility.\n\n :param str challenge_conf: location of the challenge config file\n \"\"\"\n\n def perform(self):\n \"\"\"Create the SSL certificates and private keys\"\"\"\n\n for achall in self.achalls:\n self._setup_challenge_cert(achall)\n\n\[email protected](interfaces.IAuthenticator)\[email protected](interfaces.IPluginFactory)\nclass Authenticator(common.Plugin):\n \"\"\"Manual authenticator\n\n This plugin allows the user to perform the domain validation\n challenge(s) themselves. This either be done manually by the user or\n through shell scripts provided to Certbot.\n\n \"\"\"\n\n description = 'Manual configuration or run your own shell scripts'\n hidden = True\n long_description = (\n 'Authenticate through manual configuration or custom shell scripts. '\n 'When using shell scripts, an authenticator script must be provided. '\n 'The environment variables available to this script depend on the '\n 'type of challenge. $CERTBOT_DOMAIN will always contain the domain '\n 'being authenticated. For HTTP-01 and DNS-01, $CERTBOT_VALIDATION '\n 'is the validation string, and $CERTBOT_TOKEN is the filename of the '\n 'resource requested when performing an HTTP-01 challenge. When '\n 'performing a TLS-SNI-01 challenge, $CERTBOT_SNI_DOMAIN will contain '\n 'the SNI name for which the ACME server expects to be presented with '\n 'the self-signed certificate located at $CERTBOT_CERT_PATH. The '\n 'secret key needed to complete the TLS handshake is located at '\n '$CERTBOT_KEY_PATH. 
An additional cleanup script can also be '\n 'provided and can use the additional variable $CERTBOT_AUTH_OUTPUT '\n 'which contains the stdout output from the auth script.')\n _DNS_INSTRUCTIONS = \"\"\"\\\nPlease deploy a DNS TXT record under the name\n{domain} with the following value:\n\n{validation}\n\nBefore continuing, verify the record is deployed.\"\"\"\n _HTTP_INSTRUCTIONS = \"\"\"\\\nCreate a file containing just this data:\n\n{validation}\n\nAnd make it available on your web server at this URL:\n\n{uri}\n\"\"\"\n _TLSSNI_INSTRUCTIONS = \"\"\"\\\nConfigure the service listening on port {port} to present the certificate\n{cert}\nusing the secret key\n{key}\nwhen it receives a TLS ClientHello with the SNI extension set to\n{sni_domain}\n\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(Authenticator, self).__init__(*args, **kwargs)\n self.reverter = reverter.Reverter(self.config)\n self.reverter.recovery_routine()\n self.env = dict()\n self.tls_sni_01 = None\n\n @classmethod\n def add_parser_arguments(cls, add):\n add('auth-hook',\n help='Path or command to execute for the authentication script')\n add('cleanup-hook',\n help='Path or command to execute for the cleanup script')\n add('public-ip-logging-ok', action='store_true',\n help='Automatically allows public IP logging (default: Ask)')\n\n def prepare(self): # pylint: disable=missing-docstring\n if self.config.noninteractive_mode and not self.conf('auth-hook'):\n raise errors.PluginError(\n 'An authentication script must be provided with --{0} when '\n 'using the manual plugin non-interactively.'.format(\n self.option_name('auth-hook')))\n self._validate_hooks()\n\n def _validate_hooks(self):\n if self.config.validate_hooks:\n for name in ('auth-hook', 'cleanup-hook'):\n hook = self.conf(name)\n if hook is not None:\n hook_prefix = self.option_name(name)[:-len('-hook')]\n hooks.validate_hook(hook, hook_prefix)\n\n def more_info(self): # pylint: disable=missing-docstring,no-self-use\n return (\n 'This plugin allows the user to customize setup for domain '\n 'validation challenges either through shell scripts provided by '\n 'the user or by performing the setup manually.')\n\n def get_chall_pref(self, domain):\n # pylint: disable=missing-docstring,no-self-use,unused-argument\n return [challenges.HTTP01, challenges.DNS01, challenges.TLSSNI01]\n\n def perform(self, achalls): # pylint: disable=missing-docstring\n self._verify_ip_logging_ok()\n if self.conf('auth-hook'):\n perform_achall = self._perform_achall_with_script\n else:\n perform_achall = self._perform_achall_manually\n\n responses = []\n for achall in achalls:\n if isinstance(achall.chall, challenges.TLSSNI01):\n # Make a new ManualTlsSni01 instance for each challenge\n # because the manual plugin deals with one challenge at a time.\n self.tls_sni_01 = ManualTlsSni01(self)\n self.tls_sni_01.add_chall(achall)\n self.tls_sni_01.perform()\n perform_achall(achall)\n responses.append(achall.response(achall.account_key))\n return responses\n\n def _verify_ip_logging_ok(self):\n if not self.conf('public-ip-logging-ok'):\n cli_flag = '--{0}'.format(self.option_name('public-ip-logging-ok'))\n msg = ('NOTE: The IP of this machine will be publicly logged as '\n \"having requested this certificate. 
If you're running \"\n 'certbot in manual mode on a machine that is not your '\n \"server, please ensure you're okay with that.\\n\\n\"\n 'Are you OK with your IP being logged?')\n display = zope.component.getUtility(interfaces.IDisplay)\n if display.yesno(msg, cli_flag=cli_flag, force_interactive=True):\n setattr(self.config, self.dest('public-ip-logging-ok'), True)\n else:\n raise errors.PluginError('Must agree to IP logging to proceed')\n\n def _perform_achall_with_script(self, achall):\n env = dict(CERTBOT_DOMAIN=achall.domain,\n CERTBOT_VALIDATION=achall.validation(achall.account_key))\n if isinstance(achall.chall, challenges.HTTP01):\n env['CERTBOT_TOKEN'] = achall.chall.encode('token')\n else:\n os.environ.pop('CERTBOT_TOKEN', None)\n if isinstance(achall.chall, challenges.TLSSNI01):\n env['CERTBOT_CERT_PATH'] = self.tls_sni_01.get_cert_path(achall)\n env['CERTBOT_KEY_PATH'] = self.tls_sni_01.get_key_path(achall)\n env['CERTBOT_SNI_DOMAIN'] = self.tls_sni_01.get_z_domain(achall)\n os.environ.pop('CERTBOT_VALIDATION', None)\n env.pop('CERTBOT_VALIDATION')\n else:\n os.environ.pop('CERTBOT_CERT_PATH', None)\n os.environ.pop('CERTBOT_KEY_PATH', None)\n os.environ.pop('CERTBOT_SNI_DOMAIN', None)\n os.environ.update(env)\n _, out = hooks.execute(self.conf('auth-hook'))\n env['CERTBOT_AUTH_OUTPUT'] = out.strip()\n self.env[achall.domain] = env\n\n def _perform_achall_manually(self, achall):\n validation = achall.validation(achall.account_key)\n if isinstance(achall.chall, challenges.HTTP01):\n msg = self._HTTP_INSTRUCTIONS.format(\n achall=achall, encoded_token=achall.chall.encode('token'),\n port=self.config.http01_port,\n uri=achall.chall.uri(achall.domain), validation=validation)\n elif isinstance(achall.chall, challenges.DNS01):\n msg = self._DNS_INSTRUCTIONS.format(\n domain=achall.validation_domain_name(achall.domain),\n validation=validation)\n else:\n assert isinstance(achall.chall, challenges.TLSSNI01)\n msg = self._TLSSNI_INSTRUCTIONS.format(\n cert=self.tls_sni_01.get_cert_path(achall),\n key=self.tls_sni_01.get_key_path(achall),\n port=self.config.tls_sni_01_port,\n sni_domain=self.tls_sni_01.get_z_domain(achall))\n display = zope.component.getUtility(interfaces.IDisplay)\n display.notification(msg, wrap=False, force_interactive=True)\n\n def cleanup(self, achalls): # pylint: disable=missing-docstring\n if self.conf('cleanup-hook'):\n for achall in achalls:\n env = self.env.pop(achall.domain)\n if 'CERTBOT_TOKEN' not in env:\n os.environ.pop('CERTBOT_TOKEN', None)\n os.environ.update(env)\n hooks.execute(self.conf('cleanup-hook'))\n self.reverter.recovery_routine()\n", "path": "certbot/plugins/manual.py"}], "after_files": [{"content": "\"\"\"Manual authenticator plugin\"\"\"\nimport os\n\nimport zope.component\nimport zope.interface\n\nfrom acme import challenges\n\nfrom certbot import interfaces\nfrom certbot import errors\nfrom certbot import hooks\nfrom certbot import reverter\nfrom certbot.plugins import common\n\n\nclass ManualTlsSni01(common.TLSSNI01):\n \"\"\"TLS-SNI-01 authenticator for the Manual plugin\n\n :ivar configurator: Authenticator object\n :type configurator: :class:`~certbot.plugins.manual.Authenticator`\n\n :ivar list achalls: Annotated\n class:`~certbot.achallenges.KeyAuthorizationAnnotatedChallenge`\n challenges\n\n :param list indices: Meant to hold indices of challenges in a\n larger array. NginxTlsSni01 is capable of solving many challenges\n at once which causes an indexing issue within NginxConfigurator\n who must return all responses in order. 
Imagine NginxConfigurator\n maintaining state about where all of the http-01 Challenges,\n TLS-SNI-01 Challenges belong in the response array. This is an\n optional utility.\n\n :param str challenge_conf: location of the challenge config file\n \"\"\"\n\n def perform(self):\n \"\"\"Create the SSL certificates and private keys\"\"\"\n\n for achall in self.achalls:\n self._setup_challenge_cert(achall)\n\n\[email protected](interfaces.IAuthenticator)\[email protected](interfaces.IPluginFactory)\nclass Authenticator(common.Plugin):\n \"\"\"Manual authenticator\n\n This plugin allows the user to perform the domain validation\n challenge(s) themselves. This either be done manually by the user or\n through shell scripts provided to Certbot.\n\n \"\"\"\n\n description = 'Manual configuration or run your own shell scripts'\n hidden = True\n long_description = (\n 'Authenticate through manual configuration or custom shell scripts. '\n 'When using shell scripts, an authenticator script must be provided. '\n 'The environment variables available to this script depend on the '\n 'type of challenge. $CERTBOT_DOMAIN will always contain the domain '\n 'being authenticated. For HTTP-01 and DNS-01, $CERTBOT_VALIDATION '\n 'is the validation string, and $CERTBOT_TOKEN is the filename of the '\n 'resource requested when performing an HTTP-01 challenge. When '\n 'performing a TLS-SNI-01 challenge, $CERTBOT_SNI_DOMAIN will contain '\n 'the SNI name for which the ACME server expects to be presented with '\n 'the self-signed certificate located at $CERTBOT_CERT_PATH. The '\n 'secret key needed to complete the TLS handshake is located at '\n '$CERTBOT_KEY_PATH. An additional cleanup script can also be '\n 'provided and can use the additional variable $CERTBOT_AUTH_OUTPUT '\n 'which contains the stdout output from the auth script.')\n _DNS_INSTRUCTIONS = \"\"\"\\\nPlease deploy a DNS TXT record under the name\n{domain} with the following value:\n\n{validation}\n\nBefore continuing, verify the record is deployed.\"\"\"\n _HTTP_INSTRUCTIONS = \"\"\"\\\nCreate a file containing just this data:\n\n{validation}\n\nAnd make it available on your web server at this URL:\n\n{uri}\n\"\"\"\n _TLSSNI_INSTRUCTIONS = \"\"\"\\\nConfigure the service listening on port {port} to present the certificate\n{cert}\nusing the secret key\n{key}\nwhen it receives a TLS ClientHello with the SNI extension set to\n{sni_domain}\n\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(Authenticator, self).__init__(*args, **kwargs)\n self.reverter = reverter.Reverter(self.config)\n self.reverter.recovery_routine()\n self.env = dict()\n self.tls_sni_01 = None\n\n @classmethod\n def add_parser_arguments(cls, add):\n add('auth-hook',\n help='Path or command to execute for the authentication script')\n add('cleanup-hook',\n help='Path or command to execute for the cleanup script')\n add('public-ip-logging-ok', action='store_true',\n help='Automatically allows public IP logging (default: Ask)')\n\n def prepare(self): # pylint: disable=missing-docstring\n if self.config.noninteractive_mode and not self.conf('auth-hook'):\n raise errors.PluginError(\n 'An authentication script must be provided with --{0} when '\n 'using the manual plugin non-interactively.'.format(\n self.option_name('auth-hook')))\n self._validate_hooks()\n\n def _validate_hooks(self):\n if self.config.validate_hooks:\n for name in ('auth-hook', 'cleanup-hook'):\n hook = self.conf(name)\n if hook is not None:\n hook_prefix = self.option_name(name)[:-len('-hook')]\n 
hooks.validate_hook(hook, hook_prefix)\n\n def more_info(self): # pylint: disable=missing-docstring,no-self-use\n return (\n 'This plugin allows the user to customize setup for domain '\n 'validation challenges either through shell scripts provided by '\n 'the user or by performing the setup manually.')\n\n def get_chall_pref(self, domain):\n # pylint: disable=missing-docstring,no-self-use,unused-argument\n return [challenges.HTTP01, challenges.DNS01, challenges.TLSSNI01]\n\n def perform(self, achalls): # pylint: disable=missing-docstring\n self._verify_ip_logging_ok()\n if self.conf('auth-hook'):\n perform_achall = self._perform_achall_with_script\n else:\n perform_achall = self._perform_achall_manually\n\n responses = []\n for achall in achalls:\n if isinstance(achall.chall, challenges.TLSSNI01):\n # Make a new ManualTlsSni01 instance for each challenge\n # because the manual plugin deals with one challenge at a time.\n self.tls_sni_01 = ManualTlsSni01(self)\n self.tls_sni_01.add_chall(achall)\n self.tls_sni_01.perform()\n perform_achall(achall)\n responses.append(achall.response(achall.account_key))\n return responses\n\n def _verify_ip_logging_ok(self):\n if not self.conf('public-ip-logging-ok'):\n cli_flag = '--{0}'.format(self.option_name('public-ip-logging-ok'))\n msg = ('NOTE: The IP of this machine will be publicly logged as '\n \"having requested this certificate. If you're running \"\n 'certbot in manual mode on a machine that is not your '\n \"server, please ensure you're okay with that.\\n\\n\"\n 'Are you OK with your IP being logged?')\n display = zope.component.getUtility(interfaces.IDisplay)\n if display.yesno(msg, cli_flag=cli_flag, force_interactive=True):\n setattr(self.config, self.dest('public-ip-logging-ok'), True)\n else:\n raise errors.PluginError('Must agree to IP logging to proceed')\n\n def _perform_achall_with_script(self, achall):\n env = dict(CERTBOT_DOMAIN=achall.domain,\n CERTBOT_VALIDATION=achall.validation(achall.account_key))\n if isinstance(achall.chall, challenges.HTTP01):\n env['CERTBOT_TOKEN'] = achall.chall.encode('token')\n else:\n os.environ.pop('CERTBOT_TOKEN', None)\n if isinstance(achall.chall, challenges.TLSSNI01):\n env['CERTBOT_CERT_PATH'] = self.tls_sni_01.get_cert_path(achall)\n env['CERTBOT_KEY_PATH'] = self.tls_sni_01.get_key_path(achall)\n env['CERTBOT_SNI_DOMAIN'] = self.tls_sni_01.get_z_domain(achall)\n os.environ.pop('CERTBOT_VALIDATION', None)\n env.pop('CERTBOT_VALIDATION')\n else:\n os.environ.pop('CERTBOT_CERT_PATH', None)\n os.environ.pop('CERTBOT_KEY_PATH', None)\n os.environ.pop('CERTBOT_SNI_DOMAIN', None)\n os.environ.update(env)\n _, out = hooks.execute(self.conf('auth-hook'))\n env['CERTBOT_AUTH_OUTPUT'] = out.strip()\n self.env[achall] = env\n\n def _perform_achall_manually(self, achall):\n validation = achall.validation(achall.account_key)\n if isinstance(achall.chall, challenges.HTTP01):\n msg = self._HTTP_INSTRUCTIONS.format(\n achall=achall, encoded_token=achall.chall.encode('token'),\n port=self.config.http01_port,\n uri=achall.chall.uri(achall.domain), validation=validation)\n elif isinstance(achall.chall, challenges.DNS01):\n msg = self._DNS_INSTRUCTIONS.format(\n domain=achall.validation_domain_name(achall.domain),\n validation=validation)\n else:\n assert isinstance(achall.chall, challenges.TLSSNI01)\n msg = self._TLSSNI_INSTRUCTIONS.format(\n cert=self.tls_sni_01.get_cert_path(achall),\n key=self.tls_sni_01.get_key_path(achall),\n port=self.config.tls_sni_01_port,\n 
sni_domain=self.tls_sni_01.get_z_domain(achall))\n display = zope.component.getUtility(interfaces.IDisplay)\n display.notification(msg, wrap=False, force_interactive=True)\n\n def cleanup(self, achalls): # pylint: disable=missing-docstring\n if self.conf('cleanup-hook'):\n for achall in achalls:\n env = self.env.pop(achall)\n if 'CERTBOT_TOKEN' not in env:\n os.environ.pop('CERTBOT_TOKEN', None)\n os.environ.update(env)\n hooks.execute(self.conf('cleanup-hook'))\n self.reverter.recovery_routine()\n", "path": "certbot/plugins/manual.py"}]}
| 3,840 | 228 |
gh_patches_debug_26813
|
rasdani/github-patches
|
git_diff
|
liberapay__liberapay.com-138
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Goal page is not accessible once connected
"405 Method Not Allowed"
on this page
https://liberapay.com/unisson/goal.html when i want to change my goal
I'm connected on my account.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `liberapay/utils/__init__.py`
Content:
```
1 # encoding: utf8
2
3 from __future__ import absolute_import, division, print_function, unicode_literals
4
5 from base64 import b64decode, b64encode
6 from datetime import date, datetime, timedelta
7 import re
8
9 from aspen import Response, json
10 from aspen.utils import to_rfc822, utcnow
11 from markupsafe import Markup
12 from postgres.cursors import SimpleCursorBase
13
14 import liberapay
15 from liberapay.exceptions import AuthRequired
16 from liberapay.utils.i18n import Money
17
18
19 BEGINNING_OF_EPOCH = to_rfc822(datetime(1970, 1, 1)).encode('ascii')
20
21
22 def get_participant(state, restrict=True, redirect_stub=True, allow_member=False):
23 """Given a Request, raise Response or return Participant.
24
25 If restrict is True then we'll restrict access to owners and admins.
26
27 """
28 request = state['request']
29 user = state['user']
30 slug = request.line.uri.path['username']
31 _ = state['_']
32
33 if restrict and user.ANON:
34 raise AuthRequired
35
36 if slug.startswith('~'):
37 thing = 'id'
38 value = slug[1:]
39 participant = user if user and str(user.id) == value else None
40 else:
41 thing = 'lower(username)'
42 value = slug.lower()
43 participant = user if user and user.username.lower() == value else None
44
45 if participant is None:
46 from liberapay.models.participant import Participant # avoid circular import
47 participant = Participant._from_thing(thing, value) if value else None
48 if participant is None or participant.kind == 'community':
49 raise Response(404)
50
51 if request.method in ('GET', 'HEAD'):
52 if slug != participant.username:
53 canon = '/' + participant.username + request.line.uri[len(slug)+1:]
54 raise Response(302, headers={'Location': canon})
55
56 status = participant.status
57 if status == 'closed':
58 if user.is_admin:
59 return participant
60 raise Response(410)
61 elif status == 'stub':
62 if redirect_stub:
63 to = participant.resolve_stub()
64 assert to
65 raise Response(302, headers={'Location': to})
66
67 if restrict:
68 if participant != user:
69 if allow_member and participant.kind == 'group' and user.member_of(participant):
70 pass
71 elif not user.is_admin:
72 raise Response(403, _("You are not authorized to access this page."))
73
74 return participant
75
76
77 def b64decode_s(s, **kw):
78 udecode = lambda a: a.decode('utf8')
79 if s[:1] == b'.':
80 udecode = lambda a: a
81 s = s[1:]
82 s = s.replace(b'~', b'=')
83 try:
84 return udecode(b64decode(s, '-_'))
85 except Exception:
86 try:
87 # For retrocompatibility
88 return udecode(b64decode(s))
89 except Exception:
90 pass
91 if 'default' in kw:
92 return kw['default']
93 raise Response(400, "invalid base64 input")
94
95
96 def b64encode_s(s):
97 prefix = b''
98 if not isinstance(s, bytes):
99 s = s.encode('utf8')
100 else:
101 # Check whether the string is binary or already utf8
102 try:
103 s.decode('utf8')
104 except UnicodeError:
105 prefix = b'.'
106 return prefix + b64encode(s, b'-_').replace(b'=', b'~')
107
108
109 def update_global_stats(website):
110 website.gnusers = website.db.one("""
111 SELECT count(*)
112 FROM participants
113 WHERE status = 'active'
114 AND kind <> 'community';
115 """)
116 transfer_volume = website.db.one("""
117 SELECT coalesce(sum(amount), 0)
118 FROM current_tips
119 WHERE is_funded
120 """)
121 website.gmonthly_volume = Money(transfer_volume * 52 / 12, 'EUR')
122
123
124 def _execute(this, sql, params=[]):
125 print(sql.strip(), params)
126 super(SimpleCursorBase, this).execute(sql, params)
127
128 def log_cursor(f):
129 "Prints sql and params to stdout. Works globaly so watch for threaded use."
130 def wrapper(*a, **kw):
131 try:
132 SimpleCursorBase.execute = _execute
133 ret = f(*a, **kw)
134 finally:
135 del SimpleCursorBase.execute
136 return ret
137 return wrapper
138
139
140 def excerpt_intro(text, length=175, append='…'):
141 if not text:
142 return ''
143 if len(text) > length:
144 return text[:length] + append
145 return text
146
147
148 def is_card_expired(exp_year, exp_month):
149 today = date.today()
150 cur_year, cur_month = today.year, today.month
151 return exp_year < cur_year or exp_year == cur_year and exp_month < cur_month
152
153
154 def set_cookie(cookies, key, value, expires=None, httponly=True, path=b'/'):
155 cookies[key] = value
156 cookie = cookies[key]
157 if expires:
158 if isinstance(expires, timedelta):
159 expires += utcnow()
160 if isinstance(expires, datetime):
161 expires = to_rfc822(expires).encode('ascii')
162 cookie[b'expires'] = expires
163 if httponly:
164 cookie[b'httponly'] = True
165 if path:
166 cookie[b'path'] = path
167 if liberapay.canonical_scheme == 'https':
168 cookie[b'secure'] = True
169
170
171 def erase_cookie(cookies, key, **kw):
172 set_cookie(cookies, key, '', BEGINNING_OF_EPOCH, **kw)
173
174
175 def filter_profile_subnav(user, participant, pages):
176 out = []
177 for foo, bar, show_them, show_others in pages:
178 if (user == participant and show_them) \
179 or (user != participant and show_others) \
180 or user.is_admin:
181 out.append((foo, bar))
182 return out
183
184
185 def to_javascript(obj):
186 """For when you want to inject an object into a <script> tag.
187 """
188 return json.dumps(obj).replace('</', '<\\/')
189
190
191 svg_attrs_re = re.compile(r'\s+(?:height|width|x|y|xmlns)=(["\']).*?\1')
192
193 def include_svg(svg, height, width, x=None, y=None):
194 """For when you want to include an SVG in an HTML page or in another SVG.
195 """
196 assert svg.startswith('<svg')
197 i = svg.find('>')
198 assert i != -1
199 d = locals()
200 attrs = svg_attrs_re.sub('', svg[4:i])
201 for a in ('height', 'width', 'x', 'y'):
202 v = d[a]
203 if v is None:
204 continue
205 attrs += ' %s="%s"' % (a, v)
206 return Markup(svg[:4] + attrs + svg[i:])
207
208
209 def group_by(iterable, key):
210 r = {}
211 for obj in iterable:
212 try:
213 k = obj[key]
214 except KeyError:
215 continue
216 r.setdefault(k, []).append(obj)
217 return r
218
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/liberapay/utils/__init__.py b/liberapay/utils/__init__.py
--- a/liberapay/utils/__init__.py
+++ b/liberapay/utils/__init__.py
@@ -6,6 +6,8 @@
from datetime import date, datetime, timedelta
import re
+from six.moves.urllib.parse import quote as urlquote
+
from aspen import Response, json
from aspen.utils import to_rfc822, utcnow
from markupsafe import Markup
@@ -13,6 +15,7 @@
import liberapay
from liberapay.exceptions import AuthRequired
+from liberapay.models.community import Community
from liberapay.utils.i18n import Money
@@ -74,6 +77,30 @@
return participant
+def get_community(state, restrict=False):
+ request, response = state['request'], state['response']
+ user = state['user']
+ name = request.path['name']
+
+ c = Community.from_name(name)
+ if request.method in ('GET', 'HEAD'):
+ if not c:
+ response.redirect('/for/new?name=' + urlquote(name))
+ if c.name != name:
+ response.redirect('/for/' + c.name + request.line.uri[5+len(name):])
+ elif not c:
+ raise Response(404)
+
+ if restrict:
+ if user.ANON:
+ raise AuthRequired
+ if user.id != c.creator and not user.is_admin:
+ _ = state['_']
+ raise Response(403, _("You are not authorized to access this page."))
+
+ return c
+
+
def b64decode_s(s, **kw):
udecode = lambda a: a.decode('utf8')
if s[:1] == b'.':
|
{"golden_diff": "diff --git a/liberapay/utils/__init__.py b/liberapay/utils/__init__.py\n--- a/liberapay/utils/__init__.py\n+++ b/liberapay/utils/__init__.py\n@@ -6,6 +6,8 @@\n from datetime import date, datetime, timedelta\n import re\n \n+from six.moves.urllib.parse import quote as urlquote\n+\n from aspen import Response, json\n from aspen.utils import to_rfc822, utcnow\n from markupsafe import Markup\n@@ -13,6 +15,7 @@\n \n import liberapay\n from liberapay.exceptions import AuthRequired\n+from liberapay.models.community import Community\n from liberapay.utils.i18n import Money\n \n \n@@ -74,6 +77,30 @@\n return participant\n \n \n+def get_community(state, restrict=False):\n+ request, response = state['request'], state['response']\n+ user = state['user']\n+ name = request.path['name']\n+\n+ c = Community.from_name(name)\n+ if request.method in ('GET', 'HEAD'):\n+ if not c:\n+ response.redirect('/for/new?name=' + urlquote(name))\n+ if c.name != name:\n+ response.redirect('/for/' + c.name + request.line.uri[5+len(name):])\n+ elif not c:\n+ raise Response(404)\n+\n+ if restrict:\n+ if user.ANON:\n+ raise AuthRequired\n+ if user.id != c.creator and not user.is_admin:\n+ _ = state['_']\n+ raise Response(403, _(\"You are not authorized to access this page.\"))\n+\n+ return c\n+\n+\n def b64decode_s(s, **kw):\n udecode = lambda a: a.decode('utf8')\n if s[:1] == b'.':\n", "issue": "Goal page is not accessible once connected\n\"405 Method Not Allowed\"\non this page \nhttps://liberapay.com/unisson/goal.html when i want to change my goal\nI'm connected on my account.\n\nGoal page is not accessible once connected\n\"405 Method Not Allowed\"\non this page \nhttps://liberapay.com/unisson/goal.html when i want to change my goal\nI'm connected on my account.\n\n", "before_files": [{"content": "# encoding: utf8\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom base64 import b64decode, b64encode\nfrom datetime import date, datetime, timedelta\nimport re\n\nfrom aspen import Response, json\nfrom aspen.utils import to_rfc822, utcnow\nfrom markupsafe import Markup\nfrom postgres.cursors import SimpleCursorBase\n\nimport liberapay\nfrom liberapay.exceptions import AuthRequired\nfrom liberapay.utils.i18n import Money\n\n\nBEGINNING_OF_EPOCH = to_rfc822(datetime(1970, 1, 1)).encode('ascii')\n\n\ndef get_participant(state, restrict=True, redirect_stub=True, allow_member=False):\n \"\"\"Given a Request, raise Response or return Participant.\n\n If restrict is True then we'll restrict access to owners and admins.\n\n \"\"\"\n request = state['request']\n user = state['user']\n slug = request.line.uri.path['username']\n _ = state['_']\n\n if restrict and user.ANON:\n raise AuthRequired\n\n if slug.startswith('~'):\n thing = 'id'\n value = slug[1:]\n participant = user if user and str(user.id) == value else None\n else:\n thing = 'lower(username)'\n value = slug.lower()\n participant = user if user and user.username.lower() == value else None\n\n if participant is None:\n from liberapay.models.participant import Participant # avoid circular import\n participant = Participant._from_thing(thing, value) if value else None\n if participant is None or participant.kind == 'community':\n raise Response(404)\n\n if request.method in ('GET', 'HEAD'):\n if slug != participant.username:\n canon = '/' + participant.username + request.line.uri[len(slug)+1:]\n raise Response(302, headers={'Location': canon})\n\n status = participant.status\n if status == 'closed':\n if 
user.is_admin:\n return participant\n raise Response(410)\n elif status == 'stub':\n if redirect_stub:\n to = participant.resolve_stub()\n assert to\n raise Response(302, headers={'Location': to})\n\n if restrict:\n if participant != user:\n if allow_member and participant.kind == 'group' and user.member_of(participant):\n pass\n elif not user.is_admin:\n raise Response(403, _(\"You are not authorized to access this page.\"))\n\n return participant\n\n\ndef b64decode_s(s, **kw):\n udecode = lambda a: a.decode('utf8')\n if s[:1] == b'.':\n udecode = lambda a: a\n s = s[1:]\n s = s.replace(b'~', b'=')\n try:\n return udecode(b64decode(s, '-_'))\n except Exception:\n try:\n # For retrocompatibility\n return udecode(b64decode(s))\n except Exception:\n pass\n if 'default' in kw:\n return kw['default']\n raise Response(400, \"invalid base64 input\")\n\n\ndef b64encode_s(s):\n prefix = b''\n if not isinstance(s, bytes):\n s = s.encode('utf8')\n else:\n # Check whether the string is binary or already utf8\n try:\n s.decode('utf8')\n except UnicodeError:\n prefix = b'.'\n return prefix + b64encode(s, b'-_').replace(b'=', b'~')\n\n\ndef update_global_stats(website):\n website.gnusers = website.db.one(\"\"\"\n SELECT count(*)\n FROM participants\n WHERE status = 'active'\n AND kind <> 'community';\n \"\"\")\n transfer_volume = website.db.one(\"\"\"\n SELECT coalesce(sum(amount), 0)\n FROM current_tips\n WHERE is_funded\n \"\"\")\n website.gmonthly_volume = Money(transfer_volume * 52 / 12, 'EUR')\n\n\ndef _execute(this, sql, params=[]):\n print(sql.strip(), params)\n super(SimpleCursorBase, this).execute(sql, params)\n\ndef log_cursor(f):\n \"Prints sql and params to stdout. Works globaly so watch for threaded use.\"\n def wrapper(*a, **kw):\n try:\n SimpleCursorBase.execute = _execute\n ret = f(*a, **kw)\n finally:\n del SimpleCursorBase.execute\n return ret\n return wrapper\n\n\ndef excerpt_intro(text, length=175, append='\u2026'):\n if not text:\n return ''\n if len(text) > length:\n return text[:length] + append\n return text\n\n\ndef is_card_expired(exp_year, exp_month):\n today = date.today()\n cur_year, cur_month = today.year, today.month\n return exp_year < cur_year or exp_year == cur_year and exp_month < cur_month\n\n\ndef set_cookie(cookies, key, value, expires=None, httponly=True, path=b'/'):\n cookies[key] = value\n cookie = cookies[key]\n if expires:\n if isinstance(expires, timedelta):\n expires += utcnow()\n if isinstance(expires, datetime):\n expires = to_rfc822(expires).encode('ascii')\n cookie[b'expires'] = expires\n if httponly:\n cookie[b'httponly'] = True\n if path:\n cookie[b'path'] = path\n if liberapay.canonical_scheme == 'https':\n cookie[b'secure'] = True\n\n\ndef erase_cookie(cookies, key, **kw):\n set_cookie(cookies, key, '', BEGINNING_OF_EPOCH, **kw)\n\n\ndef filter_profile_subnav(user, participant, pages):\n out = []\n for foo, bar, show_them, show_others in pages:\n if (user == participant and show_them) \\\n or (user != participant and show_others) \\\n or user.is_admin:\n out.append((foo, bar))\n return out\n\n\ndef to_javascript(obj):\n \"\"\"For when you want to inject an object into a <script> tag.\n \"\"\"\n return json.dumps(obj).replace('</', '<\\\\/')\n\n\nsvg_attrs_re = re.compile(r'\\s+(?:height|width|x|y|xmlns)=([\"\\']).*?\\1')\n\ndef include_svg(svg, height, width, x=None, y=None):\n \"\"\"For when you want to include an SVG in an HTML page or in another SVG.\n \"\"\"\n assert svg.startswith('<svg')\n i = svg.find('>')\n assert i != -1\n d = locals()\n 
attrs = svg_attrs_re.sub('', svg[4:i])\n for a in ('height', 'width', 'x', 'y'):\n v = d[a]\n if v is None:\n continue\n attrs += ' %s=\"%s\"' % (a, v)\n return Markup(svg[:4] + attrs + svg[i:])\n\n\ndef group_by(iterable, key):\n r = {}\n for obj in iterable:\n try:\n k = obj[key]\n except KeyError:\n continue\n r.setdefault(k, []).append(obj)\n return r\n", "path": "liberapay/utils/__init__.py"}], "after_files": [{"content": "# encoding: utf8\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom base64 import b64decode, b64encode\nfrom datetime import date, datetime, timedelta\nimport re\n\nfrom six.moves.urllib.parse import quote as urlquote\n\nfrom aspen import Response, json\nfrom aspen.utils import to_rfc822, utcnow\nfrom markupsafe import Markup\nfrom postgres.cursors import SimpleCursorBase\n\nimport liberapay\nfrom liberapay.exceptions import AuthRequired\nfrom liberapay.models.community import Community\nfrom liberapay.utils.i18n import Money\n\n\nBEGINNING_OF_EPOCH = to_rfc822(datetime(1970, 1, 1)).encode('ascii')\n\n\ndef get_participant(state, restrict=True, redirect_stub=True, allow_member=False):\n \"\"\"Given a Request, raise Response or return Participant.\n\n If restrict is True then we'll restrict access to owners and admins.\n\n \"\"\"\n request = state['request']\n user = state['user']\n slug = request.line.uri.path['username']\n _ = state['_']\n\n if restrict and user.ANON:\n raise AuthRequired\n\n if slug.startswith('~'):\n thing = 'id'\n value = slug[1:]\n participant = user if user and str(user.id) == value else None\n else:\n thing = 'lower(username)'\n value = slug.lower()\n participant = user if user and user.username.lower() == value else None\n\n if participant is None:\n from liberapay.models.participant import Participant # avoid circular import\n participant = Participant._from_thing(thing, value) if value else None\n if participant is None or participant.kind == 'community':\n raise Response(404)\n\n if request.method in ('GET', 'HEAD'):\n if slug != participant.username:\n canon = '/' + participant.username + request.line.uri[len(slug)+1:]\n raise Response(302, headers={'Location': canon})\n\n status = participant.status\n if status == 'closed':\n if user.is_admin:\n return participant\n raise Response(410)\n elif status == 'stub':\n if redirect_stub:\n to = participant.resolve_stub()\n assert to\n raise Response(302, headers={'Location': to})\n\n if restrict:\n if participant != user:\n if allow_member and participant.kind == 'group' and user.member_of(participant):\n pass\n elif not user.is_admin:\n raise Response(403, _(\"You are not authorized to access this page.\"))\n\n return participant\n\n\ndef get_community(state, restrict=False):\n request, response = state['request'], state['response']\n user = state['user']\n name = request.path['name']\n\n c = Community.from_name(name)\n if request.method in ('GET', 'HEAD'):\n if not c:\n response.redirect('/for/new?name=' + urlquote(name))\n if c.name != name:\n response.redirect('/for/' + c.name + request.line.uri[5+len(name):])\n elif not c:\n raise Response(404)\n\n if restrict:\n if user.ANON:\n raise AuthRequired\n if user.id != c.creator and not user.is_admin:\n _ = state['_']\n raise Response(403, _(\"You are not authorized to access this page.\"))\n\n return c\n\n\ndef b64decode_s(s, **kw):\n udecode = lambda a: a.decode('utf8')\n if s[:1] == b'.':\n udecode = lambda a: a\n s = s[1:]\n s = s.replace(b'~', b'=')\n try:\n return udecode(b64decode(s, '-_'))\n 
except Exception:\n try:\n # For retrocompatibility\n return udecode(b64decode(s))\n except Exception:\n pass\n if 'default' in kw:\n return kw['default']\n raise Response(400, \"invalid base64 input\")\n\n\ndef b64encode_s(s):\n prefix = b''\n if not isinstance(s, bytes):\n s = s.encode('utf8')\n else:\n # Check whether the string is binary or already utf8\n try:\n s.decode('utf8')\n except UnicodeError:\n prefix = b'.'\n return prefix + b64encode(s, b'-_').replace(b'=', b'~')\n\n\ndef update_global_stats(website):\n website.gnusers = website.db.one(\"\"\"\n SELECT count(*)\n FROM participants\n WHERE status = 'active'\n AND kind <> 'community';\n \"\"\")\n transfer_volume = website.db.one(\"\"\"\n SELECT coalesce(sum(amount), 0)\n FROM current_tips\n WHERE is_funded\n \"\"\")\n website.gmonthly_volume = Money(transfer_volume * 52 / 12, 'EUR')\n\n\ndef _execute(this, sql, params=[]):\n print(sql.strip(), params)\n super(SimpleCursorBase, this).execute(sql, params)\n\ndef log_cursor(f):\n \"Prints sql and params to stdout. Works globaly so watch for threaded use.\"\n def wrapper(*a, **kw):\n try:\n SimpleCursorBase.execute = _execute\n ret = f(*a, **kw)\n finally:\n del SimpleCursorBase.execute\n return ret\n return wrapper\n\n\ndef excerpt_intro(text, length=175, append='\u2026'):\n if not text:\n return ''\n if len(text) > length:\n return text[:length] + append\n return text\n\n\ndef is_card_expired(exp_year, exp_month):\n today = date.today()\n cur_year, cur_month = today.year, today.month\n return exp_year < cur_year or exp_year == cur_year and exp_month < cur_month\n\n\ndef set_cookie(cookies, key, value, expires=None, httponly=True, path=b'/'):\n cookies[key] = value\n cookie = cookies[key]\n if expires:\n if isinstance(expires, timedelta):\n expires += utcnow()\n if isinstance(expires, datetime):\n expires = to_rfc822(expires).encode('ascii')\n cookie[b'expires'] = expires\n if httponly:\n cookie[b'httponly'] = True\n if path:\n cookie[b'path'] = path\n if liberapay.canonical_scheme == 'https':\n cookie[b'secure'] = True\n\n\ndef erase_cookie(cookies, key, **kw):\n set_cookie(cookies, key, '', BEGINNING_OF_EPOCH, **kw)\n\n\ndef filter_profile_subnav(user, participant, pages):\n out = []\n for foo, bar, show_them, show_others in pages:\n if (user == participant and show_them) \\\n or (user != participant and show_others) \\\n or user.is_admin:\n out.append((foo, bar))\n return out\n\n\ndef to_javascript(obj):\n \"\"\"For when you want to inject an object into a <script> tag.\n \"\"\"\n return json.dumps(obj).replace('</', '<\\\\/')\n\n\nsvg_attrs_re = re.compile(r'\\s+(?:height|width|x|y|xmlns)=([\"\\']).*?\\1')\n\ndef include_svg(svg, height, width, x=None, y=None):\n \"\"\"For when you want to include an SVG in an HTML page or in another SVG.\n \"\"\"\n assert svg.startswith('<svg')\n i = svg.find('>')\n assert i != -1\n d = locals()\n attrs = svg_attrs_re.sub('', svg[4:i])\n for a in ('height', 'width', 'x', 'y'):\n v = d[a]\n if v is None:\n continue\n attrs += ' %s=\"%s\"' % (a, v)\n return Markup(svg[:4] + attrs + svg[i:])\n\n\ndef group_by(iterable, key):\n r = {}\n for obj in iterable:\n try:\n k = obj[key]\n except KeyError:\n continue\n r.setdefault(k, []).append(obj)\n return r\n", "path": "liberapay/utils/__init__.py"}]}
| 2,489 | 411 |
gh_patches_debug_29717
|
rasdani/github-patches
|
git_diff
|
azavea__raster-vision-745
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Not able to use default stride with sliding window semseg option
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rastervision/task/semantic_segmentation_config.py`
Content:
```
1 from copy import deepcopy
2 from typing import (List, Dict, Tuple, Union)
3
4 import rastervision as rv
5 from rastervision.task import SemanticSegmentation
6 from rastervision.core.class_map import (ClassMap, ClassItem)
7 from rastervision.task import (TaskConfig, TaskConfigBuilder)
8 from rastervision.protos.task_pb2 import TaskConfig as TaskConfigMsg
9 from rastervision.protos.class_item_pb2 import ClassItem as ClassItemMsg
10
11
12 class SemanticSegmentationConfig(TaskConfig):
13 class ChipOptions:
14 def __init__(self,
15 window_method='random_sample',
16 target_classes=None,
17 debug_chip_probability=0.25,
18 negative_survival_probability=1.0,
19 chips_per_scene=1000,
20 target_count_threshold=1000,
21 stride=None):
22 self.window_method = window_method
23 self.target_classes = target_classes
24 self.debug_chip_probability = debug_chip_probability
25 self.negative_survival_probability = negative_survival_probability
26 self.chips_per_scene = chips_per_scene
27 self.target_count_threshold = target_count_threshold
28 self.stride = stride
29
30 def __init__(self,
31 class_map,
32 predict_batch_size=10,
33 predict_package_uri=None,
34 debug=True,
35 chip_size=300,
36 chip_options=None):
37 super().__init__(rv.SEMANTIC_SEGMENTATION, predict_batch_size,
38 predict_package_uri, debug)
39 self.class_map = class_map
40 self.chip_size = chip_size
41 if chip_options is None:
42 chip_options = SemanticSegmentationConfig.ChipOptions()
43 self.chip_options = chip_options
44
45 def save_bundle_files(self, bundle_dir):
46 return (self, [])
47
48 def load_bundle_files(self, bundle_dir):
49 return self
50
51 def create_task(self, backend):
52 return SemanticSegmentation(self, backend)
53
54 def to_proto(self):
55 msg = super().to_proto()
56 chip_options = TaskConfigMsg.SemanticSegmentationConfig.ChipOptions(
57 window_method=self.chip_options.window_method,
58 target_classes=self.chip_options.target_classes,
59 debug_chip_probability=self.chip_options.debug_chip_probability,
60 negative_survival_probability=self.chip_options.
61 negative_survival_probability,
62 chips_per_scene=self.chip_options.chips_per_scene,
63 target_count_threshold=self.chip_options.target_count_threshold,
64 stride=self.chip_options.stride)
65
66 conf = TaskConfigMsg.SemanticSegmentationConfig(
67 chip_size=self.chip_size,
68 class_items=self.class_map.to_proto(),
69 chip_options=chip_options)
70 msg.MergeFrom(
71 TaskConfigMsg(
72 semantic_segmentation_config=conf,
73 predict_package_uri=self.predict_package_uri))
74
75 return msg
76
77
78 class SemanticSegmentationConfigBuilder(TaskConfigBuilder):
79 def __init__(self, prev=None):
80 config = {}
81 if prev:
82 config = {
83 'predict_batch_size': prev.predict_batch_size,
84 'predict_package_uri': prev.predict_package_uri,
85 'debug': prev.debug,
86 'class_map': prev.class_map,
87 'chip_size': prev.chip_size,
88 'chip_options': prev.chip_options
89 }
90 super().__init__(SemanticSegmentationConfig, config)
91
92 def from_proto(self, msg):
93 conf = msg.semantic_segmentation_config
94
95 negative_survival_probability = conf.chip_options \
96 .negative_survival_probability
97
98 return self.with_classes(list(conf.class_items)) \
99 .with_predict_batch_size(msg.predict_batch_size) \
100 .with_predict_package_uri(msg.predict_package_uri) \
101 .with_debug(msg.debug) \
102 .with_chip_size(conf.chip_size) \
103 .with_chip_options(
104 window_method=conf.chip_options.window_method,
105 target_classes=list(conf.chip_options.target_classes),
106 debug_chip_probability=conf.chip_options.debug_chip_probability,
107 negative_survival_probability=negative_survival_probability,
108 chips_per_scene=conf.chip_options.chips_per_scene,
109 target_count_threshold=conf.chip_options.target_count_threshold,
110 stride=conf.chip_options.stride)
111
112 def validate(self):
113 super().validate()
114 # Segmentation masks are stored as uint8 to save space, so can only handle 256
115 # classes. If this is really needed, we can add an option for saving with uint16.
116 max_classes = 256
117 if len(self.config['class_map']) > max_classes:
118 raise rv.ConfigError(
119 'Cannot use more than {} classes with semantic segmentation.'.
120 format(max_classes))
121
122 def with_classes(
123 self, classes: Union[ClassMap, List[str], List[ClassItemMsg], List[
124 ClassItem], Dict[str, int], Dict[str, Tuple[int, str]]]):
125 """Set the classes for this task.
126
127 Args:
128 classes: Either a list of class names, a dict which
129 maps class names to class ids, or a dict
130 which maps class names to a tuple of (class_id, color),
131 where color is a PIL color string.
132 """
133 b = deepcopy(self)
134 b.config['class_map'] = ClassMap.construct_from(classes)
135 return b
136
137 def with_chip_size(self, chip_size):
138 """Set the chip_size for this task.
139
140 Args:
141 chip_size: Integer value chip size
142 """
143 b = deepcopy(self)
144 b.config['chip_size'] = chip_size
145 return b
146
147 def with_chip_options(self,
148 window_method='random_sample',
149 target_classes=None,
150 debug_chip_probability=0.25,
151 negative_survival_probability=1.0,
152 chips_per_scene=1000,
153 target_count_threshold=1000,
154 stride=None):
155 """Sets semantic segmentation configurations for the Chip command
156
157 Args:
158 window_method: Window method to use for chipping.
159 Options are: random_sample, sliding
160 target_classes: list of class ids to train model on
161 debug_chip_probability: probability of generating a debug chip.
162 Applies to the 'random_sample' window method.
163 negative_survival_probability: probability that a sampled negative
164 chip will be utilized if it does not
165 contain more pixels than
166 target_count_threshold.
167 Applies to the 'random_sample' window method.
168 chips_per_scene: number of chips to generate per scene.
169 Applies to the 'random_sample' window method.
170 target_count_threshold: minimum number of pixels covering target_classes
171 that a chip must have.
172 Applies to the 'random_sample' window method.
173 stride: Stride of windows across image. Defaults to half the chip size.
174                 Applies to the 'sliding' window method.
175
176 Returns:
177 SemanticSegmentationConfigBuilder
178 """
179 b = deepcopy(self)
180
181 b.config['chip_options'] = SemanticSegmentationConfig.ChipOptions(
182 window_method=window_method,
183 target_classes=target_classes,
184 debug_chip_probability=debug_chip_probability,
185 negative_survival_probability=negative_survival_probability,
186 chips_per_scene=chips_per_scene,
187 target_count_threshold=target_count_threshold,
188 stride=stride)
189 return b
190
```
Path: `rastervision/task/semantic_segmentation.py`
Content:
```
1 from typing import List
2 import logging
3
4 import numpy as np
5
6 from .task import Task
7 from rastervision.core.box import Box
8 from rastervision.data.scene import Scene
9 from rastervision.data.label import SemanticSegmentationLabels
10
11 log = logging.getLogger(__name__)
12
13
14 def get_random_sample_train_windows(label_store, chip_size, class_map, extent,
15 chip_options, filter_windows):
16 prob = chip_options.negative_survival_probability
17 target_count_threshold = chip_options.target_count_threshold
18 target_classes = chip_options.target_classes
19 chips_per_scene = chip_options.chips_per_scene
20
21 if not target_classes:
22 all_class_ids = [item.id for item in class_map.get_items()]
23 target_classes = all_class_ids
24
25 windows = []
26 attempts = 0
27 while (attempts < chips_per_scene):
28 candidate_window = extent.make_random_square(chip_size)
29 if not filter_windows([candidate_window]):
30 continue
31 attempts = attempts + 1
32
33 if (prob >= 1.0):
34 windows.append(candidate_window)
35 elif attempts == chips_per_scene and len(windows) == 0:
36 windows.append(candidate_window)
37 else:
38 good = label_store.enough_target_pixels(
39 candidate_window, target_count_threshold, target_classes)
40 if good or (np.random.rand() < prob):
41 windows.append(candidate_window)
42
43 return windows
44
45
46 class SemanticSegmentation(Task):
47 """Task-derived type that implements the semantic segmentation task."""
48
49 def get_train_windows(self, scene: Scene) -> List[Box]:
50 """Get training windows covering a scene.
51
52 Args:
53 scene: The scene over-which windows are to be generated.
54
55 Returns:
56 A list of windows, list(Box)
57
58 """
59
60 def filter_windows(windows):
61 if scene.aoi_polygons:
62 windows = Box.filter_by_aoi(windows, scene.aoi_polygons)
63 return windows
64
65 raster_source = scene.raster_source
66 extent = raster_source.get_extent()
67 label_store = scene.ground_truth_label_source
68 chip_size = self.config.chip_size
69
70 chip_options = self.config.chip_options
71
72 if chip_options.window_method == 'random_sample':
73 return get_random_sample_train_windows(
74 label_store, chip_size, self.config.class_map, extent,
75 chip_options, filter_windows)
76 elif chip_options.window_method == 'sliding':
77 stride = chip_options.stride
78 if stride is None:
79 stride = chip_size / 2
80
81 return list(
82 filter_windows((extent.get_windows(chip_size, stride))))
83
84 def get_train_labels(self, window: Box, scene: Scene) -> np.ndarray:
85 """Get the training labels for the given window in the given scene.
86
87 Args:
88 window: The window over-which the labels are to be
89 retrieved.
90 scene: The scene from-which the window of labels is to be
91 extracted.
92
93 Returns:
94 An appropriately-shaped 2d np.ndarray with the labels
95 encoded as packed pixels.
96
97 """
98 label_store = scene.ground_truth_label_source
99 return label_store.get_labels(window)
100
101 def get_predict_windows(self, extent: Box) -> List[Box]:
102 """Get windows over-which predictions will be calculated.
103
104 Args:
105 extent: The overall extent of the area.
106
107 Returns:
108             A sequence of windows.
109
110 """
111 chip_size = self.config.chip_size
112 return extent.get_windows(chip_size, chip_size)
113
114 def post_process_predictions(self, labels, scene):
115 return labels
116
117 def save_debug_predict_image(self, scene, debug_dir_uri):
118 # TODO implement this
119 pass
120
121 def predict_scene(self, scene, tmp_dir):
122 """Predict on a single scene, and return the labels."""
123 log.info('Making predictions for scene')
124 raster_source = scene.raster_source
125 windows = self.get_predict_windows(raster_source.get_extent())
126
127 def label_fn(window):
128 chip = raster_source.get_chip(window)
129 labels = self.backend.predict([chip], [window], tmp_dir)
130 label_arr = labels.get_label_arr(window)
131
132 # Set NODATA pixels in imagery to predicted value of 0 (ie. ignore)
133 label_arr[np.sum(chip, axis=2) == 0] = 0
134
135 print('.', end='', flush=True)
136 return label_arr
137
138 return SemanticSegmentationLabels(windows, label_fn)
139
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/rastervision/task/semantic_segmentation.py b/rastervision/task/semantic_segmentation.py
--- a/rastervision/task/semantic_segmentation.py
+++ b/rastervision/task/semantic_segmentation.py
@@ -77,6 +77,7 @@
stride = chip_options.stride
if stride is None:
stride = chip_size / 2
+ stride = int(round(stride))
return list(
filter_windows((extent.get_windows(chip_size, stride))))
diff --git a/rastervision/task/semantic_segmentation_config.py b/rastervision/task/semantic_segmentation_config.py
--- a/rastervision/task/semantic_segmentation_config.py
+++ b/rastervision/task/semantic_segmentation_config.py
@@ -95,6 +95,10 @@
negative_survival_probability = conf.chip_options \
.negative_survival_probability
+ stride = conf.chip_options.stride
+ if stride == 0:
+ stride = None
+
return self.with_classes(list(conf.class_items)) \
.with_predict_batch_size(msg.predict_batch_size) \
.with_predict_package_uri(msg.predict_package_uri) \
@@ -107,7 +111,7 @@
negative_survival_probability=negative_survival_probability,
chips_per_scene=conf.chip_options.chips_per_scene,
target_count_threshold=conf.chip_options.target_count_threshold,
- stride=conf.chip_options.stride)
+ stride=stride)
def validate(self):
super().validate()
|
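Why the patch above takes this shape: under Python 3, `chip_size / 2` is float division, and protobuf reports an unset integer `stride` field as `0`, so the sliding-window default never round-trips as `None`. The sketch below restates the two normalizations from the diff; the helper names are mine, not Raster Vision's.

```python
# Sketch of the two normalizations applied by the patch (helper names are illustrative).

def normalize_stride_for_windows(chip_size: int, stride=None) -> int:
    """Default to half the chip size, then force an integer pixel stride."""
    if stride is None:
        stride = chip_size / 2        # float under Python 3 division
    return int(round(stride))         # get_windows() presumably expects whole pixels

def stride_from_proto(proto_stride: int):
    """Protobuf encodes an unset int field as 0; treat that as 'use the default'."""
    return None if proto_stride == 0 else proto_stride

assert normalize_stride_for_windows(300) == 150
assert stride_from_proto(0) is None
assert stride_from_proto(75) == 75
```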
{"golden_diff": "diff --git a/rastervision/task/semantic_segmentation.py b/rastervision/task/semantic_segmentation.py\n--- a/rastervision/task/semantic_segmentation.py\n+++ b/rastervision/task/semantic_segmentation.py\n@@ -77,6 +77,7 @@\n stride = chip_options.stride\n if stride is None:\n stride = chip_size / 2\n+ stride = int(round(stride))\n \n return list(\n filter_windows((extent.get_windows(chip_size, stride))))\ndiff --git a/rastervision/task/semantic_segmentation_config.py b/rastervision/task/semantic_segmentation_config.py\n--- a/rastervision/task/semantic_segmentation_config.py\n+++ b/rastervision/task/semantic_segmentation_config.py\n@@ -95,6 +95,10 @@\n negative_survival_probability = conf.chip_options \\\n .negative_survival_probability\n \n+ stride = conf.chip_options.stride\n+ if stride == 0:\n+ stride = None\n+\n return self.with_classes(list(conf.class_items)) \\\n .with_predict_batch_size(msg.predict_batch_size) \\\n .with_predict_package_uri(msg.predict_package_uri) \\\n@@ -107,7 +111,7 @@\n negative_survival_probability=negative_survival_probability,\n chips_per_scene=conf.chip_options.chips_per_scene,\n target_count_threshold=conf.chip_options.target_count_threshold,\n- stride=conf.chip_options.stride)\n+ stride=stride)\n \n def validate(self):\n super().validate()\n", "issue": "Not able to use default stride with sliding window semseg option\n\n", "before_files": [{"content": "from copy import deepcopy\nfrom typing import (List, Dict, Tuple, Union)\n\nimport rastervision as rv\nfrom rastervision.task import SemanticSegmentation\nfrom rastervision.core.class_map import (ClassMap, ClassItem)\nfrom rastervision.task import (TaskConfig, TaskConfigBuilder)\nfrom rastervision.protos.task_pb2 import TaskConfig as TaskConfigMsg\nfrom rastervision.protos.class_item_pb2 import ClassItem as ClassItemMsg\n\n\nclass SemanticSegmentationConfig(TaskConfig):\n class ChipOptions:\n def __init__(self,\n window_method='random_sample',\n target_classes=None,\n debug_chip_probability=0.25,\n negative_survival_probability=1.0,\n chips_per_scene=1000,\n target_count_threshold=1000,\n stride=None):\n self.window_method = window_method\n self.target_classes = target_classes\n self.debug_chip_probability = debug_chip_probability\n self.negative_survival_probability = negative_survival_probability\n self.chips_per_scene = chips_per_scene\n self.target_count_threshold = target_count_threshold\n self.stride = stride\n\n def __init__(self,\n class_map,\n predict_batch_size=10,\n predict_package_uri=None,\n debug=True,\n chip_size=300,\n chip_options=None):\n super().__init__(rv.SEMANTIC_SEGMENTATION, predict_batch_size,\n predict_package_uri, debug)\n self.class_map = class_map\n self.chip_size = chip_size\n if chip_options is None:\n chip_options = SemanticSegmentationConfig.ChipOptions()\n self.chip_options = chip_options\n\n def save_bundle_files(self, bundle_dir):\n return (self, [])\n\n def load_bundle_files(self, bundle_dir):\n return self\n\n def create_task(self, backend):\n return SemanticSegmentation(self, backend)\n\n def to_proto(self):\n msg = super().to_proto()\n chip_options = TaskConfigMsg.SemanticSegmentationConfig.ChipOptions(\n window_method=self.chip_options.window_method,\n target_classes=self.chip_options.target_classes,\n debug_chip_probability=self.chip_options.debug_chip_probability,\n negative_survival_probability=self.chip_options.\n negative_survival_probability,\n chips_per_scene=self.chip_options.chips_per_scene,\n 
target_count_threshold=self.chip_options.target_count_threshold,\n stride=self.chip_options.stride)\n\n conf = TaskConfigMsg.SemanticSegmentationConfig(\n chip_size=self.chip_size,\n class_items=self.class_map.to_proto(),\n chip_options=chip_options)\n msg.MergeFrom(\n TaskConfigMsg(\n semantic_segmentation_config=conf,\n predict_package_uri=self.predict_package_uri))\n\n return msg\n\n\nclass SemanticSegmentationConfigBuilder(TaskConfigBuilder):\n def __init__(self, prev=None):\n config = {}\n if prev:\n config = {\n 'predict_batch_size': prev.predict_batch_size,\n 'predict_package_uri': prev.predict_package_uri,\n 'debug': prev.debug,\n 'class_map': prev.class_map,\n 'chip_size': prev.chip_size,\n 'chip_options': prev.chip_options\n }\n super().__init__(SemanticSegmentationConfig, config)\n\n def from_proto(self, msg):\n conf = msg.semantic_segmentation_config\n\n negative_survival_probability = conf.chip_options \\\n .negative_survival_probability\n\n return self.with_classes(list(conf.class_items)) \\\n .with_predict_batch_size(msg.predict_batch_size) \\\n .with_predict_package_uri(msg.predict_package_uri) \\\n .with_debug(msg.debug) \\\n .with_chip_size(conf.chip_size) \\\n .with_chip_options(\n window_method=conf.chip_options.window_method,\n target_classes=list(conf.chip_options.target_classes),\n debug_chip_probability=conf.chip_options.debug_chip_probability,\n negative_survival_probability=negative_survival_probability,\n chips_per_scene=conf.chip_options.chips_per_scene,\n target_count_threshold=conf.chip_options.target_count_threshold,\n stride=conf.chip_options.stride)\n\n def validate(self):\n super().validate()\n # Segmentation masks are stored as uint8 to save space, so can only handle 256\n # classes. If this is really needed, we can add an option for saving with uint16.\n max_classes = 256\n if len(self.config['class_map']) > max_classes:\n raise rv.ConfigError(\n 'Cannot use more than {} classes with semantic segmentation.'.\n format(max_classes))\n\n def with_classes(\n self, classes: Union[ClassMap, List[str], List[ClassItemMsg], List[\n ClassItem], Dict[str, int], Dict[str, Tuple[int, str]]]):\n \"\"\"Set the classes for this task.\n\n Args:\n classes: Either a list of class names, a dict which\n maps class names to class ids, or a dict\n which maps class names to a tuple of (class_id, color),\n where color is a PIL color string.\n \"\"\"\n b = deepcopy(self)\n b.config['class_map'] = ClassMap.construct_from(classes)\n return b\n\n def with_chip_size(self, chip_size):\n \"\"\"Set the chip_size for this task.\n\n Args:\n chip_size: Integer value chip size\n \"\"\"\n b = deepcopy(self)\n b.config['chip_size'] = chip_size\n return b\n\n def with_chip_options(self,\n window_method='random_sample',\n target_classes=None,\n debug_chip_probability=0.25,\n negative_survival_probability=1.0,\n chips_per_scene=1000,\n target_count_threshold=1000,\n stride=None):\n \"\"\"Sets semantic segmentation configurations for the Chip command\n\n Args:\n window_method: Window method to use for chipping.\n Options are: random_sample, sliding\n target_classes: list of class ids to train model on\n debug_chip_probability: probability of generating a debug chip.\n Applies to the 'random_sample' window method.\n negative_survival_probability: probability that a sampled negative\n chip will be utilized if it does not\n contain more pixels than\n target_count_threshold.\n Applies to the 'random_sample' window method.\n chips_per_scene: number of chips to generate per scene.\n Applies to the 
'random_sample' window method.\n target_count_threshold: minimum number of pixels covering target_classes\n that a chip must have.\n Applies to the 'random_sample' window method.\n stride: Stride of windows across image. Defaults to half the chip size.\n Applies to the 'sliding_window' method.\n\n Returns:\n SemanticSegmentationConfigBuilder\n \"\"\"\n b = deepcopy(self)\n\n b.config['chip_options'] = SemanticSegmentationConfig.ChipOptions(\n window_method=window_method,\n target_classes=target_classes,\n debug_chip_probability=debug_chip_probability,\n negative_survival_probability=negative_survival_probability,\n chips_per_scene=chips_per_scene,\n target_count_threshold=target_count_threshold,\n stride=stride)\n return b\n", "path": "rastervision/task/semantic_segmentation_config.py"}, {"content": "from typing import List\nimport logging\n\nimport numpy as np\n\nfrom .task import Task\nfrom rastervision.core.box import Box\nfrom rastervision.data.scene import Scene\nfrom rastervision.data.label import SemanticSegmentationLabels\n\nlog = logging.getLogger(__name__)\n\n\ndef get_random_sample_train_windows(label_store, chip_size, class_map, extent,\n chip_options, filter_windows):\n prob = chip_options.negative_survival_probability\n target_count_threshold = chip_options.target_count_threshold\n target_classes = chip_options.target_classes\n chips_per_scene = chip_options.chips_per_scene\n\n if not target_classes:\n all_class_ids = [item.id for item in class_map.get_items()]\n target_classes = all_class_ids\n\n windows = []\n attempts = 0\n while (attempts < chips_per_scene):\n candidate_window = extent.make_random_square(chip_size)\n if not filter_windows([candidate_window]):\n continue\n attempts = attempts + 1\n\n if (prob >= 1.0):\n windows.append(candidate_window)\n elif attempts == chips_per_scene and len(windows) == 0:\n windows.append(candidate_window)\n else:\n good = label_store.enough_target_pixels(\n candidate_window, target_count_threshold, target_classes)\n if good or (np.random.rand() < prob):\n windows.append(candidate_window)\n\n return windows\n\n\nclass SemanticSegmentation(Task):\n \"\"\"Task-derived type that implements the semantic segmentation task.\"\"\"\n\n def get_train_windows(self, scene: Scene) -> List[Box]:\n \"\"\"Get training windows covering a scene.\n\n Args:\n scene: The scene over-which windows are to be generated.\n\n Returns:\n A list of windows, list(Box)\n\n \"\"\"\n\n def filter_windows(windows):\n if scene.aoi_polygons:\n windows = Box.filter_by_aoi(windows, scene.aoi_polygons)\n return windows\n\n raster_source = scene.raster_source\n extent = raster_source.get_extent()\n label_store = scene.ground_truth_label_source\n chip_size = self.config.chip_size\n\n chip_options = self.config.chip_options\n\n if chip_options.window_method == 'random_sample':\n return get_random_sample_train_windows(\n label_store, chip_size, self.config.class_map, extent,\n chip_options, filter_windows)\n elif chip_options.window_method == 'sliding':\n stride = chip_options.stride\n if stride is None:\n stride = chip_size / 2\n\n return list(\n filter_windows((extent.get_windows(chip_size, stride))))\n\n def get_train_labels(self, window: Box, scene: Scene) -> np.ndarray:\n \"\"\"Get the training labels for the given window in the given scene.\n\n Args:\n window: The window over-which the labels are to be\n retrieved.\n scene: The scene from-which the window of labels is to be\n extracted.\n\n Returns:\n An appropriately-shaped 2d np.ndarray with the labels\n encoded as 
packed pixels.\n\n \"\"\"\n label_store = scene.ground_truth_label_source\n return label_store.get_labels(window)\n\n def get_predict_windows(self, extent: Box) -> List[Box]:\n \"\"\"Get windows over-which predictions will be calculated.\n\n Args:\n extent: The overall extent of the area.\n\n Returns:\n An sequence of windows.\n\n \"\"\"\n chip_size = self.config.chip_size\n return extent.get_windows(chip_size, chip_size)\n\n def post_process_predictions(self, labels, scene):\n return labels\n\n def save_debug_predict_image(self, scene, debug_dir_uri):\n # TODO implement this\n pass\n\n def predict_scene(self, scene, tmp_dir):\n \"\"\"Predict on a single scene, and return the labels.\"\"\"\n log.info('Making predictions for scene')\n raster_source = scene.raster_source\n windows = self.get_predict_windows(raster_source.get_extent())\n\n def label_fn(window):\n chip = raster_source.get_chip(window)\n labels = self.backend.predict([chip], [window], tmp_dir)\n label_arr = labels.get_label_arr(window)\n\n # Set NODATA pixels in imagery to predicted value of 0 (ie. ignore)\n label_arr[np.sum(chip, axis=2) == 0] = 0\n\n print('.', end='', flush=True)\n return label_arr\n\n return SemanticSegmentationLabels(windows, label_fn)\n", "path": "rastervision/task/semantic_segmentation.py"}], "after_files": [{"content": "from copy import deepcopy\nfrom typing import (List, Dict, Tuple, Union)\n\nimport rastervision as rv\nfrom rastervision.task import SemanticSegmentation\nfrom rastervision.core.class_map import (ClassMap, ClassItem)\nfrom rastervision.task import (TaskConfig, TaskConfigBuilder)\nfrom rastervision.protos.task_pb2 import TaskConfig as TaskConfigMsg\nfrom rastervision.protos.class_item_pb2 import ClassItem as ClassItemMsg\n\n\nclass SemanticSegmentationConfig(TaskConfig):\n class ChipOptions:\n def __init__(self,\n window_method='random_sample',\n target_classes=None,\n debug_chip_probability=0.25,\n negative_survival_probability=1.0,\n chips_per_scene=1000,\n target_count_threshold=1000,\n stride=None):\n self.window_method = window_method\n self.target_classes = target_classes\n self.debug_chip_probability = debug_chip_probability\n self.negative_survival_probability = negative_survival_probability\n self.chips_per_scene = chips_per_scene\n self.target_count_threshold = target_count_threshold\n self.stride = stride\n\n def __init__(self,\n class_map,\n predict_batch_size=10,\n predict_package_uri=None,\n debug=True,\n chip_size=300,\n chip_options=None):\n super().__init__(rv.SEMANTIC_SEGMENTATION, predict_batch_size,\n predict_package_uri, debug)\n self.class_map = class_map\n self.chip_size = chip_size\n if chip_options is None:\n chip_options = SemanticSegmentationConfig.ChipOptions()\n self.chip_options = chip_options\n\n def save_bundle_files(self, bundle_dir):\n return (self, [])\n\n def load_bundle_files(self, bundle_dir):\n return self\n\n def create_task(self, backend):\n return SemanticSegmentation(self, backend)\n\n def to_proto(self):\n msg = super().to_proto()\n chip_options = TaskConfigMsg.SemanticSegmentationConfig.ChipOptions(\n window_method=self.chip_options.window_method,\n target_classes=self.chip_options.target_classes,\n debug_chip_probability=self.chip_options.debug_chip_probability,\n negative_survival_probability=self.chip_options.\n negative_survival_probability,\n chips_per_scene=self.chip_options.chips_per_scene,\n target_count_threshold=self.chip_options.target_count_threshold,\n stride=self.chip_options.stride)\n\n conf = 
TaskConfigMsg.SemanticSegmentationConfig(\n chip_size=self.chip_size,\n class_items=self.class_map.to_proto(),\n chip_options=chip_options)\n msg.MergeFrom(\n TaskConfigMsg(\n semantic_segmentation_config=conf,\n predict_package_uri=self.predict_package_uri))\n\n return msg\n\n\nclass SemanticSegmentationConfigBuilder(TaskConfigBuilder):\n def __init__(self, prev=None):\n config = {}\n if prev:\n config = {\n 'predict_batch_size': prev.predict_batch_size,\n 'predict_package_uri': prev.predict_package_uri,\n 'debug': prev.debug,\n 'class_map': prev.class_map,\n 'chip_size': prev.chip_size,\n 'chip_options': prev.chip_options\n }\n super().__init__(SemanticSegmentationConfig, config)\n\n def from_proto(self, msg):\n conf = msg.semantic_segmentation_config\n\n negative_survival_probability = conf.chip_options \\\n .negative_survival_probability\n\n stride = conf.chip_options.stride\n if stride == 0:\n stride = None\n\n return self.with_classes(list(conf.class_items)) \\\n .with_predict_batch_size(msg.predict_batch_size) \\\n .with_predict_package_uri(msg.predict_package_uri) \\\n .with_debug(msg.debug) \\\n .with_chip_size(conf.chip_size) \\\n .with_chip_options(\n window_method=conf.chip_options.window_method,\n target_classes=list(conf.chip_options.target_classes),\n debug_chip_probability=conf.chip_options.debug_chip_probability,\n negative_survival_probability=negative_survival_probability,\n chips_per_scene=conf.chip_options.chips_per_scene,\n target_count_threshold=conf.chip_options.target_count_threshold,\n stride=stride)\n\n def validate(self):\n super().validate()\n # Segmentation masks are stored as uint8 to save space, so can only handle 256\n # classes. If this is really needed, we can add an option for saving with uint16.\n max_classes = 256\n if len(self.config['class_map']) > max_classes:\n raise rv.ConfigError(\n 'Cannot use more than {} classes with semantic segmentation.'.\n format(max_classes))\n\n def with_classes(\n self, classes: Union[ClassMap, List[str], List[ClassItemMsg], List[\n ClassItem], Dict[str, int], Dict[str, Tuple[int, str]]]):\n \"\"\"Set the classes for this task.\n\n Args:\n classes: Either a list of class names, a dict which\n maps class names to class ids, or a dict\n which maps class names to a tuple of (class_id, color),\n where color is a PIL color string.\n \"\"\"\n b = deepcopy(self)\n b.config['class_map'] = ClassMap.construct_from(classes)\n return b\n\n def with_chip_size(self, chip_size):\n \"\"\"Set the chip_size for this task.\n\n Args:\n chip_size: Integer value chip size\n \"\"\"\n b = deepcopy(self)\n b.config['chip_size'] = chip_size\n return b\n\n def with_chip_options(self,\n window_method='random_sample',\n target_classes=None,\n debug_chip_probability=0.25,\n negative_survival_probability=1.0,\n chips_per_scene=1000,\n target_count_threshold=1000,\n stride=None):\n \"\"\"Sets semantic segmentation configurations for the Chip command\n\n Args:\n window_method: Window method to use for chipping.\n Options are: random_sample, sliding\n target_classes: list of class ids to train model on\n debug_chip_probability: probability of generating a debug chip.\n Applies to the 'random_sample' window method.\n negative_survival_probability: probability that a sampled negative\n chip will be utilized if it does not\n contain more pixels than\n target_count_threshold.\n Applies to the 'random_sample' window method.\n chips_per_scene: number of chips to generate per scene.\n Applies to the 'random_sample' window method.\n target_count_threshold: 
minimum number of pixels covering target_classes\n that a chip must have.\n Applies to the 'random_sample' window method.\n stride: Stride of windows across image. Defaults to half the chip size.\n Applies to the 'sliding_window' method.\n\n Returns:\n SemanticSegmentationConfigBuilder\n \"\"\"\n b = deepcopy(self)\n\n b.config['chip_options'] = SemanticSegmentationConfig.ChipOptions(\n window_method=window_method,\n target_classes=target_classes,\n debug_chip_probability=debug_chip_probability,\n negative_survival_probability=negative_survival_probability,\n chips_per_scene=chips_per_scene,\n target_count_threshold=target_count_threshold,\n stride=stride)\n return b\n", "path": "rastervision/task/semantic_segmentation_config.py"}, {"content": "from typing import List\nimport logging\n\nimport numpy as np\n\nfrom .task import Task\nfrom rastervision.core.box import Box\nfrom rastervision.data.scene import Scene\nfrom rastervision.data.label import SemanticSegmentationLabels\n\nlog = logging.getLogger(__name__)\n\n\ndef get_random_sample_train_windows(label_store, chip_size, class_map, extent,\n chip_options, filter_windows):\n prob = chip_options.negative_survival_probability\n target_count_threshold = chip_options.target_count_threshold\n target_classes = chip_options.target_classes\n chips_per_scene = chip_options.chips_per_scene\n\n if not target_classes:\n all_class_ids = [item.id for item in class_map.get_items()]\n target_classes = all_class_ids\n\n windows = []\n attempts = 0\n while (attempts < chips_per_scene):\n candidate_window = extent.make_random_square(chip_size)\n if not filter_windows([candidate_window]):\n continue\n attempts = attempts + 1\n\n if (prob >= 1.0):\n windows.append(candidate_window)\n elif attempts == chips_per_scene and len(windows) == 0:\n windows.append(candidate_window)\n else:\n good = label_store.enough_target_pixels(\n candidate_window, target_count_threshold, target_classes)\n if good or (np.random.rand() < prob):\n windows.append(candidate_window)\n\n return windows\n\n\nclass SemanticSegmentation(Task):\n \"\"\"Task-derived type that implements the semantic segmentation task.\"\"\"\n\n def get_train_windows(self, scene: Scene) -> List[Box]:\n \"\"\"Get training windows covering a scene.\n\n Args:\n scene: The scene over-which windows are to be generated.\n\n Returns:\n A list of windows, list(Box)\n\n \"\"\"\n\n def filter_windows(windows):\n if scene.aoi_polygons:\n windows = Box.filter_by_aoi(windows, scene.aoi_polygons)\n return windows\n\n raster_source = scene.raster_source\n extent = raster_source.get_extent()\n label_store = scene.ground_truth_label_source\n chip_size = self.config.chip_size\n\n chip_options = self.config.chip_options\n\n if chip_options.window_method == 'random_sample':\n return get_random_sample_train_windows(\n label_store, chip_size, self.config.class_map, extent,\n chip_options, filter_windows)\n elif chip_options.window_method == 'sliding':\n stride = chip_options.stride\n if stride is None:\n stride = chip_size / 2\n stride = int(round(stride))\n\n return list(\n filter_windows((extent.get_windows(chip_size, stride))))\n\n def get_train_labels(self, window: Box, scene: Scene) -> np.ndarray:\n \"\"\"Get the training labels for the given window in the given scene.\n\n Args:\n window: The window over-which the labels are to be\n retrieved.\n scene: The scene from-which the window of labels is to be\n extracted.\n\n Returns:\n An appropriately-shaped 2d np.ndarray with the labels\n encoded as packed pixels.\n\n \"\"\"\n 
label_store = scene.ground_truth_label_source\n return label_store.get_labels(window)\n\n def get_predict_windows(self, extent: Box) -> List[Box]:\n \"\"\"Get windows over-which predictions will be calculated.\n\n Args:\n extent: The overall extent of the area.\n\n Returns:\n An sequence of windows.\n\n \"\"\"\n chip_size = self.config.chip_size\n return extent.get_windows(chip_size, chip_size)\n\n def post_process_predictions(self, labels, scene):\n return labels\n\n def save_debug_predict_image(self, scene, debug_dir_uri):\n # TODO implement this\n pass\n\n def predict_scene(self, scene, tmp_dir):\n \"\"\"Predict on a single scene, and return the labels.\"\"\"\n log.info('Making predictions for scene')\n raster_source = scene.raster_source\n windows = self.get_predict_windows(raster_source.get_extent())\n\n def label_fn(window):\n chip = raster_source.get_chip(window)\n labels = self.backend.predict([chip], [window], tmp_dir)\n label_arr = labels.get_label_arr(window)\n\n # Set NODATA pixels in imagery to predicted value of 0 (ie. ignore)\n label_arr[np.sum(chip, axis=2) == 0] = 0\n\n print('.', end='', flush=True)\n return label_arr\n\n return SemanticSegmentationLabels(windows, label_fn)\n", "path": "rastervision/task/semantic_segmentation.py"}]}
| 3,578 | 342 |
gh_patches_debug_36131
|
rasdani/github-patches
|
git_diff
|
netbox-community__netbox-4063
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
List ChoiceField choices in Swagger as enum
### Environment
* Python version: 3.7.3
* NetBox version: 2.7.2
### Proposed Functionality
Based on this specification https://swagger.io/docs/specification/data-models/enums/ the ChoiceField choices for label and value can (and should) be listed as enums.
### Use Case
Proper generation of API model from openapi specification for 3rd party projects without requirement to query OPTIONS for each endpoint to receive extra data from django. For example we are generating Java API for Netbox based on openapi spec from Netbox. Also having the choices listed on api/docs Swagger UI is very helpful when browsing through the API docs.
### Database Changes
None
### External Dependencies
None
--- END ISSUE ---
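For readers unfamiliar with the enum keyword the issue points to: in drf-yasg, which generates this schema (see `custom_inspectors.py` below), advertising the permissible choices is just an `enum=` argument on `openapi.Schema`. The choice strings here are hypothetical, for illustration only.

```python
# Illustration of the requested behaviour; the choice values are made up.
from drf_yasg import openapi

value_schema = openapi.Schema(
    type=openapi.TYPE_STRING,
    enum=["active", "planned", "retired"],      # emitted as an OpenAPI enum list
)
label_schema = openapi.Schema(
    type=openapi.TYPE_STRING,
    enum=["Active", "Planned", "Retired"],
)
```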
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/utilities/custom_inspectors.py`
Content:
```
1 from django.contrib.postgres.fields import JSONField
2 from drf_yasg import openapi
3 from drf_yasg.inspectors import FieldInspector, NotHandled, PaginatorInspector, FilterInspector, SwaggerAutoSchema
4 from drf_yasg.utils import get_serializer_ref_name
5 from rest_framework.fields import ChoiceField
6 from rest_framework.relations import ManyRelatedField
7 from taggit_serializer.serializers import TagListSerializerField
8
9 from dcim.api.serializers import InterfaceSerializer as DeviceInterfaceSerializer
10 from extras.api.customfields import CustomFieldsSerializer
11 from utilities.api import ChoiceField, SerializedPKRelatedField, WritableNestedSerializer
12 from virtualization.api.serializers import InterfaceSerializer as VirtualMachineInterfaceSerializer
13
14 # this might be ugly, but it limits drf_yasg-specific code to this file
15 DeviceInterfaceSerializer.Meta.ref_name = 'DeviceInterface'
16 VirtualMachineInterfaceSerializer.Meta.ref_name = 'VirtualMachineInterface'
17
18
19 class NetBoxSwaggerAutoSchema(SwaggerAutoSchema):
20 writable_serializers = {}
21
22 def get_request_serializer(self):
23 serializer = super().get_request_serializer()
24
25 if serializer is not None and self.method in self.implicit_body_methods:
26 properties = {}
27 for child_name, child in serializer.fields.items():
28 if isinstance(child, (ChoiceField, WritableNestedSerializer)):
29 properties[child_name] = None
30 elif isinstance(child, ManyRelatedField) and isinstance(child.child_relation, SerializedPKRelatedField):
31 properties[child_name] = None
32
33 if properties:
34 if type(serializer) not in self.writable_serializers:
35 writable_name = 'Writable' + type(serializer).__name__
36 meta_class = getattr(type(serializer), 'Meta', None)
37 if meta_class:
38 ref_name = 'Writable' + get_serializer_ref_name(serializer)
39 writable_meta = type('Meta', (meta_class,), {'ref_name': ref_name})
40 properties['Meta'] = writable_meta
41
42 self.writable_serializers[type(serializer)] = type(writable_name, (type(serializer),), properties)
43
44 writable_class = self.writable_serializers[type(serializer)]
45 serializer = writable_class()
46
47 return serializer
48
49
50 class SerializedPKRelatedFieldInspector(FieldInspector):
51 def field_to_swagger_object(self, field, swagger_object_type, use_references, **kwargs):
52 SwaggerType, ChildSwaggerType = self._get_partial_types(field, swagger_object_type, use_references, **kwargs)
53 if isinstance(field, SerializedPKRelatedField):
54 return self.probe_field_inspectors(field.serializer(), ChildSwaggerType, use_references)
55
56 return NotHandled
57
58
59 class TagListFieldInspector(FieldInspector):
60 def field_to_swagger_object(self, field, swagger_object_type, use_references, **kwargs):
61 SwaggerType, ChildSwaggerType = self._get_partial_types(field, swagger_object_type, use_references, **kwargs)
62 if isinstance(field, TagListSerializerField):
63 child_schema = self.probe_field_inspectors(field.child, ChildSwaggerType, use_references)
64 return SwaggerType(
65 type=openapi.TYPE_ARRAY,
66 items=child_schema,
67 )
68
69 return NotHandled
70
71
72 class CustomChoiceFieldInspector(FieldInspector):
73 def field_to_swagger_object(self, field, swagger_object_type, use_references, **kwargs):
74 # this returns a callable which extracts title, description and other stuff
75 # https://drf-yasg.readthedocs.io/en/stable/_modules/drf_yasg/inspectors/base.html#FieldInspector._get_partial_types
76 SwaggerType, _ = self._get_partial_types(field, swagger_object_type, use_references, **kwargs)
77
78 if isinstance(field, ChoiceField):
79 value_schema = openapi.Schema(type=openapi.TYPE_STRING)
80
81 choices = list(field._choices.keys())
82 if set([None] + choices) == {None, True, False}:
83 # DeviceType.subdevice_role, Device.face and InterfaceConnection.connection_status all need to be
84 # differentiated since they each have subtly different values in their choice keys.
85 # - subdevice_role and connection_status are booleans, although subdevice_role includes None
86 # - face is an integer set {0, 1} which is easily confused with {False, True}
87 schema_type = openapi.TYPE_STRING
88 if all(type(x) == bool for x in [c for c in choices if c is not None]):
89 schema_type = openapi.TYPE_BOOLEAN
90 value_schema = openapi.Schema(type=schema_type)
91 value_schema['x-nullable'] = True
92
93 if isinstance(choices[0], int):
94 # Change value_schema for IPAddressFamilyChoices, RackWidthChoices
95 value_schema = openapi.Schema(type=openapi.TYPE_INTEGER)
96
97 schema = SwaggerType(type=openapi.TYPE_OBJECT, required=["label", "value"], properties={
98 "label": openapi.Schema(type=openapi.TYPE_STRING),
99 "value": value_schema
100 })
101
102 return schema
103
104 elif isinstance(field, CustomFieldsSerializer):
105 schema = SwaggerType(type=openapi.TYPE_OBJECT)
106 return schema
107
108 return NotHandled
109
110
111 class NullableBooleanFieldInspector(FieldInspector):
112 def process_result(self, result, method_name, obj, **kwargs):
113
114 if isinstance(result, openapi.Schema) and isinstance(obj, ChoiceField) and result.type == 'boolean':
115 keys = obj.choices.keys()
116 if set(keys) == {None, True, False}:
117 result['x-nullable'] = True
118 result.type = 'boolean'
119
120 return result
121
122
123 class JSONFieldInspector(FieldInspector):
124 """Required because by default, Swagger sees a JSONField as a string and not dict
125 """
126 def process_result(self, result, method_name, obj, **kwargs):
127 if isinstance(result, openapi.Schema) and isinstance(obj, JSONField):
128 result.type = 'dict'
129 return result
130
131
132 class IdInFilterInspector(FilterInspector):
133 def process_result(self, result, method_name, obj, **kwargs):
134 if isinstance(result, list):
135 params = [p for p in result if isinstance(p, openapi.Parameter) and p.name == 'id__in']
136 for p in params:
137 p.type = 'string'
138
139 return result
140
141
142 class NullablePaginatorInspector(PaginatorInspector):
143 def process_result(self, result, method_name, obj, **kwargs):
144 if method_name == 'get_paginated_response' and isinstance(result, openapi.Schema):
145 next = result.properties['next']
146 if isinstance(next, openapi.Schema):
147 next['x-nullable'] = True
148 previous = result.properties['previous']
149 if isinstance(previous, openapi.Schema):
150 previous['x-nullable'] = True
151
152 return result
153
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/netbox/utilities/custom_inspectors.py b/netbox/utilities/custom_inspectors.py
--- a/netbox/utilities/custom_inspectors.py
+++ b/netbox/utilities/custom_inspectors.py
@@ -76,26 +76,28 @@
SwaggerType, _ = self._get_partial_types(field, swagger_object_type, use_references, **kwargs)
if isinstance(field, ChoiceField):
- value_schema = openapi.Schema(type=openapi.TYPE_STRING)
+ choices = field._choices
+ choice_value = list(choices.keys())
+ choice_label = list(choices.values())
+ value_schema = openapi.Schema(type=openapi.TYPE_STRING, enum=choice_value)
- choices = list(field._choices.keys())
- if set([None] + choices) == {None, True, False}:
+ if set([None] + choice_value) == {None, True, False}:
# DeviceType.subdevice_role, Device.face and InterfaceConnection.connection_status all need to be
# differentiated since they each have subtly different values in their choice keys.
# - subdevice_role and connection_status are booleans, although subdevice_role includes None
# - face is an integer set {0, 1} which is easily confused with {False, True}
schema_type = openapi.TYPE_STRING
- if all(type(x) == bool for x in [c for c in choices if c is not None]):
+ if all(type(x) == bool for x in [c for c in choice_value if c is not None]):
schema_type = openapi.TYPE_BOOLEAN
- value_schema = openapi.Schema(type=schema_type)
+ value_schema = openapi.Schema(type=schema_type, enum=choice_value)
value_schema['x-nullable'] = True
- if isinstance(choices[0], int):
+ if isinstance(choice_value[0], int):
# Change value_schema for IPAddressFamilyChoices, RackWidthChoices
- value_schema = openapi.Schema(type=openapi.TYPE_INTEGER)
+ value_schema = openapi.Schema(type=openapi.TYPE_INTEGER, enum=choice_value)
schema = SwaggerType(type=openapi.TYPE_OBJECT, required=["label", "value"], properties={
- "label": openapi.Schema(type=openapi.TYPE_STRING),
+ "label": openapi.Schema(type=openapi.TYPE_STRING, enum=choice_label),
"value": value_schema
})
|
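With that patch applied, a ChoiceField property in the generated OpenAPI document carries its permissible labels and values, roughly like the fragment below. The concrete choices are illustrative; real ones come from `field._choices`.

```python
# Approximate shape of one generated property after the patch (values illustrative).
choice_property = {
    "type": "object",
    "required": ["label", "value"],
    "properties": {
        "label": {"type": "string", "enum": ["Active", "Planned", "Retired"]},
        "value": {"type": "string", "enum": ["active", "planned", "retired"]},
    },
}
```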
{"golden_diff": "diff --git a/netbox/utilities/custom_inspectors.py b/netbox/utilities/custom_inspectors.py\n--- a/netbox/utilities/custom_inspectors.py\n+++ b/netbox/utilities/custom_inspectors.py\n@@ -76,26 +76,28 @@\n SwaggerType, _ = self._get_partial_types(field, swagger_object_type, use_references, **kwargs)\n \n if isinstance(field, ChoiceField):\n- value_schema = openapi.Schema(type=openapi.TYPE_STRING)\n+ choices = field._choices\n+ choice_value = list(choices.keys())\n+ choice_label = list(choices.values())\n+ value_schema = openapi.Schema(type=openapi.TYPE_STRING, enum=choice_value)\n \n- choices = list(field._choices.keys())\n- if set([None] + choices) == {None, True, False}:\n+ if set([None] + choice_value) == {None, True, False}:\n # DeviceType.subdevice_role, Device.face and InterfaceConnection.connection_status all need to be\n # differentiated since they each have subtly different values in their choice keys.\n # - subdevice_role and connection_status are booleans, although subdevice_role includes None\n # - face is an integer set {0, 1} which is easily confused with {False, True}\n schema_type = openapi.TYPE_STRING\n- if all(type(x) == bool for x in [c for c in choices if c is not None]):\n+ if all(type(x) == bool for x in [c for c in choice_value if c is not None]):\n schema_type = openapi.TYPE_BOOLEAN\n- value_schema = openapi.Schema(type=schema_type)\n+ value_schema = openapi.Schema(type=schema_type, enum=choice_value)\n value_schema['x-nullable'] = True\n \n- if isinstance(choices[0], int):\n+ if isinstance(choice_value[0], int):\n # Change value_schema for IPAddressFamilyChoices, RackWidthChoices\n- value_schema = openapi.Schema(type=openapi.TYPE_INTEGER)\n+ value_schema = openapi.Schema(type=openapi.TYPE_INTEGER, enum=choice_value)\n \n schema = SwaggerType(type=openapi.TYPE_OBJECT, required=[\"label\", \"value\"], properties={\n- \"label\": openapi.Schema(type=openapi.TYPE_STRING),\n+ \"label\": openapi.Schema(type=openapi.TYPE_STRING, enum=choice_label),\n \"value\": value_schema\n })\n", "issue": "List ChoiceField choices in Swagger as enum\n### Environment\r\n* Python version: 3.7.3\r\n* NetBox version: 2.7.2\r\n\r\n### Proposed Functionality\r\n\r\nBased on this specification https://swagger.io/docs/specification/data-models/enums/ the ChoiceField choices for label and value can (and should) be listed as enums.\r\n\r\n### Use Case\r\n\r\nProper generation of API model from openapi specification for 3rd party projects without requirement to query OPTIONS for each endpoint to receive extra data from django. For example we are generating Java API for Netbox based on openapi spec from Netbox. 
Also having the choices listed on api/docs Swagger UI is very helpful when browsing through the API docs.\r\n\r\n### Database Changes\r\n\r\nNone\r\n\r\n### External Dependencies\r\n\r\nNone\n", "before_files": [{"content": "from django.contrib.postgres.fields import JSONField\nfrom drf_yasg import openapi\nfrom drf_yasg.inspectors import FieldInspector, NotHandled, PaginatorInspector, FilterInspector, SwaggerAutoSchema\nfrom drf_yasg.utils import get_serializer_ref_name\nfrom rest_framework.fields import ChoiceField\nfrom rest_framework.relations import ManyRelatedField\nfrom taggit_serializer.serializers import TagListSerializerField\n\nfrom dcim.api.serializers import InterfaceSerializer as DeviceInterfaceSerializer\nfrom extras.api.customfields import CustomFieldsSerializer\nfrom utilities.api import ChoiceField, SerializedPKRelatedField, WritableNestedSerializer\nfrom virtualization.api.serializers import InterfaceSerializer as VirtualMachineInterfaceSerializer\n\n# this might be ugly, but it limits drf_yasg-specific code to this file\nDeviceInterfaceSerializer.Meta.ref_name = 'DeviceInterface'\nVirtualMachineInterfaceSerializer.Meta.ref_name = 'VirtualMachineInterface'\n\n\nclass NetBoxSwaggerAutoSchema(SwaggerAutoSchema):\n writable_serializers = {}\n\n def get_request_serializer(self):\n serializer = super().get_request_serializer()\n\n if serializer is not None and self.method in self.implicit_body_methods:\n properties = {}\n for child_name, child in serializer.fields.items():\n if isinstance(child, (ChoiceField, WritableNestedSerializer)):\n properties[child_name] = None\n elif isinstance(child, ManyRelatedField) and isinstance(child.child_relation, SerializedPKRelatedField):\n properties[child_name] = None\n\n if properties:\n if type(serializer) not in self.writable_serializers:\n writable_name = 'Writable' + type(serializer).__name__\n meta_class = getattr(type(serializer), 'Meta', None)\n if meta_class:\n ref_name = 'Writable' + get_serializer_ref_name(serializer)\n writable_meta = type('Meta', (meta_class,), {'ref_name': ref_name})\n properties['Meta'] = writable_meta\n\n self.writable_serializers[type(serializer)] = type(writable_name, (type(serializer),), properties)\n\n writable_class = self.writable_serializers[type(serializer)]\n serializer = writable_class()\n\n return serializer\n\n\nclass SerializedPKRelatedFieldInspector(FieldInspector):\n def field_to_swagger_object(self, field, swagger_object_type, use_references, **kwargs):\n SwaggerType, ChildSwaggerType = self._get_partial_types(field, swagger_object_type, use_references, **kwargs)\n if isinstance(field, SerializedPKRelatedField):\n return self.probe_field_inspectors(field.serializer(), ChildSwaggerType, use_references)\n\n return NotHandled\n\n\nclass TagListFieldInspector(FieldInspector):\n def field_to_swagger_object(self, field, swagger_object_type, use_references, **kwargs):\n SwaggerType, ChildSwaggerType = self._get_partial_types(field, swagger_object_type, use_references, **kwargs)\n if isinstance(field, TagListSerializerField):\n child_schema = self.probe_field_inspectors(field.child, ChildSwaggerType, use_references)\n return SwaggerType(\n type=openapi.TYPE_ARRAY,\n items=child_schema,\n )\n\n return NotHandled\n\n\nclass CustomChoiceFieldInspector(FieldInspector):\n def field_to_swagger_object(self, field, swagger_object_type, use_references, **kwargs):\n # this returns a callable which extracts title, description and other stuff\n # 
https://drf-yasg.readthedocs.io/en/stable/_modules/drf_yasg/inspectors/base.html#FieldInspector._get_partial_types\n SwaggerType, _ = self._get_partial_types(field, swagger_object_type, use_references, **kwargs)\n\n if isinstance(field, ChoiceField):\n value_schema = openapi.Schema(type=openapi.TYPE_STRING)\n\n choices = list(field._choices.keys())\n if set([None] + choices) == {None, True, False}:\n # DeviceType.subdevice_role, Device.face and InterfaceConnection.connection_status all need to be\n # differentiated since they each have subtly different values in their choice keys.\n # - subdevice_role and connection_status are booleans, although subdevice_role includes None\n # - face is an integer set {0, 1} which is easily confused with {False, True}\n schema_type = openapi.TYPE_STRING\n if all(type(x) == bool for x in [c for c in choices if c is not None]):\n schema_type = openapi.TYPE_BOOLEAN\n value_schema = openapi.Schema(type=schema_type)\n value_schema['x-nullable'] = True\n\n if isinstance(choices[0], int):\n # Change value_schema for IPAddressFamilyChoices, RackWidthChoices\n value_schema = openapi.Schema(type=openapi.TYPE_INTEGER)\n\n schema = SwaggerType(type=openapi.TYPE_OBJECT, required=[\"label\", \"value\"], properties={\n \"label\": openapi.Schema(type=openapi.TYPE_STRING),\n \"value\": value_schema\n })\n\n return schema\n\n elif isinstance(field, CustomFieldsSerializer):\n schema = SwaggerType(type=openapi.TYPE_OBJECT)\n return schema\n\n return NotHandled\n\n\nclass NullableBooleanFieldInspector(FieldInspector):\n def process_result(self, result, method_name, obj, **kwargs):\n\n if isinstance(result, openapi.Schema) and isinstance(obj, ChoiceField) and result.type == 'boolean':\n keys = obj.choices.keys()\n if set(keys) == {None, True, False}:\n result['x-nullable'] = True\n result.type = 'boolean'\n\n return result\n\n\nclass JSONFieldInspector(FieldInspector):\n \"\"\"Required because by default, Swagger sees a JSONField as a string and not dict\n \"\"\"\n def process_result(self, result, method_name, obj, **kwargs):\n if isinstance(result, openapi.Schema) and isinstance(obj, JSONField):\n result.type = 'dict'\n return result\n\n\nclass IdInFilterInspector(FilterInspector):\n def process_result(self, result, method_name, obj, **kwargs):\n if isinstance(result, list):\n params = [p for p in result if isinstance(p, openapi.Parameter) and p.name == 'id__in']\n for p in params:\n p.type = 'string'\n\n return result\n\n\nclass NullablePaginatorInspector(PaginatorInspector):\n def process_result(self, result, method_name, obj, **kwargs):\n if method_name == 'get_paginated_response' and isinstance(result, openapi.Schema):\n next = result.properties['next']\n if isinstance(next, openapi.Schema):\n next['x-nullable'] = True\n previous = result.properties['previous']\n if isinstance(previous, openapi.Schema):\n previous['x-nullable'] = True\n\n return result\n", "path": "netbox/utilities/custom_inspectors.py"}], "after_files": [{"content": "from django.contrib.postgres.fields import JSONField\nfrom drf_yasg import openapi\nfrom drf_yasg.inspectors import FieldInspector, NotHandled, PaginatorInspector, FilterInspector, SwaggerAutoSchema\nfrom drf_yasg.utils import get_serializer_ref_name\nfrom rest_framework.fields import ChoiceField\nfrom rest_framework.relations import ManyRelatedField\nfrom taggit_serializer.serializers import TagListSerializerField\n\nfrom dcim.api.serializers import InterfaceSerializer as DeviceInterfaceSerializer\nfrom extras.api.customfields import 
CustomFieldsSerializer\nfrom utilities.api import ChoiceField, SerializedPKRelatedField, WritableNestedSerializer\nfrom virtualization.api.serializers import InterfaceSerializer as VirtualMachineInterfaceSerializer\n\n# this might be ugly, but it limits drf_yasg-specific code to this file\nDeviceInterfaceSerializer.Meta.ref_name = 'DeviceInterface'\nVirtualMachineInterfaceSerializer.Meta.ref_name = 'VirtualMachineInterface'\n\n\nclass NetBoxSwaggerAutoSchema(SwaggerAutoSchema):\n writable_serializers = {}\n\n def get_request_serializer(self):\n serializer = super().get_request_serializer()\n\n if serializer is not None and self.method in self.implicit_body_methods:\n properties = {}\n for child_name, child in serializer.fields.items():\n if isinstance(child, (ChoiceField, WritableNestedSerializer)):\n properties[child_name] = None\n elif isinstance(child, ManyRelatedField) and isinstance(child.child_relation, SerializedPKRelatedField):\n properties[child_name] = None\n\n if properties:\n if type(serializer) not in self.writable_serializers:\n writable_name = 'Writable' + type(serializer).__name__\n meta_class = getattr(type(serializer), 'Meta', None)\n if meta_class:\n ref_name = 'Writable' + get_serializer_ref_name(serializer)\n writable_meta = type('Meta', (meta_class,), {'ref_name': ref_name})\n properties['Meta'] = writable_meta\n\n self.writable_serializers[type(serializer)] = type(writable_name, (type(serializer),), properties)\n\n writable_class = self.writable_serializers[type(serializer)]\n serializer = writable_class()\n\n return serializer\n\n\nclass SerializedPKRelatedFieldInspector(FieldInspector):\n def field_to_swagger_object(self, field, swagger_object_type, use_references, **kwargs):\n SwaggerType, ChildSwaggerType = self._get_partial_types(field, swagger_object_type, use_references, **kwargs)\n if isinstance(field, SerializedPKRelatedField):\n return self.probe_field_inspectors(field.serializer(), ChildSwaggerType, use_references)\n\n return NotHandled\n\n\nclass TagListFieldInspector(FieldInspector):\n def field_to_swagger_object(self, field, swagger_object_type, use_references, **kwargs):\n SwaggerType, ChildSwaggerType = self._get_partial_types(field, swagger_object_type, use_references, **kwargs)\n if isinstance(field, TagListSerializerField):\n child_schema = self.probe_field_inspectors(field.child, ChildSwaggerType, use_references)\n return SwaggerType(\n type=openapi.TYPE_ARRAY,\n items=child_schema,\n )\n\n return NotHandled\n\n\nclass CustomChoiceFieldInspector(FieldInspector):\n def field_to_swagger_object(self, field, swagger_object_type, use_references, **kwargs):\n # this returns a callable which extracts title, description and other stuff\n # https://drf-yasg.readthedocs.io/en/stable/_modules/drf_yasg/inspectors/base.html#FieldInspector._get_partial_types\n SwaggerType, _ = self._get_partial_types(field, swagger_object_type, use_references, **kwargs)\n\n if isinstance(field, ChoiceField):\n choices = field._choices\n choice_value = list(choices.keys())\n choice_label = list(choices.values())\n value_schema = openapi.Schema(type=openapi.TYPE_STRING, enum=choice_value)\n\n if set([None] + choice_value) == {None, True, False}:\n # DeviceType.subdevice_role, Device.face and InterfaceConnection.connection_status all need to be\n # differentiated since they each have subtly different values in their choice keys.\n # - subdevice_role and connection_status are booleans, although subdevice_role includes None\n # - face is an integer set {0, 1} which is easily 
confused with {False, True}\n schema_type = openapi.TYPE_STRING\n if all(type(x) == bool for x in [c for c in choice_value if c is not None]):\n schema_type = openapi.TYPE_BOOLEAN\n value_schema = openapi.Schema(type=schema_type, enum=choice_value)\n value_schema['x-nullable'] = True\n\n if isinstance(choice_value[0], int):\n # Change value_schema for IPAddressFamilyChoices, RackWidthChoices\n value_schema = openapi.Schema(type=openapi.TYPE_INTEGER, enum=choice_value)\n\n schema = SwaggerType(type=openapi.TYPE_OBJECT, required=[\"label\", \"value\"], properties={\n \"label\": openapi.Schema(type=openapi.TYPE_STRING, enum=choice_label),\n \"value\": value_schema\n })\n\n return schema\n\n elif isinstance(field, CustomFieldsSerializer):\n schema = SwaggerType(type=openapi.TYPE_OBJECT)\n return schema\n\n return NotHandled\n\n\nclass NullableBooleanFieldInspector(FieldInspector):\n def process_result(self, result, method_name, obj, **kwargs):\n\n if isinstance(result, openapi.Schema) and isinstance(obj, ChoiceField) and result.type == 'boolean':\n keys = obj.choices.keys()\n if set(keys) == {None, True, False}:\n result['x-nullable'] = True\n result.type = 'boolean'\n\n return result\n\n\nclass JSONFieldInspector(FieldInspector):\n \"\"\"Required because by default, Swagger sees a JSONField as a string and not dict\n \"\"\"\n def process_result(self, result, method_name, obj, **kwargs):\n if isinstance(result, openapi.Schema) and isinstance(obj, JSONField):\n result.type = 'dict'\n return result\n\n\nclass IdInFilterInspector(FilterInspector):\n def process_result(self, result, method_name, obj, **kwargs):\n if isinstance(result, list):\n params = [p for p in result if isinstance(p, openapi.Parameter) and p.name == 'id__in']\n for p in params:\n p.type = 'string'\n\n return result\n\n\nclass NullablePaginatorInspector(PaginatorInspector):\n def process_result(self, result, method_name, obj, **kwargs):\n if method_name == 'get_paginated_response' and isinstance(result, openapi.Schema):\n next = result.properties['next']\n if isinstance(next, openapi.Schema):\n next['x-nullable'] = True\n previous = result.properties['previous']\n if isinstance(previous, openapi.Schema):\n previous['x-nullable'] = True\n\n return result\n", "path": "netbox/utilities/custom_inspectors.py"}]}
| 2,190 | 522 |
gh_patches_debug_42799
|
rasdani/github-patches
|
git_diff
|
microsoft__MLOS-483
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
mlos_bench: read results file format tests
@poojanilangekar, could you please add some tests for the changes in #460 so that we make sure that the code accepts both input file formats we expect to be able to support? Thanks!
--- END ISSUE ---
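What "both input file formats" most likely refers to (an assumption — the listing below stops before the results-reading code): a one-row "wide" CSV with one column per metric, and a two-column "long" CSV of metric,value pairs. A self-contained sketch of such a test follows; `_read_results` is a stand-in for whatever `LocalEnv` does with `read_results_file`, not the real mlos_bench API.

```python
# Hypothetical test sketch; the helper stands in for the real results-reading code.
import pandas


def _read_results(path: str) -> dict:
    data = pandas.read_csv(path)
    if list(data.columns) == ["metric", "value"]:   # assumed "long" two-column format
        data = pandas.DataFrame([data.value.to_list()], columns=data.metric.to_list())
    return data.iloc[-1].to_dict()                  # assumed "wide" one-row format


def test_read_results_accepts_both_formats(tmp_path):
    wide = tmp_path / "wide.csv"
    wide.write_text("score,latency\n0.9,12.5\n")
    long_fmt = tmp_path / "long.csv"
    long_fmt.write_text("metric,value\nscore,0.9\nlatency,12.5\n")
    expected = {"score": 0.9, "latency": 12.5}
    assert _read_results(str(wide)) == expected
    assert _read_results(str(long_fmt)) == expected
```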
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlos_bench/mlos_bench/environments/local/local_env.py`
Content:
```
1 #
2 # Copyright (c) Microsoft Corporation.
3 # Licensed under the MIT License.
4 #
5 """
6 Scheduler-side benchmark environment to run scripts locally.
7 """
8
9 import json
10 import logging
11 import sys
12
13 from datetime import datetime
14 from tempfile import TemporaryDirectory
15 from contextlib import nullcontext
16
17 from types import TracebackType
18 from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Type, Union
19 from typing_extensions import Literal
20
21 import pandas
22
23 from mlos_bench.environments.status import Status
24 from mlos_bench.environments.base_environment import Environment
25 from mlos_bench.environments.script_env import ScriptEnv
26 from mlos_bench.services.base_service import Service
27 from mlos_bench.services.types.local_exec_type import SupportsLocalExec
28 from mlos_bench.tunables.tunable_groups import TunableGroups
29 from mlos_bench.util import path_join
30
31 _LOG = logging.getLogger(__name__)
32
33
34 class LocalEnv(ScriptEnv):
35 # pylint: disable=too-many-instance-attributes
36 """
37 Scheduler-side Environment that runs scripts locally.
38 """
39
40 def __init__(self,
41 *,
42 name: str,
43 config: dict,
44 global_config: Optional[dict] = None,
45 tunables: Optional[TunableGroups] = None,
46 service: Optional[Service] = None):
47 """
48 Create a new environment for local execution.
49
50 Parameters
51 ----------
52 name: str
53 Human-readable name of the environment.
54 config : dict
55 Free-format dictionary that contains the benchmark environment
56 configuration. Each config must have at least the "tunable_params"
57 and the "const_args" sections.
58 `LocalEnv` must also have at least some of the following parameters:
59 {setup, run, teardown, dump_params_file, read_results_file}
60 global_config : dict
61 Free-format dictionary of global parameters (e.g., security credentials)
62 to be mixed in into the "const_args" section of the local config.
63 tunables : TunableGroups
64 A collection of tunable parameters for *all* environments.
65 service: Service
66 An optional service object (e.g., providing methods to
67 deploy or reboot a VM, etc.).
68 """
69 super().__init__(name=name, config=config, global_config=global_config,
70 tunables=tunables, service=service)
71
72 assert self._service is not None and isinstance(self._service, SupportsLocalExec), \
73 "LocalEnv requires a service that supports local execution"
74 self._local_exec_service: SupportsLocalExec = self._service
75
76 self._temp_dir: Optional[str] = None
77 self._temp_dir_context: Union[TemporaryDirectory, nullcontext, None] = None
78
79 self._dump_params_file: Optional[str] = self.config.get("dump_params_file")
80 self._dump_meta_file: Optional[str] = self.config.get("dump_meta_file")
81
82 self._read_results_file: Optional[str] = self.config.get("read_results_file")
83 self._read_telemetry_file: Optional[str] = self.config.get("read_telemetry_file")
84
85 def __enter__(self) -> Environment:
86 assert self._temp_dir is None and self._temp_dir_context is None
87 self._temp_dir_context = self._local_exec_service.temp_dir_context(self.config.get("temp_dir"))
88 self._temp_dir = self._temp_dir_context.__enter__()
89 return super().__enter__()
90
91 def __exit__(self, ex_type: Optional[Type[BaseException]],
92 ex_val: Optional[BaseException],
93 ex_tb: Optional[TracebackType]) -> Literal[False]:
94 """
95 Exit the context of the benchmarking environment.
96 """
97 assert not (self._temp_dir is None or self._temp_dir_context is None)
98 self._temp_dir_context.__exit__(ex_type, ex_val, ex_tb)
99 self._temp_dir = None
100 self._temp_dir_context = None
101 return super().__exit__(ex_type, ex_val, ex_tb)
102
103 def setup(self, tunables: TunableGroups, global_config: Optional[dict] = None) -> bool:
104 """
105 Check if the environment is ready and set up the application
106 and benchmarks, if necessary.
107
108 Parameters
109 ----------
110 tunables : TunableGroups
111 A collection of tunable OS and application parameters along with their
112 values. In a local environment these could be used to prepare a config
113 file on the scheduler prior to transferring it to the remote environment,
114 for instance.
115 global_config : dict
116 Free-format dictionary of global parameters of the environment
117 that are not used in the optimization process.
118
119 Returns
120 -------
121 is_success : bool
122 True if operation is successful, false otherwise.
123 """
124 if not super().setup(tunables, global_config):
125 return False
126
127 _LOG.info("Set up the environment locally: '%s' at %s", self, self._temp_dir)
128 assert self._temp_dir is not None
129
130 if self._dump_params_file:
131 fname = path_join(self._temp_dir, self._dump_params_file)
132 _LOG.debug("Dump tunables to file: %s", fname)
133 with open(fname, "wt", encoding="utf-8") as fh_tunables:
134 # json.dump(self._params, fh_tunables) # Tunables *and* const_args
135 json.dump(self._tunable_params.get_param_values(), fh_tunables)
136
137 if self._dump_meta_file:
138 fname = path_join(self._temp_dir, self._dump_meta_file)
139 _LOG.debug("Dump tunables metadata to file: %s", fname)
140 with open(fname, "wt", encoding="utf-8") as fh_meta:
141 json.dump({
142 tunable.name: tunable.meta
143 for (tunable, _group) in self._tunable_params if tunable.meta
144 }, fh_meta)
145
146 if self._script_setup:
147 return_code = self._local_exec(self._script_setup, self._temp_dir)
148 self._is_ready = bool(return_code == 0)
149 else:
150 self._is_ready = True
151
152 return self._is_ready
153
154 def run(self) -> Tuple[Status, Optional[Dict[str, float]]]:
155 """
156 Run a script in the local scheduler environment.
157
158 Returns
159 -------
160 (status, output) : (Status, dict)
161 A pair of (Status, output) values, where `output` is a dict
162 with the results or None if the status is not COMPLETED.
163 If run script is a benchmark, then the score is usually expected to
164 be in the `score` field.
165 """
166 (status, _) = result = super().run()
167 if not status.is_ready():
168 return result
169
170 assert self._temp_dir is not None
171
172 if self._script_run:
173 return_code = self._local_exec(self._script_run, self._temp_dir)
174 if return_code != 0:
175 return (Status.FAILED, None)
176
177 # FIXME: We should not be assuming that the only output file type is a CSV.
178 if not self._read_results_file:
179 _LOG.debug("Not reading the data at: %s", self)
180 return (Status.SUCCEEDED, {})
181
182 data: pandas.DataFrame = pandas.read_csv(
183 self._config_loader_service.resolve_path(
184 self._read_results_file, extra_paths=[self._temp_dir]))
185
186 if sys.platform == 'win32':
187 data.rename(str.rstrip, axis='columns', inplace=True)
188
189 _LOG.debug("Read data:\n%s", data)
190 if list(data.columns) == ["metric", "value"]:
191 _LOG.warning(
192 "Local run has %d rows: assume long format of (metric, value)", len(data))
193 data = pandas.DataFrame([data.value.to_list()], columns=data.metric.to_list())
194
195 data_dict = data.iloc[-1].to_dict()
196 _LOG.info("Local run complete: %s ::\n%s", self, data_dict)
197 return (Status.SUCCEEDED, data_dict) if data_dict else (Status.FAILED, None)
198
199 def status(self) -> Tuple[Status, List[Tuple[datetime, str, Any]]]:
200
201 (status, _) = super().status()
202 if not (self._is_ready and self._read_telemetry_file):
203 return (status, [])
204
205 assert self._temp_dir is not None
206 try:
207 # FIXME: We should not be assuming that the only output file type is a CSV.
208 data: pandas.DataFrame = pandas.read_csv(
209 self._config_loader_service.resolve_path(
210 self._read_telemetry_file, extra_paths=[self._temp_dir]),
211 parse_dates=[0],
212 )
213 except FileNotFoundError as ex:
214 _LOG.warning("Telemetry CSV file not found: %s :: %s", self._read_telemetry_file, ex)
215 return (status, [])
216
217 if sys.platform == 'win32':
218 data.rename(str.rstrip, axis='columns', inplace=True)
219
220 _LOG.debug("Read telemetry data:\n%s", data)
221 if list(data.columns) != ["timestamp", "metric", "value"]:
222 _LOG.warning(
223 'Telemetry CSV file should have columns ["timestamp", "metric", "value"] :: %s',
224 self._read_telemetry_file)
225
226 col_dtypes: Mapping[int, Type] = {0: datetime}
227 return (status, [
228 (pandas.Timestamp(ts).to_pydatetime(), metric, value)
229 for (ts, metric, value) in data.to_records(index=False, column_dtypes=col_dtypes)
230 ])
231
232 def teardown(self) -> None:
233 """
234 Clean up the local environment.
235 """
236 if self._script_teardown:
237 _LOG.info("Local teardown: %s", self)
238 return_code = self._local_exec(self._script_teardown)
239 _LOG.info("Local teardown complete: %s :: %s", self, return_code)
240 super().teardown()
241
242 def _local_exec(self, script: Iterable[str], cwd: Optional[str] = None) -> int:
243 """
244 Execute a script locally in the scheduler environment.
245
246 Parameters
247 ----------
248 script : Iterable[str]
249 Lines of the script to run locally.
250 Treat every line as a separate command to run.
251 cwd : Optional[str]
252 Work directory to run the script at.
253
254 Returns
255 -------
256 return_code : int
257 Return code of the script. 0 if successful.
258 """
259 env_params = self._get_env_params()
260 _LOG.info("Run script locally on: %s at %s with env %s", self, cwd, env_params)
261 (return_code, _stdout, stderr) = self._local_exec_service.local_exec(
262 script, env=env_params, cwd=cwd)
263 if return_code != 0:
264 _LOG.warning("ERROR: Local script returns code %d stderr:\n%s", return_code, stderr)
265 return return_code
266
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mlos_bench/mlos_bench/environments/local/local_env.py b/mlos_bench/mlos_bench/environments/local/local_env.py
--- a/mlos_bench/mlos_bench/environments/local/local_env.py
+++ b/mlos_bench/mlos_bench/environments/local/local_env.py
@@ -179,22 +179,36 @@
_LOG.debug("Not reading the data at: %s", self)
return (Status.SUCCEEDED, {})
- data: pandas.DataFrame = pandas.read_csv(
+ data = self._normalize_columns(pandas.read_csv(
self._config_loader_service.resolve_path(
- self._read_results_file, extra_paths=[self._temp_dir]))
-
- if sys.platform == 'win32':
- data.rename(str.rstrip, axis='columns', inplace=True)
+ self._read_results_file, extra_paths=[self._temp_dir]),
+ index_col=False,
+ ))
_LOG.debug("Read data:\n%s", data)
if list(data.columns) == ["metric", "value"]:
- _LOG.warning(
- "Local run has %d rows: assume long format of (metric, value)", len(data))
+ _LOG.info("Local results have (metric,value) header and %d rows: assume long format", len(data))
data = pandas.DataFrame([data.value.to_list()], columns=data.metric.to_list())
+ elif len(data) == 1:
+ _LOG.info("Local results have 1 row: assume wide format")
+ else:
+ raise ValueError(f"Invalid data format: {data}")
data_dict = data.iloc[-1].to_dict()
_LOG.info("Local run complete: %s ::\n%s", self, data_dict)
- return (Status.SUCCEEDED, data_dict) if data_dict else (Status.FAILED, None)
+ return (Status.SUCCEEDED, data_dict)
+
+ @staticmethod
+ def _normalize_columns(data: pandas.DataFrame) -> pandas.DataFrame:
+ """
+ Strip trailing spaces from column names (Windows only).
+ """
+ # Windows cmd interpretation of > redirect symbols can leave trailing spaces in
+ # the final column, which leads to misnamed columns.
+ # For now, we simply strip trailing spaces from column names to account for that.
+ if sys.platform == 'win32':
+ data.rename(str.rstrip, axis='columns', inplace=True)
+ return data
def status(self) -> Tuple[Status, List[Tuple[datetime, str, Any]]]:
@@ -204,25 +218,25 @@
assert self._temp_dir is not None
try:
+ fname = self._config_loader_service.resolve_path(
+ self._read_telemetry_file, extra_paths=[self._temp_dir])
+
# FIXME: We should not be assuming that the only output file type is a CSV.
- data: pandas.DataFrame = pandas.read_csv(
- self._config_loader_service.resolve_path(
- self._read_telemetry_file, extra_paths=[self._temp_dir]),
- parse_dates=[0],
- )
+ data = self._normalize_columns(
+ pandas.read_csv(fname, index_col=False, parse_dates=[0]))
+
+ expected_col_names = ["timestamp", "metric", "value"]
+ if len(data.columns) != len(expected_col_names):
+ raise ValueError(f'Telemetry data must have columns {expected_col_names}')
+ elif list(data.columns) != expected_col_names:
+ # Assume no header - this is ok for telemetry data.
+ data = pandas.read_csv(
+ fname, index_col=False, parse_dates=[0], names=expected_col_names)
except FileNotFoundError as ex:
_LOG.warning("Telemetry CSV file not found: %s :: %s", self._read_telemetry_file, ex)
return (status, [])
- if sys.platform == 'win32':
- data.rename(str.rstrip, axis='columns', inplace=True)
-
_LOG.debug("Read telemetry data:\n%s", data)
- if list(data.columns) != ["timestamp", "metric", "value"]:
- _LOG.warning(
- 'Telemetry CSV file should have columns ["timestamp", "metric", "value"] :: %s',
- self._read_telemetry_file)
-
col_dtypes: Mapping[int, Type] = {0: datetime}
return (status, [
(pandas.Timestamp(ts).to_pydatetime(), metric, value)
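The telemetry half of this diff likewise relaxes the accepted input: the `["timestamp", "metric", "value"]` header becomes optional, and a wrong column count now raises instead of only being logged. A standalone sketch of exercising that behavior (again with an assumed helper that mirrors the patched logic, not an import from mlos_bench):
```
# Illustration only: shows that telemetry CSVs with or without a header row
# parse to the same rows, mirroring the patched status() logic.
import pandas
import pytest

_COLS = ["timestamp", "metric", "value"]


def _read_telemetry(csv_path: str) -> list:
    data = pandas.read_csv(csv_path, index_col=False, parse_dates=[0])
    if len(data.columns) != len(_COLS):
        raise ValueError(f"Telemetry data must have columns {_COLS}")
    elif list(data.columns) != _COLS:
        # No header row: re-read with explicit column names.
        data = pandas.read_csv(csv_path, index_col=False, parse_dates=[0], names=_COLS)
    return list(data.itertuples(index=False, name=None))


@pytest.mark.parametrize("csv_text", [
    "timestamp,metric,value\n2023-01-01 00:00:00,cpu,0.5\n",  # with header
    "2023-01-01 00:00:00,cpu,0.5\n",                          # without header
])
def test_read_telemetry_with_or_without_header(tmp_path, csv_text) -> None:
    telemetry_file = tmp_path / "telemetry.csv"
    telemetry_file.write_text(csv_text)
    rows = _read_telemetry(str(telemetry_file))
    assert len(rows) == 1
    assert rows[0][1] == "cpu" and rows[0][2] == 0.5
```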
|
{"golden_diff": "diff --git a/mlos_bench/mlos_bench/environments/local/local_env.py b/mlos_bench/mlos_bench/environments/local/local_env.py\n--- a/mlos_bench/mlos_bench/environments/local/local_env.py\n+++ b/mlos_bench/mlos_bench/environments/local/local_env.py\n@@ -179,22 +179,36 @@\n _LOG.debug(\"Not reading the data at: %s\", self)\n return (Status.SUCCEEDED, {})\n \n- data: pandas.DataFrame = pandas.read_csv(\n+ data = self._normalize_columns(pandas.read_csv(\n self._config_loader_service.resolve_path(\n- self._read_results_file, extra_paths=[self._temp_dir]))\n-\n- if sys.platform == 'win32':\n- data.rename(str.rstrip, axis='columns', inplace=True)\n+ self._read_results_file, extra_paths=[self._temp_dir]),\n+ index_col=False,\n+ ))\n \n _LOG.debug(\"Read data:\\n%s\", data)\n if list(data.columns) == [\"metric\", \"value\"]:\n- _LOG.warning(\n- \"Local run has %d rows: assume long format of (metric, value)\", len(data))\n+ _LOG.info(\"Local results have (metric,value) header and %d rows: assume long format\", len(data))\n data = pandas.DataFrame([data.value.to_list()], columns=data.metric.to_list())\n+ elif len(data) == 1:\n+ _LOG.info(\"Local results have 1 row: assume wide format\")\n+ else:\n+ raise ValueError(f\"Invalid data format: {data}\")\n \n data_dict = data.iloc[-1].to_dict()\n _LOG.info(\"Local run complete: %s ::\\n%s\", self, data_dict)\n- return (Status.SUCCEEDED, data_dict) if data_dict else (Status.FAILED, None)\n+ return (Status.SUCCEEDED, data_dict)\n+\n+ @staticmethod\n+ def _normalize_columns(data: pandas.DataFrame) -> pandas.DataFrame:\n+ \"\"\"\n+ Strip trailing spaces from column names (Windows only).\n+ \"\"\"\n+ # Windows cmd interpretation of > redirect symbols can leave trailing spaces in\n+ # the final column, which leads to misnamed columns.\n+ # For now, we simply strip trailing spaces from column names to account for that.\n+ if sys.platform == 'win32':\n+ data.rename(str.rstrip, axis='columns', inplace=True)\n+ return data\n \n def status(self) -> Tuple[Status, List[Tuple[datetime, str, Any]]]:\n \n@@ -204,25 +218,25 @@\n \n assert self._temp_dir is not None\n try:\n+ fname = self._config_loader_service.resolve_path(\n+ self._read_telemetry_file, extra_paths=[self._temp_dir])\n+\n # FIXME: We should not be assuming that the only output file type is a CSV.\n- data: pandas.DataFrame = pandas.read_csv(\n- self._config_loader_service.resolve_path(\n- self._read_telemetry_file, extra_paths=[self._temp_dir]),\n- parse_dates=[0],\n- )\n+ data = self._normalize_columns(\n+ pandas.read_csv(fname, index_col=False, parse_dates=[0]))\n+\n+ expected_col_names = [\"timestamp\", \"metric\", \"value\"]\n+ if len(data.columns) != len(expected_col_names):\n+ raise ValueError(f'Telemetry data must have columns {expected_col_names}')\n+ elif list(data.columns) != expected_col_names:\n+ # Assume no header - this is ok for telemetry data.\n+ data = pandas.read_csv(\n+ fname, index_col=False, parse_dates=[0], names=expected_col_names)\n except FileNotFoundError as ex:\n _LOG.warning(\"Telemetry CSV file not found: %s :: %s\", self._read_telemetry_file, ex)\n return (status, [])\n \n- if sys.platform == 'win32':\n- data.rename(str.rstrip, axis='columns', inplace=True)\n-\n _LOG.debug(\"Read telemetry data:\\n%s\", data)\n- if list(data.columns) != [\"timestamp\", \"metric\", \"value\"]:\n- _LOG.warning(\n- 'Telemetry CSV file should have columns [\"timestamp\", \"metric\", \"value\"] :: %s',\n- self._read_telemetry_file)\n-\n col_dtypes: Mapping[int, Type] = {0: datetime}\n 
return (status, [\n (pandas.Timestamp(ts).to_pydatetime(), metric, value)\n", "issue": "mlos_bench: read results file format tests\n@poojanilangekar, could you please add some tests for the changes in #460 so that we make sure that the code accepts both input file formats we expect to be able to support? Thanks!\n", "before_files": [{"content": "#\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n#\n\"\"\"\nScheduler-side benchmark environment to run scripts locally.\n\"\"\"\n\nimport json\nimport logging\nimport sys\n\nfrom datetime import datetime\nfrom tempfile import TemporaryDirectory\nfrom contextlib import nullcontext\n\nfrom types import TracebackType\nfrom typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Type, Union\nfrom typing_extensions import Literal\n\nimport pandas\n\nfrom mlos_bench.environments.status import Status\nfrom mlos_bench.environments.base_environment import Environment\nfrom mlos_bench.environments.script_env import ScriptEnv\nfrom mlos_bench.services.base_service import Service\nfrom mlos_bench.services.types.local_exec_type import SupportsLocalExec\nfrom mlos_bench.tunables.tunable_groups import TunableGroups\nfrom mlos_bench.util import path_join\n\n_LOG = logging.getLogger(__name__)\n\n\nclass LocalEnv(ScriptEnv):\n # pylint: disable=too-many-instance-attributes\n \"\"\"\n Scheduler-side Environment that runs scripts locally.\n \"\"\"\n\n def __init__(self,\n *,\n name: str,\n config: dict,\n global_config: Optional[dict] = None,\n tunables: Optional[TunableGroups] = None,\n service: Optional[Service] = None):\n \"\"\"\n Create a new environment for local execution.\n\n Parameters\n ----------\n name: str\n Human-readable name of the environment.\n config : dict\n Free-format dictionary that contains the benchmark environment\n configuration. 
Each config must have at least the \"tunable_params\"\n and the \"const_args\" sections.\n `LocalEnv` must also have at least some of the following parameters:\n {setup, run, teardown, dump_params_file, read_results_file}\n global_config : dict\n Free-format dictionary of global parameters (e.g., security credentials)\n to be mixed in into the \"const_args\" section of the local config.\n tunables : TunableGroups\n A collection of tunable parameters for *all* environments.\n service: Service\n An optional service object (e.g., providing methods to\n deploy or reboot a VM, etc.).\n \"\"\"\n super().__init__(name=name, config=config, global_config=global_config,\n tunables=tunables, service=service)\n\n assert self._service is not None and isinstance(self._service, SupportsLocalExec), \\\n \"LocalEnv requires a service that supports local execution\"\n self._local_exec_service: SupportsLocalExec = self._service\n\n self._temp_dir: Optional[str] = None\n self._temp_dir_context: Union[TemporaryDirectory, nullcontext, None] = None\n\n self._dump_params_file: Optional[str] = self.config.get(\"dump_params_file\")\n self._dump_meta_file: Optional[str] = self.config.get(\"dump_meta_file\")\n\n self._read_results_file: Optional[str] = self.config.get(\"read_results_file\")\n self._read_telemetry_file: Optional[str] = self.config.get(\"read_telemetry_file\")\n\n def __enter__(self) -> Environment:\n assert self._temp_dir is None and self._temp_dir_context is None\n self._temp_dir_context = self._local_exec_service.temp_dir_context(self.config.get(\"temp_dir\"))\n self._temp_dir = self._temp_dir_context.__enter__()\n return super().__enter__()\n\n def __exit__(self, ex_type: Optional[Type[BaseException]],\n ex_val: Optional[BaseException],\n ex_tb: Optional[TracebackType]) -> Literal[False]:\n \"\"\"\n Exit the context of the benchmarking environment.\n \"\"\"\n assert not (self._temp_dir is None or self._temp_dir_context is None)\n self._temp_dir_context.__exit__(ex_type, ex_val, ex_tb)\n self._temp_dir = None\n self._temp_dir_context = None\n return super().__exit__(ex_type, ex_val, ex_tb)\n\n def setup(self, tunables: TunableGroups, global_config: Optional[dict] = None) -> bool:\n \"\"\"\n Check if the environment is ready and set up the application\n and benchmarks, if necessary.\n\n Parameters\n ----------\n tunables : TunableGroups\n A collection of tunable OS and application parameters along with their\n values. 
In a local environment these could be used to prepare a config\n file on the scheduler prior to transferring it to the remote environment,\n for instance.\n global_config : dict\n Free-format dictionary of global parameters of the environment\n that are not used in the optimization process.\n\n Returns\n -------\n is_success : bool\n True if operation is successful, false otherwise.\n \"\"\"\n if not super().setup(tunables, global_config):\n return False\n\n _LOG.info(\"Set up the environment locally: '%s' at %s\", self, self._temp_dir)\n assert self._temp_dir is not None\n\n if self._dump_params_file:\n fname = path_join(self._temp_dir, self._dump_params_file)\n _LOG.debug(\"Dump tunables to file: %s\", fname)\n with open(fname, \"wt\", encoding=\"utf-8\") as fh_tunables:\n # json.dump(self._params, fh_tunables) # Tunables *and* const_args\n json.dump(self._tunable_params.get_param_values(), fh_tunables)\n\n if self._dump_meta_file:\n fname = path_join(self._temp_dir, self._dump_meta_file)\n _LOG.debug(\"Dump tunables metadata to file: %s\", fname)\n with open(fname, \"wt\", encoding=\"utf-8\") as fh_meta:\n json.dump({\n tunable.name: tunable.meta\n for (tunable, _group) in self._tunable_params if tunable.meta\n }, fh_meta)\n\n if self._script_setup:\n return_code = self._local_exec(self._script_setup, self._temp_dir)\n self._is_ready = bool(return_code == 0)\n else:\n self._is_ready = True\n\n return self._is_ready\n\n def run(self) -> Tuple[Status, Optional[Dict[str, float]]]:\n \"\"\"\n Run a script in the local scheduler environment.\n\n Returns\n -------\n (status, output) : (Status, dict)\n A pair of (Status, output) values, where `output` is a dict\n with the results or None if the status is not COMPLETED.\n If run script is a benchmark, then the score is usually expected to\n be in the `score` field.\n \"\"\"\n (status, _) = result = super().run()\n if not status.is_ready():\n return result\n\n assert self._temp_dir is not None\n\n if self._script_run:\n return_code = self._local_exec(self._script_run, self._temp_dir)\n if return_code != 0:\n return (Status.FAILED, None)\n\n # FIXME: We should not be assuming that the only output file type is a CSV.\n if not self._read_results_file:\n _LOG.debug(\"Not reading the data at: %s\", self)\n return (Status.SUCCEEDED, {})\n\n data: pandas.DataFrame = pandas.read_csv(\n self._config_loader_service.resolve_path(\n self._read_results_file, extra_paths=[self._temp_dir]))\n\n if sys.platform == 'win32':\n data.rename(str.rstrip, axis='columns', inplace=True)\n\n _LOG.debug(\"Read data:\\n%s\", data)\n if list(data.columns) == [\"metric\", \"value\"]:\n _LOG.warning(\n \"Local run has %d rows: assume long format of (metric, value)\", len(data))\n data = pandas.DataFrame([data.value.to_list()], columns=data.metric.to_list())\n\n data_dict = data.iloc[-1].to_dict()\n _LOG.info(\"Local run complete: %s ::\\n%s\", self, data_dict)\n return (Status.SUCCEEDED, data_dict) if data_dict else (Status.FAILED, None)\n\n def status(self) -> Tuple[Status, List[Tuple[datetime, str, Any]]]:\n\n (status, _) = super().status()\n if not (self._is_ready and self._read_telemetry_file):\n return (status, [])\n\n assert self._temp_dir is not None\n try:\n # FIXME: We should not be assuming that the only output file type is a CSV.\n data: pandas.DataFrame = pandas.read_csv(\n self._config_loader_service.resolve_path(\n self._read_telemetry_file, extra_paths=[self._temp_dir]),\n parse_dates=[0],\n )\n except FileNotFoundError as ex:\n _LOG.warning(\"Telemetry CSV 
file not found: %s :: %s\", self._read_telemetry_file, ex)\n return (status, [])\n\n if sys.platform == 'win32':\n data.rename(str.rstrip, axis='columns', inplace=True)\n\n _LOG.debug(\"Read telemetry data:\\n%s\", data)\n if list(data.columns) != [\"timestamp\", \"metric\", \"value\"]:\n _LOG.warning(\n 'Telemetry CSV file should have columns [\"timestamp\", \"metric\", \"value\"] :: %s',\n self._read_telemetry_file)\n\n col_dtypes: Mapping[int, Type] = {0: datetime}\n return (status, [\n (pandas.Timestamp(ts).to_pydatetime(), metric, value)\n for (ts, metric, value) in data.to_records(index=False, column_dtypes=col_dtypes)\n ])\n\n def teardown(self) -> None:\n \"\"\"\n Clean up the local environment.\n \"\"\"\n if self._script_teardown:\n _LOG.info(\"Local teardown: %s\", self)\n return_code = self._local_exec(self._script_teardown)\n _LOG.info(\"Local teardown complete: %s :: %s\", self, return_code)\n super().teardown()\n\n def _local_exec(self, script: Iterable[str], cwd: Optional[str] = None) -> int:\n \"\"\"\n Execute a script locally in the scheduler environment.\n\n Parameters\n ----------\n script : Iterable[str]\n Lines of the script to run locally.\n Treat every line as a separate command to run.\n cwd : Optional[str]\n Work directory to run the script at.\n\n Returns\n -------\n return_code : int\n Return code of the script. 0 if successful.\n \"\"\"\n env_params = self._get_env_params()\n _LOG.info(\"Run script locally on: %s at %s with env %s\", self, cwd, env_params)\n (return_code, _stdout, stderr) = self._local_exec_service.local_exec(\n script, env=env_params, cwd=cwd)\n if return_code != 0:\n _LOG.warning(\"ERROR: Local script returns code %d stderr:\\n%s\", return_code, stderr)\n return return_code\n", "path": "mlos_bench/mlos_bench/environments/local/local_env.py"}], "after_files": [{"content": "#\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n#\n\"\"\"\nScheduler-side benchmark environment to run scripts locally.\n\"\"\"\n\nimport json\nimport logging\nimport sys\n\nfrom datetime import datetime\nfrom tempfile import TemporaryDirectory\nfrom contextlib import nullcontext\n\nfrom types import TracebackType\nfrom typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Type, Union\nfrom typing_extensions import Literal\n\nimport pandas\n\nfrom mlos_bench.environments.status import Status\nfrom mlos_bench.environments.base_environment import Environment\nfrom mlos_bench.environments.script_env import ScriptEnv\nfrom mlos_bench.services.base_service import Service\nfrom mlos_bench.services.types.local_exec_type import SupportsLocalExec\nfrom mlos_bench.tunables.tunable_groups import TunableGroups\nfrom mlos_bench.util import path_join\n\n_LOG = logging.getLogger(__name__)\n\n\nclass LocalEnv(ScriptEnv):\n # pylint: disable=too-many-instance-attributes\n \"\"\"\n Scheduler-side Environment that runs scripts locally.\n \"\"\"\n\n def __init__(self,\n *,\n name: str,\n config: dict,\n global_config: Optional[dict] = None,\n tunables: Optional[TunableGroups] = None,\n service: Optional[Service] = None):\n \"\"\"\n Create a new environment for local execution.\n\n Parameters\n ----------\n name: str\n Human-readable name of the environment.\n config : dict\n Free-format dictionary that contains the benchmark environment\n configuration. 
Each config must have at least the \"tunable_params\"\n and the \"const_args\" sections.\n `LocalEnv` must also have at least some of the following parameters:\n {setup, run, teardown, dump_params_file, read_results_file}\n global_config : dict\n Free-format dictionary of global parameters (e.g., security credentials)\n to be mixed in into the \"const_args\" section of the local config.\n tunables : TunableGroups\n A collection of tunable parameters for *all* environments.\n service: Service\n An optional service object (e.g., providing methods to\n deploy or reboot a VM, etc.).\n \"\"\"\n super().__init__(name=name, config=config, global_config=global_config,\n tunables=tunables, service=service)\n\n assert self._service is not None and isinstance(self._service, SupportsLocalExec), \\\n \"LocalEnv requires a service that supports local execution\"\n self._local_exec_service: SupportsLocalExec = self._service\n\n self._temp_dir: Optional[str] = None\n self._temp_dir_context: Union[TemporaryDirectory, nullcontext, None] = None\n\n self._dump_params_file: Optional[str] = self.config.get(\"dump_params_file\")\n self._dump_meta_file: Optional[str] = self.config.get(\"dump_meta_file\")\n\n self._read_results_file: Optional[str] = self.config.get(\"read_results_file\")\n self._read_telemetry_file: Optional[str] = self.config.get(\"read_telemetry_file\")\n\n def __enter__(self) -> Environment:\n assert self._temp_dir is None and self._temp_dir_context is None\n self._temp_dir_context = self._local_exec_service.temp_dir_context(self.config.get(\"temp_dir\"))\n self._temp_dir = self._temp_dir_context.__enter__()\n return super().__enter__()\n\n def __exit__(self, ex_type: Optional[Type[BaseException]],\n ex_val: Optional[BaseException],\n ex_tb: Optional[TracebackType]) -> Literal[False]:\n \"\"\"\n Exit the context of the benchmarking environment.\n \"\"\"\n assert not (self._temp_dir is None or self._temp_dir_context is None)\n self._temp_dir_context.__exit__(ex_type, ex_val, ex_tb)\n self._temp_dir = None\n self._temp_dir_context = None\n return super().__exit__(ex_type, ex_val, ex_tb)\n\n def setup(self, tunables: TunableGroups, global_config: Optional[dict] = None) -> bool:\n \"\"\"\n Check if the environment is ready and set up the application\n and benchmarks, if necessary.\n\n Parameters\n ----------\n tunables : TunableGroups\n A collection of tunable OS and application parameters along with their\n values. 
In a local environment these could be used to prepare a config\n file on the scheduler prior to transferring it to the remote environment,\n for instance.\n global_config : dict\n Free-format dictionary of global parameters of the environment\n that are not used in the optimization process.\n\n Returns\n -------\n is_success : bool\n True if operation is successful, false otherwise.\n \"\"\"\n if not super().setup(tunables, global_config):\n return False\n\n _LOG.info(\"Set up the environment locally: '%s' at %s\", self, self._temp_dir)\n assert self._temp_dir is not None\n\n if self._dump_params_file:\n fname = path_join(self._temp_dir, self._dump_params_file)\n _LOG.debug(\"Dump tunables to file: %s\", fname)\n with open(fname, \"wt\", encoding=\"utf-8\") as fh_tunables:\n # json.dump(self._params, fh_tunables) # Tunables *and* const_args\n json.dump(self._tunable_params.get_param_values(), fh_tunables)\n\n if self._dump_meta_file:\n fname = path_join(self._temp_dir, self._dump_meta_file)\n _LOG.debug(\"Dump tunables metadata to file: %s\", fname)\n with open(fname, \"wt\", encoding=\"utf-8\") as fh_meta:\n json.dump({\n tunable.name: tunable.meta\n for (tunable, _group) in self._tunable_params if tunable.meta\n }, fh_meta)\n\n if self._script_setup:\n return_code = self._local_exec(self._script_setup, self._temp_dir)\n self._is_ready = bool(return_code == 0)\n else:\n self._is_ready = True\n\n return self._is_ready\n\n def run(self) -> Tuple[Status, Optional[Dict[str, float]]]:\n \"\"\"\n Run a script in the local scheduler environment.\n\n Returns\n -------\n (status, output) : (Status, dict)\n A pair of (Status, output) values, where `output` is a dict\n with the results or None if the status is not COMPLETED.\n If run script is a benchmark, then the score is usually expected to\n be in the `score` field.\n \"\"\"\n (status, _) = result = super().run()\n if not status.is_ready():\n return result\n\n assert self._temp_dir is not None\n\n if self._script_run:\n return_code = self._local_exec(self._script_run, self._temp_dir)\n if return_code != 0:\n return (Status.FAILED, None)\n\n # FIXME: We should not be assuming that the only output file type is a CSV.\n if not self._read_results_file:\n _LOG.debug(\"Not reading the data at: %s\", self)\n return (Status.SUCCEEDED, {})\n\n data = self._normalize_columns(pandas.read_csv(\n self._config_loader_service.resolve_path(\n self._read_results_file, extra_paths=[self._temp_dir]),\n index_col=False,\n ))\n\n _LOG.debug(\"Read data:\\n%s\", data)\n if list(data.columns) == [\"metric\", \"value\"]:\n _LOG.info(\"Local results have (metric,value) header and %d rows: assume long format\", len(data))\n data = pandas.DataFrame([data.value.to_list()], columns=data.metric.to_list())\n elif len(data) == 1:\n _LOG.info(\"Local results have 1 row: assume wide format\")\n else:\n raise ValueError(f\"Invalid data format: {data}\")\n\n data_dict = data.iloc[-1].to_dict()\n _LOG.info(\"Local run complete: %s ::\\n%s\", self, data_dict)\n return (Status.SUCCEEDED, data_dict)\n\n @staticmethod\n def _normalize_columns(data: pandas.DataFrame) -> pandas.DataFrame:\n \"\"\"\n Strip trailing spaces from column names (Windows only).\n \"\"\"\n # Windows cmd interpretation of > redirect symbols can leave trailing spaces in\n # the final column, which leads to misnamed columns.\n # For now, we simply strip trailing spaces from column names to account for that.\n if sys.platform == 'win32':\n data.rename(str.rstrip, axis='columns', inplace=True)\n return data\n\n def 
status(self) -> Tuple[Status, List[Tuple[datetime, str, Any]]]:\n\n (status, _) = super().status()\n if not (self._is_ready and self._read_telemetry_file):\n return (status, [])\n\n assert self._temp_dir is not None\n try:\n fname = self._config_loader_service.resolve_path(\n self._read_telemetry_file, extra_paths=[self._temp_dir])\n\n # FIXME: We should not be assuming that the only output file type is a CSV.\n data = self._normalize_columns(\n pandas.read_csv(fname, index_col=False, parse_dates=[0]))\n\n expected_col_names = [\"timestamp\", \"metric\", \"value\"]\n if len(data.columns) != len(expected_col_names):\n raise ValueError(f'Telemetry data must have columns {expected_col_names}')\n elif list(data.columns) != expected_col_names:\n # Assume no header - this is ok for telemetry data.\n data = pandas.read_csv(\n fname, index_col=False, parse_dates=[0], names=expected_col_names)\n except FileNotFoundError as ex:\n _LOG.warning(\"Telemetry CSV file not found: %s :: %s\", self._read_telemetry_file, ex)\n return (status, [])\n\n _LOG.debug(\"Read telemetry data:\\n%s\", data)\n col_dtypes: Mapping[int, Type] = {0: datetime}\n return (status, [\n (pandas.Timestamp(ts).to_pydatetime(), metric, value)\n for (ts, metric, value) in data.to_records(index=False, column_dtypes=col_dtypes)\n ])\n\n def teardown(self) -> None:\n \"\"\"\n Clean up the local environment.\n \"\"\"\n if self._script_teardown:\n _LOG.info(\"Local teardown: %s\", self)\n return_code = self._local_exec(self._script_teardown)\n _LOG.info(\"Local teardown complete: %s :: %s\", self, return_code)\n super().teardown()\n\n def _local_exec(self, script: Iterable[str], cwd: Optional[str] = None) -> int:\n \"\"\"\n Execute a script locally in the scheduler environment.\n\n Parameters\n ----------\n script : Iterable[str]\n Lines of the script to run locally.\n Treat every line as a separate command to run.\n cwd : Optional[str]\n Work directory to run the script at.\n\n Returns\n -------\n return_code : int\n Return code of the script. 0 if successful.\n \"\"\"\n env_params = self._get_env_params()\n _LOG.info(\"Run script locally on: %s at %s with env %s\", self, cwd, env_params)\n (return_code, _stdout, stderr) = self._local_exec_service.local_exec(\n script, env=env_params, cwd=cwd)\n if return_code != 0:\n _LOG.warning(\"ERROR: Local script returns code %d stderr:\\n%s\", return_code, stderr)\n return return_code\n", "path": "mlos_bench/mlos_bench/environments/local/local_env.py"}]}
| 3,388 | 987 |
gh_patches_debug_3340
|
rasdani/github-patches
|
git_diff
|
encode__starlette-1349
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Insufficient input validation of content-type 'multipart/form-data'
### Checklist
- [x] The bug is reproducible against the latest release and/or `master`.
- [x] There are no similar issues or pull requests to fix it yet.
### Describe the bug
Not actually a bug, but insufficient input validation.
### To reproduce
Provide POST input (e.g. with Postman) with content-type 'multipart/form-data' but without a _boundary_ parameter.
### Expected behavior
It should return a 4xx error code. According to RFC 7578, the boundary is a required parameter.
### Actual behavior
A _None_ boundary is passed to the multipart library, and an exception is raised there.
### Debugging material
```
TypeError: can't concat NoneType to bytes
File "starlette/middleware/trustedhost.py", line 51, in __call__
await self.app(scope, receive, send)
File "starlette/middleware/sessions.py", line 75, in __call__
await self.app(scope, receive, send_wrapper)
File "cpms/middleware.py", line 55, in __call__
await self.app(scope, receive, send)
File "starlette/middleware/authentication.py", line 48, in __call__
await self.app(scope, receive, send)
File "starlette/exceptions.py", line 82, in __call__
raise exc from None
File "starlette/exceptions.py", line 71, in __call__
await self.app(scope, receive, sender)
File "starlette/routing.py", line 582, in __call__
await route.handle(scope, receive, send)
File "starlette/routing.py", line 243, in handle
await self.app(scope, receive, send)
File "starlette/routing.py", line 54, in app
response = await func(request)
File "starlette/authentication.py", line 69, in async_wrapper
return await func(*args, **kwargs)
File "cpms/views/files.py", line 90, in decorator
return await endpoint(request)
File "cpms/views/files.py", line 133, in upload_files
for f_name, f_value in (await request.form()).items():
File "starlette/requests.py", line 240, in form
self._form = await multipart_parser.parse()
File "starlette/formparsers.py", line 181, in parse
parser = multipart.MultipartParser(boundary, callbacks)
File "multipart/multipart.py", line 1042, in __init__
self.boundary = b'\r\n--' + boundary
```
### Environment
- OS: OpenBSD 6.7
- Python version: 3.7.9
- Starlette version: 0.14.1
### Additional context
The problem occurs due to insufficient checking of the input on line 166 of starlette/formparsers.py
` boundary = params.get(b"boundary")`
It is better to replace it with:
` boundary = params[b"boundary"]`
so that an exception is raised at an early stage.
Alternatively, add explicit input validation:
```
boundary = params.get(b"boundary")
if not boundary:
raise HTTPException(status_code=400)
```
--- END ISSUE ---
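As context before the code, the second remedy sketched above can also be applied at the application level, before `request.form()` is ever reached. The snippet below is a hedged illustration of that idea only — the route, endpoint, and 400 response are assumptions, not Starlette's eventual fix:
```
# Sketch of an application-level guard (assumed example, not Starlette's own fix):
# reject multipart requests whose Content-Type lacks the RFC 7578-mandated
# boundary parameter before the body reaches the multipart parser.
from starlette.applications import Starlette
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.responses import JSONResponse
from starlette.routing import Route


async def upload(request: Request) -> JSONResponse:
    content_type = request.headers.get("content-type", "")
    if content_type.startswith("multipart/form-data") and "boundary=" not in content_type:
        raise HTTPException(status_code=400, detail="Missing multipart boundary")
    form = await request.form()  # requires the python-multipart package
    return JSONResponse({"fields": sorted(form.keys())})


app = Starlette(routes=[Route("/upload", upload, methods=["POST"])])
```
Starlette's default `ExceptionMiddleware` converts the `HTTPException` into the 4xx response the report asks for.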
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlette/formparsers.py`
Content:
```
1 import typing
2 from enum import Enum
3 from urllib.parse import unquote_plus
4
5 from starlette.datastructures import FormData, Headers, UploadFile
6
7 try:
8 import multipart
9 from multipart.multipart import parse_options_header
10 except ImportError: # pragma: nocover
11 parse_options_header = None
12 multipart = None
13
14
15 class FormMessage(Enum):
16 FIELD_START = 1
17 FIELD_NAME = 2
18 FIELD_DATA = 3
19 FIELD_END = 4
20 END = 5
21
22
23 class MultiPartMessage(Enum):
24 PART_BEGIN = 1
25 PART_DATA = 2
26 PART_END = 3
27 HEADER_FIELD = 4
28 HEADER_VALUE = 5
29 HEADER_END = 6
30 HEADERS_FINISHED = 7
31 END = 8
32
33
34 def _user_safe_decode(src: bytes, codec: str) -> str:
35 try:
36 return src.decode(codec)
37 except (UnicodeDecodeError, LookupError):
38 return src.decode("latin-1")
39
40
41 class FormParser:
42 def __init__(
43 self, headers: Headers, stream: typing.AsyncGenerator[bytes, None]
44 ) -> None:
45 assert (
46 multipart is not None
47 ), "The `python-multipart` library must be installed to use form parsing."
48 self.headers = headers
49 self.stream = stream
50 self.messages: typing.List[typing.Tuple[FormMessage, bytes]] = []
51
52 def on_field_start(self) -> None:
53 message = (FormMessage.FIELD_START, b"")
54 self.messages.append(message)
55
56 def on_field_name(self, data: bytes, start: int, end: int) -> None:
57 message = (FormMessage.FIELD_NAME, data[start:end])
58 self.messages.append(message)
59
60 def on_field_data(self, data: bytes, start: int, end: int) -> None:
61 message = (FormMessage.FIELD_DATA, data[start:end])
62 self.messages.append(message)
63
64 def on_field_end(self) -> None:
65 message = (FormMessage.FIELD_END, b"")
66 self.messages.append(message)
67
68 def on_end(self) -> None:
69 message = (FormMessage.END, b"")
70 self.messages.append(message)
71
72 async def parse(self) -> FormData:
73 # Callbacks dictionary.
74 callbacks = {
75 "on_field_start": self.on_field_start,
76 "on_field_name": self.on_field_name,
77 "on_field_data": self.on_field_data,
78 "on_field_end": self.on_field_end,
79 "on_end": self.on_end,
80 }
81
82 # Create the parser.
83 parser = multipart.QuerystringParser(callbacks)
84 field_name = b""
85 field_value = b""
86
87 items: typing.List[typing.Tuple[str, typing.Union[str, UploadFile]]] = []
88
89 # Feed the parser with data from the request.
90 async for chunk in self.stream:
91 if chunk:
92 parser.write(chunk)
93 else:
94 parser.finalize()
95 messages = list(self.messages)
96 self.messages.clear()
97 for message_type, message_bytes in messages:
98 if message_type == FormMessage.FIELD_START:
99 field_name = b""
100 field_value = b""
101 elif message_type == FormMessage.FIELD_NAME:
102 field_name += message_bytes
103 elif message_type == FormMessage.FIELD_DATA:
104 field_value += message_bytes
105 elif message_type == FormMessage.FIELD_END:
106 name = unquote_plus(field_name.decode("latin-1"))
107 value = unquote_plus(field_value.decode("latin-1"))
108 items.append((name, value))
109
110 return FormData(items)
111
112
113 class MultiPartParser:
114 def __init__(
115 self, headers: Headers, stream: typing.AsyncGenerator[bytes, None]
116 ) -> None:
117 assert (
118 multipart is not None
119 ), "The `python-multipart` library must be installed to use form parsing."
120 self.headers = headers
121 self.stream = stream
122 self.messages: typing.List[typing.Tuple[MultiPartMessage, bytes]] = []
123
124 def on_part_begin(self) -> None:
125 message = (MultiPartMessage.PART_BEGIN, b"")
126 self.messages.append(message)
127
128 def on_part_data(self, data: bytes, start: int, end: int) -> None:
129 message = (MultiPartMessage.PART_DATA, data[start:end])
130 self.messages.append(message)
131
132 def on_part_end(self) -> None:
133 message = (MultiPartMessage.PART_END, b"")
134 self.messages.append(message)
135
136 def on_header_field(self, data: bytes, start: int, end: int) -> None:
137 message = (MultiPartMessage.HEADER_FIELD, data[start:end])
138 self.messages.append(message)
139
140 def on_header_value(self, data: bytes, start: int, end: int) -> None:
141 message = (MultiPartMessage.HEADER_VALUE, data[start:end])
142 self.messages.append(message)
143
144 def on_header_end(self) -> None:
145 message = (MultiPartMessage.HEADER_END, b"")
146 self.messages.append(message)
147
148 def on_headers_finished(self) -> None:
149 message = (MultiPartMessage.HEADERS_FINISHED, b"")
150 self.messages.append(message)
151
152 def on_end(self) -> None:
153 message = (MultiPartMessage.END, b"")
154 self.messages.append(message)
155
156 async def parse(self) -> FormData:
157 # Parse the Content-Type header to get the multipart boundary.
158 content_type, params = parse_options_header(self.headers["Content-Type"])
159 charset = params.get(b"charset", "utf-8")
160 if type(charset) == bytes:
161 charset = charset.decode("latin-1")
162 boundary = params.get(b"boundary")
163
164 # Callbacks dictionary.
165 callbacks = {
166 "on_part_begin": self.on_part_begin,
167 "on_part_data": self.on_part_data,
168 "on_part_end": self.on_part_end,
169 "on_header_field": self.on_header_field,
170 "on_header_value": self.on_header_value,
171 "on_header_end": self.on_header_end,
172 "on_headers_finished": self.on_headers_finished,
173 "on_end": self.on_end,
174 }
175
176 # Create the parser.
177 parser = multipart.MultipartParser(boundary, callbacks)
178 header_field = b""
179 header_value = b""
180 content_disposition = None
181 content_type = b""
182 field_name = ""
183 data = b""
184 file: typing.Optional[UploadFile] = None
185
186 items: typing.List[typing.Tuple[str, typing.Union[str, UploadFile]]] = []
187 item_headers: typing.List[typing.Tuple[bytes, bytes]] = []
188
189 # Feed the parser with data from the request.
190 async for chunk in self.stream:
191 parser.write(chunk)
192 messages = list(self.messages)
193 self.messages.clear()
194 for message_type, message_bytes in messages:
195 if message_type == MultiPartMessage.PART_BEGIN:
196 content_disposition = None
197 content_type = b""
198 data = b""
199 item_headers = []
200 elif message_type == MultiPartMessage.HEADER_FIELD:
201 header_field += message_bytes
202 elif message_type == MultiPartMessage.HEADER_VALUE:
203 header_value += message_bytes
204 elif message_type == MultiPartMessage.HEADER_END:
205 field = header_field.lower()
206 if field == b"content-disposition":
207 content_disposition = header_value
208 elif field == b"content-type":
209 content_type = header_value
210 item_headers.append((field, header_value))
211 header_field = b""
212 header_value = b""
213 elif message_type == MultiPartMessage.HEADERS_FINISHED:
214 disposition, options = parse_options_header(content_disposition)
215 field_name = _user_safe_decode(options[b"name"], charset)
216 if b"filename" in options:
217 filename = _user_safe_decode(options[b"filename"], charset)
218 file = UploadFile(
219 filename=filename,
220 content_type=content_type.decode("latin-1"),
221 headers=Headers(raw=item_headers),
222 )
223 else:
224 file = None
225 elif message_type == MultiPartMessage.PART_DATA:
226 if file is None:
227 data += message_bytes
228 else:
229 await file.write(message_bytes)
230 elif message_type == MultiPartMessage.PART_END:
231 if file is None:
232 items.append((field_name, _user_safe_decode(data, charset)))
233 else:
234 await file.seek(0)
235 items.append((field_name, file))
236
237 parser.finalize()
238 return FormData(items)
239
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/starlette/formparsers.py b/starlette/formparsers.py
--- a/starlette/formparsers.py
+++ b/starlette/formparsers.py
@@ -159,7 +159,7 @@
charset = params.get(b"charset", "utf-8")
if type(charset) == bytes:
charset = charset.decode("latin-1")
- boundary = params.get(b"boundary")
+ boundary = params[b"boundary"]
# Callbacks dictionary.
callbacks = {
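A regression test for this one-line change might look roughly like the following. It is a hypothetical sketch, not taken from Starlette's test suite, and it assumes the 0.14-era `TestClient`, which re-raises unhandled server exceptions by default:
```
# Hypothetical test sketch: with the patch, a multipart request that omits the
# boundary parameter fails fast with a KeyError when the boundary is looked up,
# instead of passing boundary=None into multipart.MultipartParser.
import pytest
from starlette.applications import Starlette
from starlette.requests import Request
from starlette.responses import PlainTextResponse
from starlette.routing import Route
from starlette.testclient import TestClient


async def echo_form(request: Request) -> PlainTextResponse:
    form = await request.form()
    return PlainTextResponse(repr(dict(form)))


app = Starlette(routes=[Route("/", echo_form, methods=["POST"])])


def test_multipart_request_without_boundary() -> None:
    client = TestClient(app)  # raise_server_exceptions=True by default
    with pytest.raises(KeyError):
        client.post(
            "/",
            data=b"ignored body",
            headers={"content-type": "multipart/form-data"},  # no boundary parameter
        )
```
The failure now happens before any body bytes are parsed, which is easier to map to a 400 response than the `TypeError` raised deep inside python-multipart.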
|
{"golden_diff": "diff --git a/starlette/formparsers.py b/starlette/formparsers.py\n--- a/starlette/formparsers.py\n+++ b/starlette/formparsers.py\n@@ -159,7 +159,7 @@\n charset = params.get(b\"charset\", \"utf-8\")\n if type(charset) == bytes:\n charset = charset.decode(\"latin-1\")\n- boundary = params.get(b\"boundary\")\n+ boundary = params[b\"boundary\"]\n \n # Callbacks dictionary.\n callbacks = {\n", "issue": "Insufficient input validation of content-type 'multipart/form-data'\n### Checklist\r\n\r\n- [x] The bug is reproducible against the latest release and/or `master`.\r\n- [x] There are no similar issues or pull requests to fix it yet.\r\n\r\n### Describe the bug\r\n\r\nNot actually a bug, but insufficient input validation.\r\n\r\n### To reproduce\r\n\r\nProvide POST input (e.g. with postman) with content-type 'multipart/form-data' but without _boundary_ parameter.\r\n\r\n### Expected behavior\r\n\r\nShall return 4xx error code. According to rfc7578 boundary is a required parameter.\r\n\r\n### Actual behavior\r\n\r\n_None_ boundary is passed to multipart library and an exception is raised there.\r\n\r\n### Debugging material\r\n\r\n```\r\nTypeError: can't concat NoneType to bytes\r\n File \"starlette/middleware/trustedhost.py\", line 51, in __call__\r\n await self.app(scope, receive, send)\r\n File \"starlette/middleware/sessions.py\", line 75, in __call__\r\n await self.app(scope, receive, send_wrapper)\r\n File \"cpms/middleware.py\", line 55, in __call__\r\n await self.app(scope, receive, send)\r\n File \"starlette/middleware/authentication.py\", line 48, in __call__\r\n await self.app(scope, receive, send)\r\n File \"starlette/exceptions.py\", line 82, in __call__\r\n raise exc from None\r\n File \"starlette/exceptions.py\", line 71, in __call__\r\n await self.app(scope, receive, sender)\r\n File \"starlette/routing.py\", line 582, in __call__\r\n await route.handle(scope, receive, send)\r\n File \"starlette/routing.py\", line 243, in handle\r\n await self.app(scope, receive, send)\r\n File \"starlette/routing.py\", line 54, in app\r\n response = await func(request)\r\n File \"starlette/authentication.py\", line 69, in async_wrapper\r\n return await func(*args, **kwargs)\r\n File \"cpms/views/files.py\", line 90, in decorator\r\n return await endpoint(request)\r\n File \"cpms/views/files.py\", line 133, in upload_files\r\n for f_name, f_value in (await request.form()).items():\r\n File \"starlette/requests.py\", line 240, in form\r\n self._form = await multipart_parser.parse()\r\n File \"starlette/formparsers.py\", line 181, in parse\r\n parser = multipart.MultipartParser(boundary, callbacks)\r\n File \"multipart/multipart.py\", line 1042, in __init__\r\n self.boundary = b'\\r\\n--' + boundary\r\n```\r\n\r\n### Environment\r\n\r\n- OS: OpenBSD 6.7\r\n- Python version: 3.7.9\r\n- Starlette version: 0.14.1\r\n\r\n### Additional context\r\n\r\nThe problem occurs due insufficient checking of the input on line 166 of starlette/formparsers.py\r\n` boundary = params.get(b\"boundary\")`\r\n\r\nIt is better to replace it with:\r\n` boundary = params[b\"boundary\"]`\r\nso exception at early stage will be raised.\r\n\r\nOr make an additional input validation.\r\n```\r\n boundary = params.get(b\"boundary\")\r\n if not boundary:\r\n raise HTTPException(status_code=400)\r\n```\r\n\n", "before_files": [{"content": "import typing\nfrom enum import Enum\nfrom urllib.parse import unquote_plus\n\nfrom starlette.datastructures import FormData, Headers, UploadFile\n\ntry:\n import multipart\n from 
multipart.multipart import parse_options_header\nexcept ImportError: # pragma: nocover\n parse_options_header = None\n multipart = None\n\n\nclass FormMessage(Enum):\n FIELD_START = 1\n FIELD_NAME = 2\n FIELD_DATA = 3\n FIELD_END = 4\n END = 5\n\n\nclass MultiPartMessage(Enum):\n PART_BEGIN = 1\n PART_DATA = 2\n PART_END = 3\n HEADER_FIELD = 4\n HEADER_VALUE = 5\n HEADER_END = 6\n HEADERS_FINISHED = 7\n END = 8\n\n\ndef _user_safe_decode(src: bytes, codec: str) -> str:\n try:\n return src.decode(codec)\n except (UnicodeDecodeError, LookupError):\n return src.decode(\"latin-1\")\n\n\nclass FormParser:\n def __init__(\n self, headers: Headers, stream: typing.AsyncGenerator[bytes, None]\n ) -> None:\n assert (\n multipart is not None\n ), \"The `python-multipart` library must be installed to use form parsing.\"\n self.headers = headers\n self.stream = stream\n self.messages: typing.List[typing.Tuple[FormMessage, bytes]] = []\n\n def on_field_start(self) -> None:\n message = (FormMessage.FIELD_START, b\"\")\n self.messages.append(message)\n\n def on_field_name(self, data: bytes, start: int, end: int) -> None:\n message = (FormMessage.FIELD_NAME, data[start:end])\n self.messages.append(message)\n\n def on_field_data(self, data: bytes, start: int, end: int) -> None:\n message = (FormMessage.FIELD_DATA, data[start:end])\n self.messages.append(message)\n\n def on_field_end(self) -> None:\n message = (FormMessage.FIELD_END, b\"\")\n self.messages.append(message)\n\n def on_end(self) -> None:\n message = (FormMessage.END, b\"\")\n self.messages.append(message)\n\n async def parse(self) -> FormData:\n # Callbacks dictionary.\n callbacks = {\n \"on_field_start\": self.on_field_start,\n \"on_field_name\": self.on_field_name,\n \"on_field_data\": self.on_field_data,\n \"on_field_end\": self.on_field_end,\n \"on_end\": self.on_end,\n }\n\n # Create the parser.\n parser = multipart.QuerystringParser(callbacks)\n field_name = b\"\"\n field_value = b\"\"\n\n items: typing.List[typing.Tuple[str, typing.Union[str, UploadFile]]] = []\n\n # Feed the parser with data from the request.\n async for chunk in self.stream:\n if chunk:\n parser.write(chunk)\n else:\n parser.finalize()\n messages = list(self.messages)\n self.messages.clear()\n for message_type, message_bytes in messages:\n if message_type == FormMessage.FIELD_START:\n field_name = b\"\"\n field_value = b\"\"\n elif message_type == FormMessage.FIELD_NAME:\n field_name += message_bytes\n elif message_type == FormMessage.FIELD_DATA:\n field_value += message_bytes\n elif message_type == FormMessage.FIELD_END:\n name = unquote_plus(field_name.decode(\"latin-1\"))\n value = unquote_plus(field_value.decode(\"latin-1\"))\n items.append((name, value))\n\n return FormData(items)\n\n\nclass MultiPartParser:\n def __init__(\n self, headers: Headers, stream: typing.AsyncGenerator[bytes, None]\n ) -> None:\n assert (\n multipart is not None\n ), \"The `python-multipart` library must be installed to use form parsing.\"\n self.headers = headers\n self.stream = stream\n self.messages: typing.List[typing.Tuple[MultiPartMessage, bytes]] = []\n\n def on_part_begin(self) -> None:\n message = (MultiPartMessage.PART_BEGIN, b\"\")\n self.messages.append(message)\n\n def on_part_data(self, data: bytes, start: int, end: int) -> None:\n message = (MultiPartMessage.PART_DATA, data[start:end])\n self.messages.append(message)\n\n def on_part_end(self) -> None:\n message = (MultiPartMessage.PART_END, b\"\")\n self.messages.append(message)\n\n def on_header_field(self, data: bytes, 
start: int, end: int) -> None:\n message = (MultiPartMessage.HEADER_FIELD, data[start:end])\n self.messages.append(message)\n\n def on_header_value(self, data: bytes, start: int, end: int) -> None:\n message = (MultiPartMessage.HEADER_VALUE, data[start:end])\n self.messages.append(message)\n\n def on_header_end(self) -> None:\n message = (MultiPartMessage.HEADER_END, b\"\")\n self.messages.append(message)\n\n def on_headers_finished(self) -> None:\n message = (MultiPartMessage.HEADERS_FINISHED, b\"\")\n self.messages.append(message)\n\n def on_end(self) -> None:\n message = (MultiPartMessage.END, b\"\")\n self.messages.append(message)\n\n async def parse(self) -> FormData:\n # Parse the Content-Type header to get the multipart boundary.\n content_type, params = parse_options_header(self.headers[\"Content-Type\"])\n charset = params.get(b\"charset\", \"utf-8\")\n if type(charset) == bytes:\n charset = charset.decode(\"latin-1\")\n boundary = params.get(b\"boundary\")\n\n # Callbacks dictionary.\n callbacks = {\n \"on_part_begin\": self.on_part_begin,\n \"on_part_data\": self.on_part_data,\n \"on_part_end\": self.on_part_end,\n \"on_header_field\": self.on_header_field,\n \"on_header_value\": self.on_header_value,\n \"on_header_end\": self.on_header_end,\n \"on_headers_finished\": self.on_headers_finished,\n \"on_end\": self.on_end,\n }\n\n # Create the parser.\n parser = multipart.MultipartParser(boundary, callbacks)\n header_field = b\"\"\n header_value = b\"\"\n content_disposition = None\n content_type = b\"\"\n field_name = \"\"\n data = b\"\"\n file: typing.Optional[UploadFile] = None\n\n items: typing.List[typing.Tuple[str, typing.Union[str, UploadFile]]] = []\n item_headers: typing.List[typing.Tuple[bytes, bytes]] = []\n\n # Feed the parser with data from the request.\n async for chunk in self.stream:\n parser.write(chunk)\n messages = list(self.messages)\n self.messages.clear()\n for message_type, message_bytes in messages:\n if message_type == MultiPartMessage.PART_BEGIN:\n content_disposition = None\n content_type = b\"\"\n data = b\"\"\n item_headers = []\n elif message_type == MultiPartMessage.HEADER_FIELD:\n header_field += message_bytes\n elif message_type == MultiPartMessage.HEADER_VALUE:\n header_value += message_bytes\n elif message_type == MultiPartMessage.HEADER_END:\n field = header_field.lower()\n if field == b\"content-disposition\":\n content_disposition = header_value\n elif field == b\"content-type\":\n content_type = header_value\n item_headers.append((field, header_value))\n header_field = b\"\"\n header_value = b\"\"\n elif message_type == MultiPartMessage.HEADERS_FINISHED:\n disposition, options = parse_options_header(content_disposition)\n field_name = _user_safe_decode(options[b\"name\"], charset)\n if b\"filename\" in options:\n filename = _user_safe_decode(options[b\"filename\"], charset)\n file = UploadFile(\n filename=filename,\n content_type=content_type.decode(\"latin-1\"),\n headers=Headers(raw=item_headers),\n )\n else:\n file = None\n elif message_type == MultiPartMessage.PART_DATA:\n if file is None:\n data += message_bytes\n else:\n await file.write(message_bytes)\n elif message_type == MultiPartMessage.PART_END:\n if file is None:\n items.append((field_name, _user_safe_decode(data, charset)))\n else:\n await file.seek(0)\n items.append((field_name, file))\n\n parser.finalize()\n return FormData(items)\n", "path": "starlette/formparsers.py"}], "after_files": [{"content": "import typing\nfrom enum import Enum\nfrom urllib.parse import 
unquote_plus\n\nfrom starlette.datastructures import FormData, Headers, UploadFile\n\ntry:\n import multipart\n from multipart.multipart import parse_options_header\nexcept ImportError: # pragma: nocover\n parse_options_header = None\n multipart = None\n\n\nclass FormMessage(Enum):\n FIELD_START = 1\n FIELD_NAME = 2\n FIELD_DATA = 3\n FIELD_END = 4\n END = 5\n\n\nclass MultiPartMessage(Enum):\n PART_BEGIN = 1\n PART_DATA = 2\n PART_END = 3\n HEADER_FIELD = 4\n HEADER_VALUE = 5\n HEADER_END = 6\n HEADERS_FINISHED = 7\n END = 8\n\n\ndef _user_safe_decode(src: bytes, codec: str) -> str:\n try:\n return src.decode(codec)\n except (UnicodeDecodeError, LookupError):\n return src.decode(\"latin-1\")\n\n\nclass FormParser:\n def __init__(\n self, headers: Headers, stream: typing.AsyncGenerator[bytes, None]\n ) -> None:\n assert (\n multipart is not None\n ), \"The `python-multipart` library must be installed to use form parsing.\"\n self.headers = headers\n self.stream = stream\n self.messages: typing.List[typing.Tuple[FormMessage, bytes]] = []\n\n def on_field_start(self) -> None:\n message = (FormMessage.FIELD_START, b\"\")\n self.messages.append(message)\n\n def on_field_name(self, data: bytes, start: int, end: int) -> None:\n message = (FormMessage.FIELD_NAME, data[start:end])\n self.messages.append(message)\n\n def on_field_data(self, data: bytes, start: int, end: int) -> None:\n message = (FormMessage.FIELD_DATA, data[start:end])\n self.messages.append(message)\n\n def on_field_end(self) -> None:\n message = (FormMessage.FIELD_END, b\"\")\n self.messages.append(message)\n\n def on_end(self) -> None:\n message = (FormMessage.END, b\"\")\n self.messages.append(message)\n\n async def parse(self) -> FormData:\n # Callbacks dictionary.\n callbacks = {\n \"on_field_start\": self.on_field_start,\n \"on_field_name\": self.on_field_name,\n \"on_field_data\": self.on_field_data,\n \"on_field_end\": self.on_field_end,\n \"on_end\": self.on_end,\n }\n\n # Create the parser.\n parser = multipart.QuerystringParser(callbacks)\n field_name = b\"\"\n field_value = b\"\"\n\n items: typing.List[typing.Tuple[str, typing.Union[str, UploadFile]]] = []\n\n # Feed the parser with data from the request.\n async for chunk in self.stream:\n if chunk:\n parser.write(chunk)\n else:\n parser.finalize()\n messages = list(self.messages)\n self.messages.clear()\n for message_type, message_bytes in messages:\n if message_type == FormMessage.FIELD_START:\n field_name = b\"\"\n field_value = b\"\"\n elif message_type == FormMessage.FIELD_NAME:\n field_name += message_bytes\n elif message_type == FormMessage.FIELD_DATA:\n field_value += message_bytes\n elif message_type == FormMessage.FIELD_END:\n name = unquote_plus(field_name.decode(\"latin-1\"))\n value = unquote_plus(field_value.decode(\"latin-1\"))\n items.append((name, value))\n\n return FormData(items)\n\n\nclass MultiPartParser:\n def __init__(\n self, headers: Headers, stream: typing.AsyncGenerator[bytes, None]\n ) -> None:\n assert (\n multipart is not None\n ), \"The `python-multipart` library must be installed to use form parsing.\"\n self.headers = headers\n self.stream = stream\n self.messages: typing.List[typing.Tuple[MultiPartMessage, bytes]] = []\n\n def on_part_begin(self) -> None:\n message = (MultiPartMessage.PART_BEGIN, b\"\")\n self.messages.append(message)\n\n def on_part_data(self, data: bytes, start: int, end: int) -> None:\n message = (MultiPartMessage.PART_DATA, data[start:end])\n self.messages.append(message)\n\n def on_part_end(self) -> None:\n 
message = (MultiPartMessage.PART_END, b\"\")\n self.messages.append(message)\n\n def on_header_field(self, data: bytes, start: int, end: int) -> None:\n message = (MultiPartMessage.HEADER_FIELD, data[start:end])\n self.messages.append(message)\n\n def on_header_value(self, data: bytes, start: int, end: int) -> None:\n message = (MultiPartMessage.HEADER_VALUE, data[start:end])\n self.messages.append(message)\n\n def on_header_end(self) -> None:\n message = (MultiPartMessage.HEADER_END, b\"\")\n self.messages.append(message)\n\n def on_headers_finished(self) -> None:\n message = (MultiPartMessage.HEADERS_FINISHED, b\"\")\n self.messages.append(message)\n\n def on_end(self) -> None:\n message = (MultiPartMessage.END, b\"\")\n self.messages.append(message)\n\n async def parse(self) -> FormData:\n # Parse the Content-Type header to get the multipart boundary.\n content_type, params = parse_options_header(self.headers[\"Content-Type\"])\n charset = params.get(b\"charset\", \"utf-8\")\n if type(charset) == bytes:\n charset = charset.decode(\"latin-1\")\n boundary = params[b\"boundary\"]\n\n # Callbacks dictionary.\n callbacks = {\n \"on_part_begin\": self.on_part_begin,\n \"on_part_data\": self.on_part_data,\n \"on_part_end\": self.on_part_end,\n \"on_header_field\": self.on_header_field,\n \"on_header_value\": self.on_header_value,\n \"on_header_end\": self.on_header_end,\n \"on_headers_finished\": self.on_headers_finished,\n \"on_end\": self.on_end,\n }\n\n # Create the parser.\n parser = multipart.MultipartParser(boundary, callbacks)\n header_field = b\"\"\n header_value = b\"\"\n content_disposition = None\n content_type = b\"\"\n field_name = \"\"\n data = b\"\"\n file: typing.Optional[UploadFile] = None\n\n items: typing.List[typing.Tuple[str, typing.Union[str, UploadFile]]] = []\n item_headers: typing.List[typing.Tuple[bytes, bytes]] = []\n\n # Feed the parser with data from the request.\n async for chunk in self.stream:\n parser.write(chunk)\n messages = list(self.messages)\n self.messages.clear()\n for message_type, message_bytes in messages:\n if message_type == MultiPartMessage.PART_BEGIN:\n content_disposition = None\n content_type = b\"\"\n data = b\"\"\n item_headers = []\n elif message_type == MultiPartMessage.HEADER_FIELD:\n header_field += message_bytes\n elif message_type == MultiPartMessage.HEADER_VALUE:\n header_value += message_bytes\n elif message_type == MultiPartMessage.HEADER_END:\n field = header_field.lower()\n if field == b\"content-disposition\":\n content_disposition = header_value\n elif field == b\"content-type\":\n content_type = header_value\n item_headers.append((field, header_value))\n header_field = b\"\"\n header_value = b\"\"\n elif message_type == MultiPartMessage.HEADERS_FINISHED:\n disposition, options = parse_options_header(content_disposition)\n field_name = _user_safe_decode(options[b\"name\"], charset)\n if b\"filename\" in options:\n filename = _user_safe_decode(options[b\"filename\"], charset)\n file = UploadFile(\n filename=filename,\n content_type=content_type.decode(\"latin-1\"),\n headers=Headers(raw=item_headers),\n )\n else:\n file = None\n elif message_type == MultiPartMessage.PART_DATA:\n if file is None:\n data += message_bytes\n else:\n await file.write(message_bytes)\n elif message_type == MultiPartMessage.PART_END:\n if file is None:\n items.append((field_name, _user_safe_decode(data, charset)))\n else:\n await file.seek(0)\n items.append((field_name, file))\n\n parser.finalize()\n return FormData(items)\n", "path": 
"starlette/formparsers.py"}]}
| 3,427 | 109 |
gh_patches_debug_36129
|
rasdani/github-patches
|
git_diff
|
sublimelsp__LSP-1326
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
LSP Document Symbols downside compared to native one
**Describe the bug**
I've been running with `cmd+r` overridden to use Document Symbols from LSP. The functionality is better than native but it has one downside compared to native behavior. Namely, with native, I can trigger the shortcut and start typing immediately. The quick panel appears immediately (synchronously, or at least with almost no latency). When using LSP's Document Symbols, the quick panel only opens after we receive the server response, so, since I'm used to starting to type my query right away, the query ends up being inserted into the active view instead.
Ideally, the quick panel would open right away, even before the response comes, but I guess the API might not let us do that.
Alternatively, we could potentially work around that somehow by capturing user input when the command is triggered and before the quick panel is shown.
--- END ISSUE ---
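
For illustration, a minimal sketch of the flag-based workaround hinted at above could set a view setting before the request goes out and clear it once the response arrives. The setting name `lsp_suppress_input`, the stand-in timer in place of the real `session.send_request(...)` call, and the idea that other parts of the plugin consult the flag to ignore keystrokes are all assumptions of this sketch, not current plugin behavior:

```python
import sublime
import sublime_plugin

# Hypothetical flag name used only in this sketch; consumers that actually swallow
# keystrokes while it is set are not shown here.
SUPPRESS_INPUT_SETTING_KEY = 'lsp_suppress_input'


class LspDocumentSymbolsSketchCommand(sublime_plugin.TextCommand):
    """Raise a 'suppress input' flag for the duration of the documentSymbols request."""

    def run(self, edit: sublime.Edit) -> None:
        # Raise the flag before the potentially slow round-trip starts.
        self.view.settings().set(SUPPRESS_INPUT_SETTING_KEY, True)
        # Stand-in for session.send_request(...); the real command would pass
        # self.handle_response as the request's response callback instead.
        sublime.set_timeout_async(lambda: self.handle_response(response=None), 500)

    def handle_response(self, response) -> None:
        # Lower the flag as soon as the quick panel can be shown.
        self.view.settings().erase(SUPPRESS_INPUT_SETTING_KEY)
        # ... build quick panel items from `response` and call window.show_quick_panel() ...
```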
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugin/symbols.py`
Content:
```
1 from .core.protocol import Request, Range
2 from .core.registry import LspTextCommand
3 from .core.typing import Any, List, Optional, Tuple, Dict, Generator
4 from .core.views import location_to_encoded_filename
5 from .core.views import range_to_region
6 from .core.views import SYMBOL_KINDS
7 from .core.views import text_document_identifier
8 from contextlib import contextmanager
9 import os
10 import sublime
11 import sublime_plugin
12
13
14 def unpack_lsp_kind(kind: int) -> Tuple[int, str, str, str]:
15 if 1 <= kind <= len(SYMBOL_KINDS):
16 return SYMBOL_KINDS[kind - 1]
17 return sublime.KIND_ID_AMBIGUOUS, "?", "???", "comment"
18
19
20 def format_symbol_kind(kind: int) -> str:
21 if 1 <= kind <= len(SYMBOL_KINDS):
22 return SYMBOL_KINDS[kind - 1][2]
23 return str(kind)
24
25
26 def get_symbol_scope_from_lsp_kind(kind: int) -> str:
27 if 1 <= kind <= len(SYMBOL_KINDS):
28 return SYMBOL_KINDS[kind - 1][3]
29 return 'comment'
30
31
32 @contextmanager
33 def _additional_name(names: List[str], name: str) -> Generator[None, None, None]:
34 names.append(name)
35 yield
36 names.pop(-1)
37
38
39 class LspSelectionClearCommand(sublime_plugin.TextCommand):
40 """
41 Selections may not be modified outside the run method of a text command. Thus, to allow modification in an async
42 context we need to have dedicated commands for this.
43
44 https://github.com/sublimehq/sublime_text/issues/485#issuecomment-337480388
45 """
46
47 def run(self, _: sublime.Edit) -> None:
48 self.view.sel().clear()
49
50
51 class LspSelectionAddCommand(sublime_plugin.TextCommand):
52
53 def run(self, _: sublime.Edit, regions: List[Tuple[int, int]]) -> None:
54 for region in regions:
55 self.view.sel().add(sublime.Region(*region))
56
57
58 class LspSelectionSetCommand(sublime_plugin.TextCommand):
59
60 def run(self, _: sublime.Edit, regions: List[Tuple[int, int]]) -> None:
61 self.view.sel().clear()
62 for region in regions:
63 self.view.sel().add(sublime.Region(*region))
64
65
66 class LspDocumentSymbolsCommand(LspTextCommand):
67
68 capability = 'documentSymbolProvider'
69 REGIONS_KEY = 'lsp_document_symbols'
70
71 def __init__(self, view: sublime.View) -> None:
72 super().__init__(view)
73 self.old_regions = [] # type: List[sublime.Region]
74 self.regions = [] # type: List[Tuple[sublime.Region, Optional[sublime.Region], str]]
75 self.is_first_selection = False
76
77 def run(self, edit: sublime.Edit) -> None:
78 session = self.best_session(self.capability)
79 if session:
80 session.send_request(
81 Request.documentSymbols({"textDocument": text_document_identifier(self.view)}), self.handle_response)
82
83 def handle_response(self, response: Any) -> None:
84 window = self.view.window()
85 if window and isinstance(response, list) and len(response) > 0:
86 self.old_regions = [sublime.Region(r.a, r.b) for r in self.view.sel()]
87 self.is_first_selection = True
88 window.show_quick_panel(
89 self.process_symbols(response),
90 self.on_symbol_selected,
91 sublime.KEEP_OPEN_ON_FOCUS_LOST,
92 0,
93 self.on_highlighted)
94 self.view.run_command("lsp_selection_clear")
95
96 def region(self, index: int) -> sublime.Region:
97 return self.regions[index][0]
98
99 def selection_region(self, index: int) -> Optional[sublime.Region]:
100 return self.regions[index][1]
101
102 def scope(self, index: int) -> str:
103 return self.regions[index][2]
104
105 def on_symbol_selected(self, index: int) -> None:
106 if index == -1:
107 if len(self.old_regions) > 0:
108 self.view.run_command("lsp_selection_add", {"regions": [(r.a, r.b) for r in self.old_regions]})
109 self.view.show_at_center(self.old_regions[0].begin())
110 else:
111 region = self.selection_region(index) or self.region(index)
112 self.view.run_command("lsp_selection_add", {"regions": [(region.a, region.a)]})
113 self.view.show_at_center(region.a)
114 self.view.erase_regions(self.REGIONS_KEY)
115 self.old_regions.clear()
116 self.regions.clear()
117
118 def on_highlighted(self, index: int) -> None:
119 if self.is_first_selection:
120 self.is_first_selection = False
121 return
122 region = self.region(index)
123 self.view.show_at_center(region.a)
124 self.view.add_regions(self.REGIONS_KEY, [region], self.scope(index), '', sublime.DRAW_NO_FILL)
125
126 def process_symbols(self, items: List[Dict[str, Any]]) -> List[sublime.QuickPanelItem]:
127 self.regions.clear()
128 panel_items = []
129 if 'selectionRange' in items[0]:
130 panel_items = self.process_document_symbols(items)
131 else:
132 panel_items = self.process_symbol_informations(items)
133 # Sort both lists in sync according to the range's begin point.
134 sorted_results = zip(*sorted(zip(self.regions, panel_items), key=lambda item: item[0][0].begin()))
135 sorted_regions, sorted_panel_items = sorted_results
136 self.regions = list(sorted_regions)
137 return list(sorted_panel_items)
138
139 def process_document_symbols(self, items: List[Dict[str, Any]]) -> List[sublime.QuickPanelItem]:
140 quick_panel_items = [] # type: List[sublime.QuickPanelItem]
141 names = [] # type: List[str]
142 for item in items:
143 self.process_document_symbol_recursive(quick_panel_items, item, names)
144 return quick_panel_items
145
146 def process_document_symbol_recursive(self, quick_panel_items: List[sublime.QuickPanelItem], item: Dict[str, Any],
147 names: List[str]) -> None:
148 lsp_kind = item["kind"]
149 self.regions.append((range_to_region(Range.from_lsp(item['range']), self.view),
150 range_to_region(Range.from_lsp(item['selectionRange']), self.view),
151 get_symbol_scope_from_lsp_kind(lsp_kind)))
152 name = item['name']
153 with _additional_name(names, name):
154 st_kind, st_icon, st_display_type, _ = unpack_lsp_kind(lsp_kind)
155 formatted_names = " > ".join(names)
156 st_details = item.get("detail") or ""
157 if st_details:
158 st_details = "{} | {}".format(st_details, formatted_names)
159 else:
160 st_details = formatted_names
161 quick_panel_items.append(
162 sublime.QuickPanelItem(
163 trigger=name,
164 details=st_details,
165 annotation=st_display_type,
166 kind=(st_kind, st_icon, st_display_type)))
167 children = item.get('children') or []
168 for child in children:
169 self.process_document_symbol_recursive(quick_panel_items, child, names)
170
171 def process_symbol_informations(self, items: List[Dict[str, Any]]) -> List[sublime.QuickPanelItem]:
172 quick_panel_items = [] # type: List[sublime.QuickPanelItem]
173 for item in items:
174 lsp_kind = item['kind']
175 self.regions.append((range_to_region(Range.from_lsp(item['location']['range']), self.view),
176 None, get_symbol_scope_from_lsp_kind(lsp_kind)))
177 container = item.get("containerName")
178 st_kind, st_icon, st_display_type, _ = unpack_lsp_kind(lsp_kind)
179 quick_panel_items.append(
180 sublime.QuickPanelItem(
181 trigger=item["name"],
182 details=container or "",
183 annotation=st_display_type,
184 kind=(st_kind, st_icon, st_display_type)))
185 return quick_panel_items
186
187
188 class SymbolQueryInput(sublime_plugin.TextInputHandler):
189
190 def validate(self, txt: str) -> bool:
191 return txt != ""
192
193 def placeholder(self) -> str:
194 return "Symbol"
195
196
197 class LspWorkspaceSymbolsCommand(LspTextCommand):
198
199 capability = 'workspaceSymbolProvider'
200
201 def input(self, _args: Any) -> sublime_plugin.TextInputHandler:
202 return SymbolQueryInput()
203
204 def run(self, edit: sublime.Edit, symbol_query_input: str = "") -> None:
205 if symbol_query_input:
206 session = self.best_session(self.capability)
207 if session:
208 self.view.set_status("lsp_workspace_symbols", "Searching for '{}'...".format(symbol_query_input))
209 request = Request.workspaceSymbol({"query": symbol_query_input})
210 session.send_request(request, lambda r: self._handle_response(
211 symbol_query_input, r), self._handle_error)
212
213 def _format(self, s: Dict[str, Any]) -> str:
214 file_name = os.path.basename(s['location']['uri'])
215 symbol_kind = format_symbol_kind(s["kind"])
216 name = "{} ({}) - {} -- {}".format(s['name'], symbol_kind, s.get('containerName', ""), file_name)
217 return name
218
219 def _open_file(self, symbols: List[Dict[str, Any]], index: int) -> None:
220 if index != -1:
221 symbol = symbols[index]
222 window = self.view.window()
223 if window:
224 window.open_file(location_to_encoded_filename(symbol['location']), sublime.ENCODED_POSITION)
225
226 def _handle_response(self, query: str, response: Optional[List[Dict[str, Any]]]) -> None:
227 self.view.erase_status("lsp_workspace_symbols")
228 if response:
229 matches = response
230 window = self.view.window()
231 if window:
232 window.show_quick_panel(list(map(self._format, matches)), lambda i: self._open_file(matches, i))
233 else:
234 sublime.message_dialog("No matches found for query string: '{}'".format(query))
235
236 def _handle_error(self, error: Dict[str, Any]) -> None:
237 self.view.erase_status("lsp_workspace_symbols")
238 reason = error.get("message", "none provided by server :(")
239 msg = "command 'workspace/symbol' failed. Reason: {}".format(reason)
240 sublime.error_message(msg)
241
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/plugin/symbols.py b/plugin/symbols.py
--- a/plugin/symbols.py
+++ b/plugin/symbols.py
@@ -1,5 +1,6 @@
from .core.protocol import Request, Range
from .core.registry import LspTextCommand
+from .core.rpc import print_to_status_bar
from .core.typing import Any, List, Optional, Tuple, Dict, Generator
from .core.views import location_to_encoded_filename
from .core.views import range_to_region
@@ -11,6 +12,9 @@
import sublime_plugin
+SUPPRESS_INPUT_SETTING_KEY = 'lsp_suppress_input'
+
+
def unpack_lsp_kind(kind: int) -> Tuple[int, str, str, str]:
if 1 <= kind <= len(SYMBOL_KINDS):
return SYMBOL_KINDS[kind - 1]
@@ -75,12 +79,16 @@
self.is_first_selection = False
def run(self, edit: sublime.Edit) -> None:
+ self.view.settings().set(SUPPRESS_INPUT_SETTING_KEY, True)
session = self.best_session(self.capability)
if session:
session.send_request(
- Request.documentSymbols({"textDocument": text_document_identifier(self.view)}), self.handle_response)
+ Request.documentSymbols({"textDocument": text_document_identifier(self.view)}),
+ lambda response: sublime.set_timeout(lambda: self.handle_response(response)),
+ lambda error: sublime.set_timeout(lambda: self.handle_response_error(error)))
def handle_response(self, response: Any) -> None:
+ self.view.settings().erase(SUPPRESS_INPUT_SETTING_KEY)
window = self.view.window()
if window and isinstance(response, list) and len(response) > 0:
self.old_regions = [sublime.Region(r.a, r.b) for r in self.view.sel()]
@@ -93,6 +101,10 @@
self.on_highlighted)
self.view.run_command("lsp_selection_clear")
+ def handle_response_error(self, error: Any) -> None:
+ self.view.settings().erase(SUPPRESS_INPUT_SETTING_KEY)
+ print_to_status_bar(error)
+
def region(self, index: int) -> sublime.Region:
return self.regions[index][0]
|
{"golden_diff": "diff --git a/plugin/symbols.py b/plugin/symbols.py\n--- a/plugin/symbols.py\n+++ b/plugin/symbols.py\n@@ -1,5 +1,6 @@\n from .core.protocol import Request, Range\n from .core.registry import LspTextCommand\n+from .core.rpc import print_to_status_bar\n from .core.typing import Any, List, Optional, Tuple, Dict, Generator\n from .core.views import location_to_encoded_filename\n from .core.views import range_to_region\n@@ -11,6 +12,9 @@\n import sublime_plugin\n \n \n+SUPPRESS_INPUT_SETTING_KEY = 'lsp_suppress_input'\n+\n+\n def unpack_lsp_kind(kind: int) -> Tuple[int, str, str, str]:\n if 1 <= kind <= len(SYMBOL_KINDS):\n return SYMBOL_KINDS[kind - 1]\n@@ -75,12 +79,16 @@\n self.is_first_selection = False\n \n def run(self, edit: sublime.Edit) -> None:\n+ self.view.settings().set(SUPPRESS_INPUT_SETTING_KEY, True)\n session = self.best_session(self.capability)\n if session:\n session.send_request(\n- Request.documentSymbols({\"textDocument\": text_document_identifier(self.view)}), self.handle_response)\n+ Request.documentSymbols({\"textDocument\": text_document_identifier(self.view)}),\n+ lambda response: sublime.set_timeout(lambda: self.handle_response(response)),\n+ lambda error: sublime.set_timeout(lambda: self.handle_response_error(error)))\n \n def handle_response(self, response: Any) -> None:\n+ self.view.settings().erase(SUPPRESS_INPUT_SETTING_KEY)\n window = self.view.window()\n if window and isinstance(response, list) and len(response) > 0:\n self.old_regions = [sublime.Region(r.a, r.b) for r in self.view.sel()]\n@@ -93,6 +101,10 @@\n self.on_highlighted)\n self.view.run_command(\"lsp_selection_clear\")\n \n+ def handle_response_error(self, error: Any) -> None:\n+ self.view.settings().erase(SUPPRESS_INPUT_SETTING_KEY)\n+ print_to_status_bar(error)\n+\n def region(self, index: int) -> sublime.Region:\n return self.regions[index][0]\n", "issue": "LSP Document Symbols downside compared to native one\n**Describe the bug**\r\nI've been running with `cmd+r` overridden to use Document Symbols from LSP. The functionality is better than native but it has one downside compared to native behavior. Namely, with native, I can trigger the shortcut and start typing immediately. The quick panel appears immediately (synchronously or at least with almost no latency). 
When using LSP's document symbol, the quick panel only opens after we receive server response so, being used to just start typing my query right away, my query ends up being inserted into the active view instead.\r\n\r\nIdeally, the quick panel would open right away, even before the response comes, but I guess the API might not let us do that.\r\n\r\nAlternatively, we could potentially work around that somehow by capturing user input when the command is triggered and before the quick panel is shown.\n", "before_files": [{"content": "from .core.protocol import Request, Range\nfrom .core.registry import LspTextCommand\nfrom .core.typing import Any, List, Optional, Tuple, Dict, Generator\nfrom .core.views import location_to_encoded_filename\nfrom .core.views import range_to_region\nfrom .core.views import SYMBOL_KINDS\nfrom .core.views import text_document_identifier\nfrom contextlib import contextmanager\nimport os\nimport sublime\nimport sublime_plugin\n\n\ndef unpack_lsp_kind(kind: int) -> Tuple[int, str, str, str]:\n if 1 <= kind <= len(SYMBOL_KINDS):\n return SYMBOL_KINDS[kind - 1]\n return sublime.KIND_ID_AMBIGUOUS, \"?\", \"???\", \"comment\"\n\n\ndef format_symbol_kind(kind: int) -> str:\n if 1 <= kind <= len(SYMBOL_KINDS):\n return SYMBOL_KINDS[kind - 1][2]\n return str(kind)\n\n\ndef get_symbol_scope_from_lsp_kind(kind: int) -> str:\n if 1 <= kind <= len(SYMBOL_KINDS):\n return SYMBOL_KINDS[kind - 1][3]\n return 'comment'\n\n\n@contextmanager\ndef _additional_name(names: List[str], name: str) -> Generator[None, None, None]:\n names.append(name)\n yield\n names.pop(-1)\n\n\nclass LspSelectionClearCommand(sublime_plugin.TextCommand):\n \"\"\"\n Selections may not be modified outside the run method of a text command. Thus, to allow modification in an async\n context we need to have dedicated commands for this.\n\n https://github.com/sublimehq/sublime_text/issues/485#issuecomment-337480388\n \"\"\"\n\n def run(self, _: sublime.Edit) -> None:\n self.view.sel().clear()\n\n\nclass LspSelectionAddCommand(sublime_plugin.TextCommand):\n\n def run(self, _: sublime.Edit, regions: List[Tuple[int, int]]) -> None:\n for region in regions:\n self.view.sel().add(sublime.Region(*region))\n\n\nclass LspSelectionSetCommand(sublime_plugin.TextCommand):\n\n def run(self, _: sublime.Edit, regions: List[Tuple[int, int]]) -> None:\n self.view.sel().clear()\n for region in regions:\n self.view.sel().add(sublime.Region(*region))\n\n\nclass LspDocumentSymbolsCommand(LspTextCommand):\n\n capability = 'documentSymbolProvider'\n REGIONS_KEY = 'lsp_document_symbols'\n\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self.old_regions = [] # type: List[sublime.Region]\n self.regions = [] # type: List[Tuple[sublime.Region, Optional[sublime.Region], str]]\n self.is_first_selection = False\n\n def run(self, edit: sublime.Edit) -> None:\n session = self.best_session(self.capability)\n if session:\n session.send_request(\n Request.documentSymbols({\"textDocument\": text_document_identifier(self.view)}), self.handle_response)\n\n def handle_response(self, response: Any) -> None:\n window = self.view.window()\n if window and isinstance(response, list) and len(response) > 0:\n self.old_regions = [sublime.Region(r.a, r.b) for r in self.view.sel()]\n self.is_first_selection = True\n window.show_quick_panel(\n self.process_symbols(response),\n self.on_symbol_selected,\n sublime.KEEP_OPEN_ON_FOCUS_LOST,\n 0,\n self.on_highlighted)\n self.view.run_command(\"lsp_selection_clear\")\n\n def region(self, 
index: int) -> sublime.Region:\n return self.regions[index][0]\n\n def selection_region(self, index: int) -> Optional[sublime.Region]:\n return self.regions[index][1]\n\n def scope(self, index: int) -> str:\n return self.regions[index][2]\n\n def on_symbol_selected(self, index: int) -> None:\n if index == -1:\n if len(self.old_regions) > 0:\n self.view.run_command(\"lsp_selection_add\", {\"regions\": [(r.a, r.b) for r in self.old_regions]})\n self.view.show_at_center(self.old_regions[0].begin())\n else:\n region = self.selection_region(index) or self.region(index)\n self.view.run_command(\"lsp_selection_add\", {\"regions\": [(region.a, region.a)]})\n self.view.show_at_center(region.a)\n self.view.erase_regions(self.REGIONS_KEY)\n self.old_regions.clear()\n self.regions.clear()\n\n def on_highlighted(self, index: int) -> None:\n if self.is_first_selection:\n self.is_first_selection = False\n return\n region = self.region(index)\n self.view.show_at_center(region.a)\n self.view.add_regions(self.REGIONS_KEY, [region], self.scope(index), '', sublime.DRAW_NO_FILL)\n\n def process_symbols(self, items: List[Dict[str, Any]]) -> List[sublime.QuickPanelItem]:\n self.regions.clear()\n panel_items = []\n if 'selectionRange' in items[0]:\n panel_items = self.process_document_symbols(items)\n else:\n panel_items = self.process_symbol_informations(items)\n # Sort both lists in sync according to the range's begin point.\n sorted_results = zip(*sorted(zip(self.regions, panel_items), key=lambda item: item[0][0].begin()))\n sorted_regions, sorted_panel_items = sorted_results\n self.regions = list(sorted_regions)\n return list(sorted_panel_items)\n\n def process_document_symbols(self, items: List[Dict[str, Any]]) -> List[sublime.QuickPanelItem]:\n quick_panel_items = [] # type: List[sublime.QuickPanelItem]\n names = [] # type: List[str]\n for item in items:\n self.process_document_symbol_recursive(quick_panel_items, item, names)\n return quick_panel_items\n\n def process_document_symbol_recursive(self, quick_panel_items: List[sublime.QuickPanelItem], item: Dict[str, Any],\n names: List[str]) -> None:\n lsp_kind = item[\"kind\"]\n self.regions.append((range_to_region(Range.from_lsp(item['range']), self.view),\n range_to_region(Range.from_lsp(item['selectionRange']), self.view),\n get_symbol_scope_from_lsp_kind(lsp_kind)))\n name = item['name']\n with _additional_name(names, name):\n st_kind, st_icon, st_display_type, _ = unpack_lsp_kind(lsp_kind)\n formatted_names = \" > \".join(names)\n st_details = item.get(\"detail\") or \"\"\n if st_details:\n st_details = \"{} | {}\".format(st_details, formatted_names)\n else:\n st_details = formatted_names\n quick_panel_items.append(\n sublime.QuickPanelItem(\n trigger=name,\n details=st_details,\n annotation=st_display_type,\n kind=(st_kind, st_icon, st_display_type)))\n children = item.get('children') or []\n for child in children:\n self.process_document_symbol_recursive(quick_panel_items, child, names)\n\n def process_symbol_informations(self, items: List[Dict[str, Any]]) -> List[sublime.QuickPanelItem]:\n quick_panel_items = [] # type: List[sublime.QuickPanelItem]\n for item in items:\n lsp_kind = item['kind']\n self.regions.append((range_to_region(Range.from_lsp(item['location']['range']), self.view),\n None, get_symbol_scope_from_lsp_kind(lsp_kind)))\n container = item.get(\"containerName\")\n st_kind, st_icon, st_display_type, _ = unpack_lsp_kind(lsp_kind)\n quick_panel_items.append(\n sublime.QuickPanelItem(\n trigger=item[\"name\"],\n details=container or 
\"\",\n annotation=st_display_type,\n kind=(st_kind, st_icon, st_display_type)))\n return quick_panel_items\n\n\nclass SymbolQueryInput(sublime_plugin.TextInputHandler):\n\n def validate(self, txt: str) -> bool:\n return txt != \"\"\n\n def placeholder(self) -> str:\n return \"Symbol\"\n\n\nclass LspWorkspaceSymbolsCommand(LspTextCommand):\n\n capability = 'workspaceSymbolProvider'\n\n def input(self, _args: Any) -> sublime_plugin.TextInputHandler:\n return SymbolQueryInput()\n\n def run(self, edit: sublime.Edit, symbol_query_input: str = \"\") -> None:\n if symbol_query_input:\n session = self.best_session(self.capability)\n if session:\n self.view.set_status(\"lsp_workspace_symbols\", \"Searching for '{}'...\".format(symbol_query_input))\n request = Request.workspaceSymbol({\"query\": symbol_query_input})\n session.send_request(request, lambda r: self._handle_response(\n symbol_query_input, r), self._handle_error)\n\n def _format(self, s: Dict[str, Any]) -> str:\n file_name = os.path.basename(s['location']['uri'])\n symbol_kind = format_symbol_kind(s[\"kind\"])\n name = \"{} ({}) - {} -- {}\".format(s['name'], symbol_kind, s.get('containerName', \"\"), file_name)\n return name\n\n def _open_file(self, symbols: List[Dict[str, Any]], index: int) -> None:\n if index != -1:\n symbol = symbols[index]\n window = self.view.window()\n if window:\n window.open_file(location_to_encoded_filename(symbol['location']), sublime.ENCODED_POSITION)\n\n def _handle_response(self, query: str, response: Optional[List[Dict[str, Any]]]) -> None:\n self.view.erase_status(\"lsp_workspace_symbols\")\n if response:\n matches = response\n window = self.view.window()\n if window:\n window.show_quick_panel(list(map(self._format, matches)), lambda i: self._open_file(matches, i))\n else:\n sublime.message_dialog(\"No matches found for query string: '{}'\".format(query))\n\n def _handle_error(self, error: Dict[str, Any]) -> None:\n self.view.erase_status(\"lsp_workspace_symbols\")\n reason = error.get(\"message\", \"none provided by server :(\")\n msg = \"command 'workspace/symbol' failed. Reason: {}\".format(reason)\n sublime.error_message(msg)\n", "path": "plugin/symbols.py"}], "after_files": [{"content": "from .core.protocol import Request, Range\nfrom .core.registry import LspTextCommand\nfrom .core.rpc import print_to_status_bar\nfrom .core.typing import Any, List, Optional, Tuple, Dict, Generator\nfrom .core.views import location_to_encoded_filename\nfrom .core.views import range_to_region\nfrom .core.views import SYMBOL_KINDS\nfrom .core.views import text_document_identifier\nfrom contextlib import contextmanager\nimport os\nimport sublime\nimport sublime_plugin\n\n\nSUPPRESS_INPUT_SETTING_KEY = 'lsp_suppress_input'\n\n\ndef unpack_lsp_kind(kind: int) -> Tuple[int, str, str, str]:\n if 1 <= kind <= len(SYMBOL_KINDS):\n return SYMBOL_KINDS[kind - 1]\n return sublime.KIND_ID_AMBIGUOUS, \"?\", \"???\", \"comment\"\n\n\ndef format_symbol_kind(kind: int) -> str:\n if 1 <= kind <= len(SYMBOL_KINDS):\n return SYMBOL_KINDS[kind - 1][2]\n return str(kind)\n\n\ndef get_symbol_scope_from_lsp_kind(kind: int) -> str:\n if 1 <= kind <= len(SYMBOL_KINDS):\n return SYMBOL_KINDS[kind - 1][3]\n return 'comment'\n\n\n@contextmanager\ndef _additional_name(names: List[str], name: str) -> Generator[None, None, None]:\n names.append(name)\n yield\n names.pop(-1)\n\n\nclass LspSelectionClearCommand(sublime_plugin.TextCommand):\n \"\"\"\n Selections may not be modified outside the run method of a text command. 
Thus, to allow modification in an async\n context we need to have dedicated commands for this.\n\n https://github.com/sublimehq/sublime_text/issues/485#issuecomment-337480388\n \"\"\"\n\n def run(self, _: sublime.Edit) -> None:\n self.view.sel().clear()\n\n\nclass LspSelectionAddCommand(sublime_plugin.TextCommand):\n\n def run(self, _: sublime.Edit, regions: List[Tuple[int, int]]) -> None:\n for region in regions:\n self.view.sel().add(sublime.Region(*region))\n\n\nclass LspSelectionSetCommand(sublime_plugin.TextCommand):\n\n def run(self, _: sublime.Edit, regions: List[Tuple[int, int]]) -> None:\n self.view.sel().clear()\n for region in regions:\n self.view.sel().add(sublime.Region(*region))\n\n\nclass LspDocumentSymbolsCommand(LspTextCommand):\n\n capability = 'documentSymbolProvider'\n REGIONS_KEY = 'lsp_document_symbols'\n\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self.old_regions = [] # type: List[sublime.Region]\n self.regions = [] # type: List[Tuple[sublime.Region, Optional[sublime.Region], str]]\n self.is_first_selection = False\n\n def run(self, edit: sublime.Edit) -> None:\n self.view.settings().set(SUPPRESS_INPUT_SETTING_KEY, True)\n session = self.best_session(self.capability)\n if session:\n session.send_request(\n Request.documentSymbols({\"textDocument\": text_document_identifier(self.view)}),\n lambda response: sublime.set_timeout(lambda: self.handle_response(response)),\n lambda error: sublime.set_timeout(lambda: self.handle_response_error(error)))\n\n def handle_response(self, response: Any) -> None:\n self.view.settings().erase(SUPPRESS_INPUT_SETTING_KEY)\n window = self.view.window()\n if window and isinstance(response, list) and len(response) > 0:\n self.old_regions = [sublime.Region(r.a, r.b) for r in self.view.sel()]\n self.is_first_selection = True\n window.show_quick_panel(\n self.process_symbols(response),\n self.on_symbol_selected,\n sublime.KEEP_OPEN_ON_FOCUS_LOST,\n 0,\n self.on_highlighted)\n self.view.run_command(\"lsp_selection_clear\")\n\n def handle_response_error(self, error: Any) -> None:\n self.view.settings().erase(SUPPRESS_INPUT_SETTING_KEY)\n print_to_status_bar(error)\n\n def region(self, index: int) -> sublime.Region:\n return self.regions[index][0]\n\n def selection_region(self, index: int) -> Optional[sublime.Region]:\n return self.regions[index][1]\n\n def scope(self, index: int) -> str:\n return self.regions[index][2]\n\n def on_symbol_selected(self, index: int) -> None:\n if index == -1:\n if len(self.old_regions) > 0:\n self.view.run_command(\"lsp_selection_add\", {\"regions\": [(r.a, r.b) for r in self.old_regions]})\n self.view.show_at_center(self.old_regions[0].begin())\n else:\n region = self.selection_region(index) or self.region(index)\n self.view.run_command(\"lsp_selection_add\", {\"regions\": [(region.a, region.a)]})\n self.view.show_at_center(region.a)\n self.view.erase_regions(self.REGIONS_KEY)\n self.old_regions.clear()\n self.regions.clear()\n\n def on_highlighted(self, index: int) -> None:\n if self.is_first_selection:\n self.is_first_selection = False\n return\n region = self.region(index)\n self.view.show_at_center(region.a)\n self.view.add_regions(self.REGIONS_KEY, [region], self.scope(index), '', sublime.DRAW_NO_FILL)\n\n def process_symbols(self, items: List[Dict[str, Any]]) -> List[sublime.QuickPanelItem]:\n self.regions.clear()\n panel_items = []\n if 'selectionRange' in items[0]:\n panel_items = self.process_document_symbols(items)\n else:\n panel_items = 
self.process_symbol_informations(items)\n # Sort both lists in sync according to the range's begin point.\n sorted_results = zip(*sorted(zip(self.regions, panel_items), key=lambda item: item[0][0].begin()))\n sorted_regions, sorted_panel_items = sorted_results\n self.regions = list(sorted_regions)\n return list(sorted_panel_items)\n\n def process_document_symbols(self, items: List[Dict[str, Any]]) -> List[sublime.QuickPanelItem]:\n quick_panel_items = [] # type: List[sublime.QuickPanelItem]\n names = [] # type: List[str]\n for item in items:\n self.process_document_symbol_recursive(quick_panel_items, item, names)\n return quick_panel_items\n\n def process_document_symbol_recursive(self, quick_panel_items: List[sublime.QuickPanelItem], item: Dict[str, Any],\n names: List[str]) -> None:\n lsp_kind = item[\"kind\"]\n self.regions.append((range_to_region(Range.from_lsp(item['range']), self.view),\n range_to_region(Range.from_lsp(item['selectionRange']), self.view),\n get_symbol_scope_from_lsp_kind(lsp_kind)))\n name = item['name']\n with _additional_name(names, name):\n st_kind, st_icon, st_display_type, _ = unpack_lsp_kind(lsp_kind)\n formatted_names = \" > \".join(names)\n st_details = item.get(\"detail\") or \"\"\n if st_details:\n st_details = \"{} | {}\".format(st_details, formatted_names)\n else:\n st_details = formatted_names\n quick_panel_items.append(\n sublime.QuickPanelItem(\n trigger=name,\n details=st_details,\n annotation=st_display_type,\n kind=(st_kind, st_icon, st_display_type)))\n children = item.get('children') or []\n for child in children:\n self.process_document_symbol_recursive(quick_panel_items, child, names)\n\n def process_symbol_informations(self, items: List[Dict[str, Any]]) -> List[sublime.QuickPanelItem]:\n quick_panel_items = [] # type: List[sublime.QuickPanelItem]\n for item in items:\n lsp_kind = item['kind']\n self.regions.append((range_to_region(Range.from_lsp(item['location']['range']), self.view),\n None, get_symbol_scope_from_lsp_kind(lsp_kind)))\n container = item.get(\"containerName\")\n st_kind, st_icon, st_display_type, _ = unpack_lsp_kind(lsp_kind)\n quick_panel_items.append(\n sublime.QuickPanelItem(\n trigger=item[\"name\"],\n details=container or \"\",\n annotation=st_display_type,\n kind=(st_kind, st_icon, st_display_type)))\n return quick_panel_items\n\n\nclass SymbolQueryInput(sublime_plugin.TextInputHandler):\n\n def validate(self, txt: str) -> bool:\n return txt != \"\"\n\n def placeholder(self) -> str:\n return \"Symbol\"\n\n\nclass LspWorkspaceSymbolsCommand(LspTextCommand):\n\n capability = 'workspaceSymbolProvider'\n\n def input(self, _args: Any) -> sublime_plugin.TextInputHandler:\n return SymbolQueryInput()\n\n def run(self, edit: sublime.Edit, symbol_query_input: str = \"\") -> None:\n if symbol_query_input:\n session = self.best_session(self.capability)\n if session:\n self.view.set_status(\"lsp_workspace_symbols\", \"Searching for '{}'...\".format(symbol_query_input))\n request = Request.workspaceSymbol({\"query\": symbol_query_input})\n session.send_request(request, lambda r: self._handle_response(\n symbol_query_input, r), self._handle_error)\n\n def _format(self, s: Dict[str, Any]) -> str:\n file_name = os.path.basename(s['location']['uri'])\n symbol_kind = format_symbol_kind(s[\"kind\"])\n name = \"{} ({}) - {} -- {}\".format(s['name'], symbol_kind, s.get('containerName', \"\"), file_name)\n return name\n\n def _open_file(self, symbols: List[Dict[str, Any]], index: int) -> None:\n if index != -1:\n symbol = symbols[index]\n window 
= self.view.window()\n if window:\n window.open_file(location_to_encoded_filename(symbol['location']), sublime.ENCODED_POSITION)\n\n def _handle_response(self, query: str, response: Optional[List[Dict[str, Any]]]) -> None:\n self.view.erase_status(\"lsp_workspace_symbols\")\n if response:\n matches = response\n window = self.view.window()\n if window:\n window.show_quick_panel(list(map(self._format, matches)), lambda i: self._open_file(matches, i))\n else:\n sublime.message_dialog(\"No matches found for query string: '{}'\".format(query))\n\n def _handle_error(self, error: Dict[str, Any]) -> None:\n self.view.erase_status(\"lsp_workspace_symbols\")\n reason = error.get(\"message\", \"none provided by server :(\")\n msg = \"command 'workspace/symbol' failed. Reason: {}\".format(reason)\n sublime.error_message(msg)\n", "path": "plugin/symbols.py"}]}
| 3,281 | 485 |
gh_patches_debug_20258
|
rasdani/github-patches
|
git_diff
|
kserve__kserve-1877
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix serving.kubeflow.org annotations in docs/samples
I've noticed that some `docs/samples` still use the `serving.kubeflow.org` prefix instead of `serving.kserve.org` in `metadata.annotations`. See this [example](https://github.com/kserve/kserve/blob/master/docs/samples/kafka/s3_secret.yaml).
To save debugging time for others migrating from KFServing, I could create a PR that fixes that.
--- END ISSUE ---
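
For anyone migrating by hand, a throwaway script along these lines could rewrite the legacy annotation prefix across the sample manifests. This is only a sketch, not part of the proposed PR: the `docs/samples` path, the in-place rewrite, and the availability of PyYAML are all illustrative assumptions.

```python
import pathlib

import yaml  # PyYAML

SAMPLES_DIR = pathlib.Path("docs/samples")  # assumed repository-relative path

for path in SAMPLES_DIR.rglob("*.yaml"):
    docs = list(yaml.safe_load_all(path.read_text()))
    changed = False
    for doc in docs:
        if not isinstance(doc, dict):
            continue
        annotations = (doc.get("metadata") or {}).get("annotations") or {}
        for key in list(annotations):
            if key.startswith("serving.kubeflow.org/"):
                # e.g. serving.kubeflow.org/s3-endpoint -> serving.kserve.org/s3-endpoint
                annotations["serving.kserve.org/" + key.split("/", 1)[1]] = annotations.pop(key)
                changed = True
    if changed:
        # Note: round-tripping through PyYAML drops comments and may reformat the files.
        path.write_text(yaml.safe_dump_all(docs, sort_keys=False))
```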
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/samples/kafka/setup.py`
Content:
```
1 #
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 #
6 # http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 from setuptools import setup, find_packages
15
16 tests_require = [
17 'pytest',
18 'pytest-tornasync',
19 'mypy'
20 ]
21
22 setup(
23 name='transformer',
24 version='0.1.0',
25 author_email='[email protected]',
26 license='../../LICENSE.txt',
27 url='https://github.com/kserve/kserve/tree/master/docs/samples#deploy-inferenceservice-with-transformer',
28 description='Transformer',
29 long_description=open('README.md').read(),
30 python_requires='>=3.6',
31 packages=find_packages("transformer"),
32 install_requires=[
33 "kfserving>=0.2.1",
34 "argparse>=1.4.0",
35 "requests>=2.22.0",
36 "joblib>=0.13.2",
37 "pandas>=0.24.2",
38 "numpy>=1.16.3",
39 "kubernetes >= 9.0.0",
40 "opencv-python-headless==4.0.0.21",
41 "boto3==1.7.2"
42 ],
43 tests_require=tests_require,
44 extras_require={'test': tests_require}
45 )
46
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/samples/kafka/setup.py b/docs/samples/kafka/setup.py
--- a/docs/samples/kafka/setup.py
+++ b/docs/samples/kafka/setup.py
@@ -24,21 +24,15 @@
version='0.1.0',
author_email='[email protected]',
license='../../LICENSE.txt',
- url='https://github.com/kserve/kserve/tree/master/docs/samples#deploy-inferenceservice-with-transformer',
+ url='https://github.com/kserve/kserve/tree/master/docs/samples/kafka',
description='Transformer',
long_description=open('README.md').read(),
- python_requires='>=3.6',
+ python_requires='>=3.7',
packages=find_packages("transformer"),
install_requires=[
- "kfserving>=0.2.1",
- "argparse>=1.4.0",
- "requests>=2.22.0",
- "joblib>=0.13.2",
+ "kserve>=0.7.0",
"pandas>=0.24.2",
- "numpy>=1.16.3",
- "kubernetes >= 9.0.0",
"opencv-python-headless==4.0.0.21",
- "boto3==1.7.2"
],
tests_require=tests_require,
extras_require={'test': tests_require}
|
{"golden_diff": "diff --git a/docs/samples/kafka/setup.py b/docs/samples/kafka/setup.py\n--- a/docs/samples/kafka/setup.py\n+++ b/docs/samples/kafka/setup.py\n@@ -24,21 +24,15 @@\n version='0.1.0',\n author_email='[email protected]',\n license='../../LICENSE.txt',\n- url='https://github.com/kserve/kserve/tree/master/docs/samples#deploy-inferenceservice-with-transformer',\n+ url='https://github.com/kserve/kserve/tree/master/docs/samples/kafka',\n description='Transformer',\n long_description=open('README.md').read(),\n- python_requires='>=3.6',\n+ python_requires='>=3.7',\n packages=find_packages(\"transformer\"),\n install_requires=[\n- \"kfserving>=0.2.1\",\n- \"argparse>=1.4.0\",\n- \"requests>=2.22.0\",\n- \"joblib>=0.13.2\",\n+ \"kserve>=0.7.0\",\n \"pandas>=0.24.2\",\n- \"numpy>=1.16.3\",\n- \"kubernetes >= 9.0.0\",\n \"opencv-python-headless==4.0.0.21\",\n- \"boto3==1.7.2\"\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\n", "issue": "Fix serving.kubeflow.org annotations in docs/samples\nI've noticed that some `docs/samples` still use in `metadata.annotations` the `serving.kubeflow.org` instead of `serving.kserve.org`. See this [example](https://github.com/kserve/kserve/blob/master/docs/samples/kafka/s3_secret.yaml).\r\nTo save debugging time for others migrating from KFserving, I could create PR that fixes that.\n", "before_files": [{"content": "#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest',\n 'pytest-tornasync',\n 'mypy'\n]\n\nsetup(\n name='transformer',\n version='0.1.0',\n author_email='[email protected]',\n license='../../LICENSE.txt',\n url='https://github.com/kserve/kserve/tree/master/docs/samples#deploy-inferenceservice-with-transformer',\n description='Transformer',\n long_description=open('README.md').read(),\n python_requires='>=3.6',\n packages=find_packages(\"transformer\"),\n install_requires=[\n \"kfserving>=0.2.1\",\n \"argparse>=1.4.0\",\n \"requests>=2.22.0\",\n \"joblib>=0.13.2\",\n \"pandas>=0.24.2\",\n \"numpy>=1.16.3\",\n \"kubernetes >= 9.0.0\",\n \"opencv-python-headless==4.0.0.21\",\n \"boto3==1.7.2\"\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\n)\n", "path": "docs/samples/kafka/setup.py"}], "after_files": [{"content": "#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest',\n 'pytest-tornasync',\n 'mypy'\n]\n\nsetup(\n name='transformer',\n 
version='0.1.0',\n author_email='[email protected]',\n license='../../LICENSE.txt',\n url='https://github.com/kserve/kserve/tree/master/docs/samples/kafka',\n description='Transformer',\n long_description=open('README.md').read(),\n python_requires='>=3.7',\n packages=find_packages(\"transformer\"),\n install_requires=[\n \"kserve>=0.7.0\",\n \"pandas>=0.24.2\",\n \"opencv-python-headless==4.0.0.21\",\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\n)\n", "path": "docs/samples/kafka/setup.py"}]}
| 816 | 322 |
gh_patches_debug_39161
|
rasdani/github-patches
|
git_diff
|
PrefectHQ__prefect-5437
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Azure BlobStorageUpload doesn't allow for overwriting blobs
## Current behavior
You get an error if you try to upload a blob under a name that already exists
```
azure.core.exceptions.ResourceExistsError: The specified blob already exists.
RequestId:5bef0cf1-b01e-002e-6
```
## Proposed behavior
The task should take in an `overwrite` argument and pass it to [this line](https://github.com/PrefectHQ/prefect/blob/6cd24b023411980842fa77e6c0ca2ced47eeb83e/src/prefect/tasks/azure/blobstorage.py#L131).
--- END ISSUE ---
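
A minimal sketch of the proposed behavior, outside of the task machinery: `BlobClient.upload_blob` already accepts an `overwrite` keyword, so the task only needs to thread such an argument through to that call. The names and the standalone-function shape below are illustrative, not the final patch:

```python
import azure.storage.blob


def upload_blob_sketch(
    data: str,
    blob_name: str,
    container: str,
    connection_string: str,
    overwrite: bool = False,
) -> str:
    # The task's run() would do the equivalent, resolving the connection string
    # from its Prefect Secret instead of taking it as a parameter.
    blob_service = azure.storage.blob.BlobServiceClient.from_connection_string(
        conn_str=connection_string
    )
    client = blob_service.get_blob_client(container=container, blob=blob_name)
    # overwrite=True replaces an existing blob instead of raising ResourceExistsError.
    client.upload_blob(data, overwrite=overwrite)
    return blob_name
```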
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/prefect/tasks/azure/blobstorage.py`
Content:
```
1 import uuid
2
3 import azure.storage.blob
4
5 from prefect import Task
6 from prefect.client import Secret
7 from prefect.utilities.tasks import defaults_from_attrs
8
9
10 class BlobStorageDownload(Task):
11 """
12 Task for downloading data from an Blob Storage container and returning it as a string.
13 Note that all initialization arguments can optionally be provided or overwritten at runtime.
14
15 Args:
16 - azure_credentials_secret (str, optional): the name of the Prefect Secret
17 that stores your Azure credentials; this Secret must be an Azure connection string
18 - container (str, optional): the name of the Azure Blob Storage to download from
19 - **kwargs (dict, optional): additional keyword arguments to pass to the
20 Task constructor
21 """
22
23 def __init__(
24 self,
25 azure_credentials_secret: str = "AZ_CONNECTION_STRING",
26 container: str = None,
27 **kwargs
28 ) -> None:
29 self.azure_credentials_secret = azure_credentials_secret
30 self.container = container
31 super().__init__(**kwargs)
32
33 @defaults_from_attrs("azure_credentials_secret", "container")
34 def run(
35 self,
36 blob_name: str,
37 azure_credentials_secret: str = "AZ_CONNECTION_STRING",
38 container: str = None,
39 ) -> str:
40 """
41 Task run method.
42
43 Args:
44 - blob_name (str): the name of the blob within this container to retrieve
45 - azure_credentials_secret (str, optional): the name of the Prefect Secret
46 that stores your Azure credentials; this Secret must be an Azure connection string
47 - container (str, optional): the name of the Blob Storage container to download from
48
49 Returns:
50 - str: the contents of this blob_name / container, as a string
51 """
52
53 if container is None:
54 raise ValueError("A container name must be provided.")
55
56 # get Azure credentials
57 azure_credentials = Secret(azure_credentials_secret).get()
58
59 blob_service = azure.storage.blob.BlobServiceClient.from_connection_string(
60 conn_str=azure_credentials
61 )
62
63 client = blob_service.get_blob_client(container=container, blob=blob_name)
64 content_string = client.download_blob().content_as_text()
65
66 return content_string
67
68
69 class BlobStorageUpload(Task):
70 """
71 Task for uploading string data (e.g., a JSON string) to an Azure Blob Storage container.
72 Note that all initialization arguments can optionally be provided or overwritten at runtime.
73
74 Args:
75 - azure_credentials_secret (str, optional): the name of the Prefect Secret
76 that stores your Azure credentials; this Secret must be an Azure connection string
77 - container (str, optional): the name of the Azure Blob Storage to upload to
78 - **kwargs (dict, optional): additional keyword arguments to pass to the
79 Task constructor
80 """
81
82 def __init__(
83 self,
84 azure_credentials_secret: str = "AZ_CONNECTION_STRING",
85 container: str = None,
86 **kwargs
87 ) -> None:
88 self.azure_credentials_secret = azure_credentials_secret
89 self.container = container
90 super().__init__(**kwargs)
91
92 @defaults_from_attrs("azure_credentials_secret", "container")
93 def run(
94 self,
95 data: str,
96 blob_name: str = None,
97 azure_credentials_secret: str = "AZ_CONNECTION_STRING",
98 container: str = None,
99 ) -> str:
100 """
101 Task run method.
102
103 Args:
104 - data (str): the data payload to upload
105 - blob_name (str, optional): the name to upload the data under; if not
106 provided, a random `uuid` will be created
107 - azure_credentials_secret (str, optional): the name of the Prefect Secret
108 that stores your Azure credentials; this Secret must be an Azure connection string
109 - container (str, optional): the name of the Blob Storage container to upload to
110
111 Returns:
112 - str: the name of the blob the data payload was uploaded to
113 """
114
115 if container is None:
116 raise ValueError("A container name must be provided.")
117
118 # get Azure credentials
119 azure_credentials = Secret(azure_credentials_secret).get()
120
121 blob_service = azure.storage.blob.BlobServiceClient.from_connection_string(
122 conn_str=azure_credentials
123 )
124
125 # create key if not provided
126 if blob_name is None:
127 blob_name = str(uuid.uuid4())
128
129 client = blob_service.get_blob_client(container=container, blob=blob_name)
130
131 client.upload_blob(data)
132
133 return blob_name
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/prefect/tasks/azure/blobstorage.py b/src/prefect/tasks/azure/blobstorage.py
--- a/src/prefect/tasks/azure/blobstorage.py
+++ b/src/prefect/tasks/azure/blobstorage.py
@@ -75,6 +75,8 @@
- azure_credentials_secret (str, optional): the name of the Prefect Secret
that stores your Azure credentials; this Secret must be an Azure connection string
- container (str, optional): the name of the Azure Blob Storage to upload to
+ - overwrite (bool, optional): if `True`, an existing blob with the same name will be overwritten.
+ Defaults to `False` and an error will be thrown if the blob already exists.
- **kwargs (dict, optional): additional keyword arguments to pass to the
Task constructor
"""
@@ -83,19 +85,22 @@
self,
azure_credentials_secret: str = "AZ_CONNECTION_STRING",
container: str = None,
+ overwrite: bool = False,
**kwargs
) -> None:
self.azure_credentials_secret = azure_credentials_secret
self.container = container
+ self.overwrite = overwrite
super().__init__(**kwargs)
- @defaults_from_attrs("azure_credentials_secret", "container")
+ @defaults_from_attrs("azure_credentials_secret", "container", "overwrite")
def run(
self,
data: str,
blob_name: str = None,
azure_credentials_secret: str = "AZ_CONNECTION_STRING",
container: str = None,
+ overwrite: bool = False,
) -> str:
"""
Task run method.
@@ -107,6 +112,8 @@
- azure_credentials_secret (str, optional): the name of the Prefect Secret
that stores your Azure credentials; this Secret must be an Azure connection string
- container (str, optional): the name of the Blob Storage container to upload to
+ - overwrite (bool, optional): if `True`, an existing blob with the same name will be overwritten.
+ Defaults to `False` and an error will be thrown if the blob already exists.
Returns:
- str: the name of the blob the data payload was uploaded to
@@ -128,6 +135,6 @@
client = blob_service.get_blob_client(container=container, blob=blob_name)
- client.upload_blob(data)
+ client.upload_blob(data, overwrite=overwrite)
return blob_name
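
With that change in place, opting in from a flow could look roughly like this (Prefect 1.x flow API; the container name, blob name, and payload are placeholders, and the `AZ_CONNECTION_STRING` secret still has to be configured):

```python
from prefect import Flow
from prefect.tasks.azure.blobstorage import BlobStorageUpload

# overwrite=True makes repeated runs replace the existing blob instead of failing
# with ResourceExistsError.
upload_task = BlobStorageUpload(container="my-container", overwrite=True)

with Flow("azure-blob-upload") as flow:
    upload_task(data='{"hello": "world"}', blob_name="results/latest.json")

# flow.run()  # requires the AZ_CONNECTION_STRING secret to be set
```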
|
{"golden_diff": "diff --git a/src/prefect/tasks/azure/blobstorage.py b/src/prefect/tasks/azure/blobstorage.py\n--- a/src/prefect/tasks/azure/blobstorage.py\n+++ b/src/prefect/tasks/azure/blobstorage.py\n@@ -75,6 +75,8 @@\n - azure_credentials_secret (str, optional): the name of the Prefect Secret\n that stores your Azure credentials; this Secret must be an Azure connection string\n - container (str, optional): the name of the Azure Blob Storage to upload to\n+ - overwrite (bool, optional): if `True`, an existing blob with the same name will be overwritten.\n+ Defaults to `False` and an error will be thrown if the blob already exists.\n - **kwargs (dict, optional): additional keyword arguments to pass to the\n Task constructor\n \"\"\"\n@@ -83,19 +85,22 @@\n self,\n azure_credentials_secret: str = \"AZ_CONNECTION_STRING\",\n container: str = None,\n+ overwrite: bool = False,\n **kwargs\n ) -> None:\n self.azure_credentials_secret = azure_credentials_secret\n self.container = container\n+ self.overwrite = overwrite\n super().__init__(**kwargs)\n \n- @defaults_from_attrs(\"azure_credentials_secret\", \"container\")\n+ @defaults_from_attrs(\"azure_credentials_secret\", \"container\", \"overwrite\")\n def run(\n self,\n data: str,\n blob_name: str = None,\n azure_credentials_secret: str = \"AZ_CONNECTION_STRING\",\n container: str = None,\n+ overwrite: bool = False,\n ) -> str:\n \"\"\"\n Task run method.\n@@ -107,6 +112,8 @@\n - azure_credentials_secret (str, optional): the name of the Prefect Secret\n that stores your Azure credentials; this Secret must be an Azure connection string\n - container (str, optional): the name of the Blob Storage container to upload to\n+ - overwrite (bool, optional): if `True`, an existing blob with the same name will be overwritten.\n+ Defaults to `False` and an error will be thrown if the blob already exists.\n \n Returns:\n - str: the name of the blob the data payload was uploaded to\n@@ -128,6 +135,6 @@\n \n client = blob_service.get_blob_client(container=container, blob=blob_name)\n \n- client.upload_blob(data)\n+ client.upload_blob(data, overwrite=overwrite)\n \n return blob_name\n", "issue": "Azure BlobStorageUpload doesn't allow for overwriting blobs\n## Current behavior\r\n\r\nYou get an error if you try to upload the same file name\r\n\r\n```\r\nazure.core.exceptions.ResourceExistsError: The specified blob already exists.\r\nRequestId:5bef0cf1-b01e-002e-6\r\n```\r\n\r\n## Proposed behavior\r\n\r\nThe task should take in an `overwrite` argument and pass it to [this line](https://github.com/PrefectHQ/prefect/blob/6cd24b023411980842fa77e6c0ca2ced47eeb83e/src/prefect/tasks/azure/blobstorage.py#L131).\r\n\r\n\n", "before_files": [{"content": "import uuid\n\nimport azure.storage.blob\n\nfrom prefect import Task\nfrom prefect.client import Secret\nfrom prefect.utilities.tasks import defaults_from_attrs\n\n\nclass BlobStorageDownload(Task):\n \"\"\"\n Task for downloading data from an Blob Storage container and returning it as a string.\n Note that all initialization arguments can optionally be provided or overwritten at runtime.\n\n Args:\n - azure_credentials_secret (str, optional): the name of the Prefect Secret\n that stores your Azure credentials; this Secret must be an Azure connection string\n - container (str, optional): the name of the Azure Blob Storage to download from\n - **kwargs (dict, optional): additional keyword arguments to pass to the\n Task constructor\n \"\"\"\n\n def __init__(\n self,\n azure_credentials_secret: str = 
\"AZ_CONNECTION_STRING\",\n container: str = None,\n **kwargs\n ) -> None:\n self.azure_credentials_secret = azure_credentials_secret\n self.container = container\n super().__init__(**kwargs)\n\n @defaults_from_attrs(\"azure_credentials_secret\", \"container\")\n def run(\n self,\n blob_name: str,\n azure_credentials_secret: str = \"AZ_CONNECTION_STRING\",\n container: str = None,\n ) -> str:\n \"\"\"\n Task run method.\n\n Args:\n - blob_name (str): the name of the blob within this container to retrieve\n - azure_credentials_secret (str, optional): the name of the Prefect Secret\n that stores your Azure credentials; this Secret must be an Azure connection string\n - container (str, optional): the name of the Blob Storage container to download from\n\n Returns:\n - str: the contents of this blob_name / container, as a string\n \"\"\"\n\n if container is None:\n raise ValueError(\"A container name must be provided.\")\n\n # get Azure credentials\n azure_credentials = Secret(azure_credentials_secret).get()\n\n blob_service = azure.storage.blob.BlobServiceClient.from_connection_string(\n conn_str=azure_credentials\n )\n\n client = blob_service.get_blob_client(container=container, blob=blob_name)\n content_string = client.download_blob().content_as_text()\n\n return content_string\n\n\nclass BlobStorageUpload(Task):\n \"\"\"\n Task for uploading string data (e.g., a JSON string) to an Azure Blob Storage container.\n Note that all initialization arguments can optionally be provided or overwritten at runtime.\n\n Args:\n - azure_credentials_secret (str, optional): the name of the Prefect Secret\n that stores your Azure credentials; this Secret must be an Azure connection string\n - container (str, optional): the name of the Azure Blob Storage to upload to\n - **kwargs (dict, optional): additional keyword arguments to pass to the\n Task constructor\n \"\"\"\n\n def __init__(\n self,\n azure_credentials_secret: str = \"AZ_CONNECTION_STRING\",\n container: str = None,\n **kwargs\n ) -> None:\n self.azure_credentials_secret = azure_credentials_secret\n self.container = container\n super().__init__(**kwargs)\n\n @defaults_from_attrs(\"azure_credentials_secret\", \"container\")\n def run(\n self,\n data: str,\n blob_name: str = None,\n azure_credentials_secret: str = \"AZ_CONNECTION_STRING\",\n container: str = None,\n ) -> str:\n \"\"\"\n Task run method.\n\n Args:\n - data (str): the data payload to upload\n - blob_name (str, optional): the name to upload the data under; if not\n provided, a random `uuid` will be created\n - azure_credentials_secret (str, optional): the name of the Prefect Secret\n that stores your Azure credentials; this Secret must be an Azure connection string\n - container (str, optional): the name of the Blob Storage container to upload to\n\n Returns:\n - str: the name of the blob the data payload was uploaded to\n \"\"\"\n\n if container is None:\n raise ValueError(\"A container name must be provided.\")\n\n # get Azure credentials\n azure_credentials = Secret(azure_credentials_secret).get()\n\n blob_service = azure.storage.blob.BlobServiceClient.from_connection_string(\n conn_str=azure_credentials\n )\n\n # create key if not provided\n if blob_name is None:\n blob_name = str(uuid.uuid4())\n\n client = blob_service.get_blob_client(container=container, blob=blob_name)\n\n client.upload_blob(data)\n\n return blob_name\n", "path": "src/prefect/tasks/azure/blobstorage.py"}], "after_files": [{"content": "import uuid\n\nimport azure.storage.blob\n\nfrom prefect import Task\nfrom 
prefect.client import Secret\nfrom prefect.utilities.tasks import defaults_from_attrs\n\n\nclass BlobStorageDownload(Task):\n \"\"\"\n Task for downloading data from an Blob Storage container and returning it as a string.\n Note that all initialization arguments can optionally be provided or overwritten at runtime.\n\n Args:\n - azure_credentials_secret (str, optional): the name of the Prefect Secret\n that stores your Azure credentials; this Secret must be an Azure connection string\n - container (str, optional): the name of the Azure Blob Storage to download from\n - **kwargs (dict, optional): additional keyword arguments to pass to the\n Task constructor\n \"\"\"\n\n def __init__(\n self,\n azure_credentials_secret: str = \"AZ_CONNECTION_STRING\",\n container: str = None,\n **kwargs\n ) -> None:\n self.azure_credentials_secret = azure_credentials_secret\n self.container = container\n super().__init__(**kwargs)\n\n @defaults_from_attrs(\"azure_credentials_secret\", \"container\")\n def run(\n self,\n blob_name: str,\n azure_credentials_secret: str = \"AZ_CONNECTION_STRING\",\n container: str = None,\n ) -> str:\n \"\"\"\n Task run method.\n\n Args:\n - blob_name (str): the name of the blob within this container to retrieve\n - azure_credentials_secret (str, optional): the name of the Prefect Secret\n that stores your Azure credentials; this Secret must be an Azure connection string\n - container (str, optional): the name of the Blob Storage container to download from\n\n Returns:\n - str: the contents of this blob_name / container, as a string\n \"\"\"\n\n if container is None:\n raise ValueError(\"A container name must be provided.\")\n\n # get Azure credentials\n azure_credentials = Secret(azure_credentials_secret).get()\n\n blob_service = azure.storage.blob.BlobServiceClient.from_connection_string(\n conn_str=azure_credentials\n )\n\n client = blob_service.get_blob_client(container=container, blob=blob_name)\n content_string = client.download_blob().content_as_text()\n\n return content_string\n\n\nclass BlobStorageUpload(Task):\n \"\"\"\n Task for uploading string data (e.g., a JSON string) to an Azure Blob Storage container.\n Note that all initialization arguments can optionally be provided or overwritten at runtime.\n\n Args:\n - azure_credentials_secret (str, optional): the name of the Prefect Secret\n that stores your Azure credentials; this Secret must be an Azure connection string\n - container (str, optional): the name of the Azure Blob Storage to upload to\n - overwrite (bool, optional): if `True`, an existing blob with the same name will be overwritten.\n Defaults to `False` and an error will be thrown if the blob already exists.\n - **kwargs (dict, optional): additional keyword arguments to pass to the\n Task constructor\n \"\"\"\n\n def __init__(\n self,\n azure_credentials_secret: str = \"AZ_CONNECTION_STRING\",\n container: str = None,\n overwrite: bool = False,\n **kwargs\n ) -> None:\n self.azure_credentials_secret = azure_credentials_secret\n self.container = container\n self.overwrite = overwrite\n super().__init__(**kwargs)\n\n @defaults_from_attrs(\"azure_credentials_secret\", \"container\", \"overwrite\")\n def run(\n self,\n data: str,\n blob_name: str = None,\n azure_credentials_secret: str = \"AZ_CONNECTION_STRING\",\n container: str = None,\n overwrite: bool = False,\n ) -> str:\n \"\"\"\n Task run method.\n\n Args:\n - data (str): the data payload to upload\n - blob_name (str, optional): the name to upload the data under; if not\n provided, a random `uuid` 
will be created\n - azure_credentials_secret (str, optional): the name of the Prefect Secret\n that stores your Azure credentials; this Secret must be an Azure connection string\n - container (str, optional): the name of the Blob Storage container to upload to\n - overwrite (bool, optional): if `True`, an existing blob with the same name will be overwritten.\n Defaults to `False` and an error will be thrown if the blob already exists.\n\n Returns:\n - str: the name of the blob the data payload was uploaded to\n \"\"\"\n\n if container is None:\n raise ValueError(\"A container name must be provided.\")\n\n # get Azure credentials\n azure_credentials = Secret(azure_credentials_secret).get()\n\n blob_service = azure.storage.blob.BlobServiceClient.from_connection_string(\n conn_str=azure_credentials\n )\n\n # create key if not provided\n if blob_name is None:\n blob_name = str(uuid.uuid4())\n\n client = blob_service.get_blob_client(container=container, blob=blob_name)\n\n client.upload_blob(data, overwrite=overwrite)\n\n return blob_name\n", "path": "src/prefect/tasks/azure/blobstorage.py"}]}
| 1,671 | 546 |
gh_patches_debug_13706
|
rasdani/github-patches
|
git_diff
|
encode__httpx-758
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DecodingError with zlib-compressed responses
Using the following minimal WSGI app:
```python
import zlib
def app(environ, start_response):
start_response("200 OK", [("Content-Encoding", "deflate")])
return [zlib.compress(b"hello world")]
```
This works fine in a web browser.
Requests is quite happy:
```
>>> import requests
>>> requests.get("http://localhost:9999")
<Response [200]>
>>> requests.get("http://localhost:9999").text
'hello world'
```
httpx says:
```
>>> httpx.get('http://localhost:9999')
Traceback (most recent call last):
File "/Users/jamie/code/httpx/httpx/decoders.py", line 52, in decode
return self.decompressor.decompress(data)
zlib.error: Error -3 while decompressing data: invalid stored block lengths
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/jamie/code/httpx/httpx/api.py", line 170, in get
trust_env=trust_env,
File "/Users/jamie/code/httpx/httpx/api.py", line 96, in request
allow_redirects=allow_redirects,
File "/Users/jamie/code/httpx/httpx/client.py", line 568, in request
request, auth=auth, allow_redirects=allow_redirects, timeout=timeout,
File "/Users/jamie/code/httpx/httpx/client.py", line 593, in send
response.read()
File "/Users/jamie/code/httpx/httpx/models.py", line 900, in read
self._content = b"".join([part for part in self.iter_bytes()])
File "/Users/jamie/code/httpx/httpx/models.py", line 900, in <listcomp>
self._content = b"".join([part for part in self.iter_bytes()])
File "/Users/jamie/code/httpx/httpx/models.py", line 912, in iter_bytes
yield self.decoder.decode(chunk)
File "/Users/jamie/code/httpx/httpx/decoders.py", line 54, in decode
raise DecodingError from exc
httpx.exceptions.DecodingError
```
The reason this is an issue and not a PR is because I don't understand the code in `decoders.py` - it very explicitly says `self.decompressor = zlib.decompressobj(-zlib.MAX_WBITS)`, which according to [the Python docs](https://docs.python.org/3/library/zlib.html#zlib.compressobj) means "Uses the absolute value of wbits as the window size logarithm, while producing a raw output stream with no header or trailing checksum." I don't know enough about zlib to understand this, but it looks very deliberate so I thought I'd better ask.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `httpx/decoders.py`
Content:
```
1 """
2 Handlers for Content-Encoding.
3
4 See: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding
5 """
6 import codecs
7 import typing
8 import zlib
9
10 import chardet
11
12 from .exceptions import DecodingError
13
14 try:
15 import brotli
16 except ImportError: # pragma: nocover
17 brotli = None
18
19
20 class Decoder:
21 def decode(self, data: bytes) -> bytes:
22 raise NotImplementedError() # pragma: nocover
23
24 def flush(self) -> bytes:
25 raise NotImplementedError() # pragma: nocover
26
27
28 class IdentityDecoder(Decoder):
29 """
30 Handle unencoded data.
31 """
32
33 def decode(self, data: bytes) -> bytes:
34 return data
35
36 def flush(self) -> bytes:
37 return b""
38
39
40 class DeflateDecoder(Decoder):
41 """
42 Handle 'deflate' decoding.
43
44 See: https://stackoverflow.com/questions/1838699
45 """
46
47 def __init__(self) -> None:
48 self.decompressor = zlib.decompressobj(-zlib.MAX_WBITS)
49
50 def decode(self, data: bytes) -> bytes:
51 try:
52 return self.decompressor.decompress(data)
53 except zlib.error as exc:
54 raise DecodingError from exc
55
56 def flush(self) -> bytes:
57 try:
58 return self.decompressor.flush()
59 except zlib.error as exc: # pragma: nocover
60 raise DecodingError from exc
61
62
63 class GZipDecoder(Decoder):
64 """
65 Handle 'gzip' decoding.
66
67 See: https://stackoverflow.com/questions/1838699
68 """
69
70 def __init__(self) -> None:
71 self.decompressor = zlib.decompressobj(zlib.MAX_WBITS | 16)
72
73 def decode(self, data: bytes) -> bytes:
74 try:
75 return self.decompressor.decompress(data)
76 except zlib.error as exc:
77 raise DecodingError from exc
78
79 def flush(self) -> bytes:
80 try:
81 return self.decompressor.flush()
82 except zlib.error as exc: # pragma: nocover
83 raise DecodingError from exc
84
85
86 class BrotliDecoder(Decoder):
87 """
88 Handle 'brotli' decoding.
89
90 Requires `pip install brotlipy`. See: https://brotlipy.readthedocs.io/
91 or `pip install brotli`. See https://github.com/google/brotli
92 Supports both 'brotlipy' and 'Brotli' packages since they share an import
93 name. The top branches are for 'brotlipy' and bottom branches for 'Brotli'
94 """
95
96 def __init__(self) -> None:
97 assert (
98 brotli is not None
99 ), "The 'brotlipy' or 'brotli' library must be installed to use 'BrotliDecoder'"
100 self.decompressor = brotli.Decompressor()
101 self.seen_data = False
102
103 def decode(self, data: bytes) -> bytes:
104 if not data:
105 return b""
106 self.seen_data = True
107 try:
108 if hasattr(self.decompressor, "decompress"):
109 return self.decompressor.decompress(data)
110 return self.decompressor.process(data) # pragma: nocover
111 except brotli.error as exc:
112 raise DecodingError from exc
113
114 def flush(self) -> bytes:
115 if not self.seen_data:
116 return b""
117 try:
118 if hasattr(self.decompressor, "finish"):
119 self.decompressor.finish()
120 return b""
121 except brotli.error as exc: # pragma: nocover
122 raise DecodingError from exc
123
124
125 class MultiDecoder(Decoder):
126 """
127 Handle the case where multiple encodings have been applied.
128 """
129
130 def __init__(self, children: typing.Sequence[Decoder]) -> None:
131 """
132 'children' should be a sequence of decoders in the order in which
133 each was applied.
134 """
135 # Note that we reverse the order for decoding.
136 self.children = list(reversed(children))
137
138 def decode(self, data: bytes) -> bytes:
139 for child in self.children:
140 data = child.decode(data)
141 return data
142
143 def flush(self) -> bytes:
144 data = b""
145 for child in self.children:
146 data = child.decode(data) + child.flush()
147 return data
148
149
150 class TextDecoder:
151 """
152 Handles incrementally decoding bytes into text
153 """
154
155 def __init__(self, encoding: typing.Optional[str] = None):
156 self.decoder: typing.Optional[codecs.IncrementalDecoder] = (
157 None if encoding is None else codecs.getincrementaldecoder(encoding)()
158 )
159 self.detector = chardet.universaldetector.UniversalDetector()
160
161 # This buffer is only needed if 'decoder' is 'None'
162 # we want to trigger errors if data is getting added to
163 # our internal buffer for some silly reason while
164 # a decoder is discovered.
165 self.buffer: typing.Optional[bytearray] = None if self.decoder else bytearray()
166
167 def decode(self, data: bytes) -> str:
168 try:
169 if self.decoder is not None:
170 text = self.decoder.decode(data)
171 else:
172 assert self.buffer is not None
173 text = ""
174 self.detector.feed(data)
175 self.buffer += data
176
177 # Should be more than enough data to process, we don't
178 # want to buffer too long as chardet will wait until
179 # detector.close() is used to give back common
180 # encodings like 'utf-8'.
181 if len(self.buffer) >= 4096:
182 self.decoder = codecs.getincrementaldecoder(
183 self._detector_result()
184 )()
185 text = self.decoder.decode(bytes(self.buffer), False)
186 self.buffer = None
187
188 return text
189 except UnicodeDecodeError: # pragma: nocover
190 raise DecodingError() from None
191
192 def flush(self) -> str:
193 try:
194 if self.decoder is None:
195 # Empty string case as chardet is guaranteed to not have a guess.
196 assert self.buffer is not None
197 if len(self.buffer) == 0:
198 return ""
199 return bytes(self.buffer).decode(self._detector_result())
200
201 return self.decoder.decode(b"", True)
202 except UnicodeDecodeError: # pragma: nocover
203 raise DecodingError() from None
204
205 def _detector_result(self) -> str:
206 self.detector.close()
207 result = self.detector.result["encoding"]
208 if not result: # pragma: nocover
209 raise DecodingError("Unable to determine encoding of content")
210
211 return result
212
213
214 class LineDecoder:
215 """
216 Handles incrementally reading lines from text.
217
218 Uses universal line decoding, supporting any of `\n`, `\r`, or `\r\n`
219 as line endings, normalizing to `\n`.
220 """
221
222 def __init__(self) -> None:
223 self.buffer = ""
224
225 def decode(self, text: str) -> typing.List[str]:
226 lines = []
227
228 if text.startswith("\n") and self.buffer and self.buffer[-1] == "\r":
229 # Handle the case where we have an "\r\n" split across
230 # our previous input, and our new chunk.
231 lines.append(self.buffer[:-1] + "\n")
232 self.buffer = ""
233 text = text[1:]
234
235 while text:
236 num_chars = len(text)
237 for idx in range(num_chars):
238 char = text[idx]
239 next_char = None if idx + 1 == num_chars else text[idx + 1]
240 if char == "\n":
241 lines.append(self.buffer + text[: idx + 1])
242 self.buffer = ""
243 text = text[idx + 1 :]
244 break
245 elif char == "\r" and next_char == "\n":
246 lines.append(self.buffer + text[:idx] + "\n")
247 self.buffer = ""
248 text = text[idx + 2 :]
249 break
250 elif char == "\r" and next_char is not None:
251 lines.append(self.buffer + text[:idx] + "\n")
252 self.buffer = ""
253 text = text[idx + 1 :]
254 break
255 elif next_char is None:
256 self.buffer = text
257 text = ""
258 break
259
260 return lines
261
262 def flush(self) -> typing.List[str]:
263 if self.buffer.endswith("\r"):
264 # Handle the case where we had a trailing '\r', which could have
265 # been a '\r\n' pair.
266 lines = [self.buffer[:-1] + "\n"]
267 elif self.buffer:
268 lines = [self.buffer]
269 else:
270 lines = []
271 self.buffer = ""
272 return lines
273
274
275 SUPPORTED_DECODERS = {
276 "identity": IdentityDecoder,
277 "gzip": GZipDecoder,
278 "deflate": DeflateDecoder,
279 "br": BrotliDecoder,
280 }
281
282
283 if brotli is None:
284 SUPPORTED_DECODERS.pop("br") # pragma: nocover
285
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/httpx/decoders.py b/httpx/decoders.py
--- a/httpx/decoders.py
+++ b/httpx/decoders.py
@@ -45,12 +45,18 @@
"""
def __init__(self) -> None:
- self.decompressor = zlib.decompressobj(-zlib.MAX_WBITS)
+ self.first_attempt = True
+ self.decompressor = zlib.decompressobj()
def decode(self, data: bytes) -> bytes:
+ was_first_attempt = self.first_attempt
+ self.first_attempt = False
try:
return self.decompressor.decompress(data)
except zlib.error as exc:
+ if was_first_attempt:
+ self.decompressor = zlib.decompressobj(-zlib.MAX_WBITS)
+ return self.decode(data)
raise DecodingError from exc
def flush(self) -> bytes:
|
{"golden_diff": "diff --git a/httpx/decoders.py b/httpx/decoders.py\n--- a/httpx/decoders.py\n+++ b/httpx/decoders.py\n@@ -45,12 +45,18 @@\n \"\"\"\n \n def __init__(self) -> None:\n- self.decompressor = zlib.decompressobj(-zlib.MAX_WBITS)\n+ self.first_attempt = True\n+ self.decompressor = zlib.decompressobj()\n \n def decode(self, data: bytes) -> bytes:\n+ was_first_attempt = self.first_attempt\n+ self.first_attempt = False\n try:\n return self.decompressor.decompress(data)\n except zlib.error as exc:\n+ if was_first_attempt:\n+ self.decompressor = zlib.decompressobj(-zlib.MAX_WBITS)\n+ return self.decode(data)\n raise DecodingError from exc\n \n def flush(self) -> bytes:\n", "issue": "DecodingError with zlib-compressed responses\nUsing the following minimal WSGI app:\r\n\r\n```python\r\nimport zlib\r\n\r\ndef app(environ, start_response):\r\n start_response(\"200 OK\", [(\"Content-Encoding\", \"deflate\")])\r\n return [zlib.compress(b\"hello world\")]\r\n```\r\n\r\nThis works fine in a web browser.\r\n\r\nRequests is quite happy:\r\n\r\n```\r\n>>> import requests\r\n>>> requests.get(\"http://localhost:9999\")\r\n<Response [200]>\r\n>>> requests.get(\"http://localhost:9999\").text\r\n'hello world'\r\n```\r\n\r\nhttpx says:\r\n\r\n```\r\n>>> httpx.get('http://localhost:9999')\r\nTraceback (most recent call last):\r\n File \"/Users/jamie/code/httpx/httpx/decoders.py\", line 52, in decode\r\n return self.decompressor.decompress(data)\r\nzlib.error: Error -3 while decompressing data: invalid stored block lengths\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/Users/jamie/code/httpx/httpx/api.py\", line 170, in get\r\n trust_env=trust_env,\r\n File \"/Users/jamie/code/httpx/httpx/api.py\", line 96, in request\r\n allow_redirects=allow_redirects,\r\n File \"/Users/jamie/code/httpx/httpx/client.py\", line 568, in request\r\n request, auth=auth, allow_redirects=allow_redirects, timeout=timeout,\r\n File \"/Users/jamie/code/httpx/httpx/client.py\", line 593, in send\r\n response.read()\r\n File \"/Users/jamie/code/httpx/httpx/models.py\", line 900, in read\r\n self._content = b\"\".join([part for part in self.iter_bytes()])\r\n File \"/Users/jamie/code/httpx/httpx/models.py\", line 900, in <listcomp>\r\n self._content = b\"\".join([part for part in self.iter_bytes()])\r\n File \"/Users/jamie/code/httpx/httpx/models.py\", line 912, in iter_bytes\r\n yield self.decoder.decode(chunk)\r\n File \"/Users/jamie/code/httpx/httpx/decoders.py\", line 54, in decode\r\n raise DecodingError from exc\r\nhttpx.exceptions.DecodingError\r\n```\r\n\r\nThe reason this is an issue and not a PR is because I don't understand the code in `decoders.py` - it very explicitly says `self.decompressor = zlib.decompressobj(-zlib.MAX_WBITS)`, which according to [the Python docs](https://docs.python.org/3/library/zlib.html#zlib.compressobj) means \"Uses the absolute value of wbits as the window size logarithm, while producing a raw output stream with no header or trailing checksum.\" I don't know enough about zlib to understand this, but it looks very deliberate so I thought I'd better ask.\n", "before_files": [{"content": "\"\"\"\nHandlers for Content-Encoding.\n\nSee: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding\n\"\"\"\nimport codecs\nimport typing\nimport zlib\n\nimport chardet\n\nfrom .exceptions import DecodingError\n\ntry:\n import brotli\nexcept ImportError: # pragma: 
nocover\n brotli = None\n\n\nclass Decoder:\n def decode(self, data: bytes) -> bytes:\n raise NotImplementedError() # pragma: nocover\n\n def flush(self) -> bytes:\n raise NotImplementedError() # pragma: nocover\n\n\nclass IdentityDecoder(Decoder):\n \"\"\"\n Handle unencoded data.\n \"\"\"\n\n def decode(self, data: bytes) -> bytes:\n return data\n\n def flush(self) -> bytes:\n return b\"\"\n\n\nclass DeflateDecoder(Decoder):\n \"\"\"\n Handle 'deflate' decoding.\n\n See: https://stackoverflow.com/questions/1838699\n \"\"\"\n\n def __init__(self) -> None:\n self.decompressor = zlib.decompressobj(-zlib.MAX_WBITS)\n\n def decode(self, data: bytes) -> bytes:\n try:\n return self.decompressor.decompress(data)\n except zlib.error as exc:\n raise DecodingError from exc\n\n def flush(self) -> bytes:\n try:\n return self.decompressor.flush()\n except zlib.error as exc: # pragma: nocover\n raise DecodingError from exc\n\n\nclass GZipDecoder(Decoder):\n \"\"\"\n Handle 'gzip' decoding.\n\n See: https://stackoverflow.com/questions/1838699\n \"\"\"\n\n def __init__(self) -> None:\n self.decompressor = zlib.decompressobj(zlib.MAX_WBITS | 16)\n\n def decode(self, data: bytes) -> bytes:\n try:\n return self.decompressor.decompress(data)\n except zlib.error as exc:\n raise DecodingError from exc\n\n def flush(self) -> bytes:\n try:\n return self.decompressor.flush()\n except zlib.error as exc: # pragma: nocover\n raise DecodingError from exc\n\n\nclass BrotliDecoder(Decoder):\n \"\"\"\n Handle 'brotli' decoding.\n\n Requires `pip install brotlipy`. See: https://brotlipy.readthedocs.io/\n or `pip install brotli`. See https://github.com/google/brotli\n Supports both 'brotlipy' and 'Brotli' packages since they share an import\n name. The top branches are for 'brotlipy' and bottom branches for 'Brotli'\n \"\"\"\n\n def __init__(self) -> None:\n assert (\n brotli is not None\n ), \"The 'brotlipy' or 'brotli' library must be installed to use 'BrotliDecoder'\"\n self.decompressor = brotli.Decompressor()\n self.seen_data = False\n\n def decode(self, data: bytes) -> bytes:\n if not data:\n return b\"\"\n self.seen_data = True\n try:\n if hasattr(self.decompressor, \"decompress\"):\n return self.decompressor.decompress(data)\n return self.decompressor.process(data) # pragma: nocover\n except brotli.error as exc:\n raise DecodingError from exc\n\n def flush(self) -> bytes:\n if not self.seen_data:\n return b\"\"\n try:\n if hasattr(self.decompressor, \"finish\"):\n self.decompressor.finish()\n return b\"\"\n except brotli.error as exc: # pragma: nocover\n raise DecodingError from exc\n\n\nclass MultiDecoder(Decoder):\n \"\"\"\n Handle the case where multiple encodings have been applied.\n \"\"\"\n\n def __init__(self, children: typing.Sequence[Decoder]) -> None:\n \"\"\"\n 'children' should be a sequence of decoders in the order in which\n each was applied.\n \"\"\"\n # Note that we reverse the order for decoding.\n self.children = list(reversed(children))\n\n def decode(self, data: bytes) -> bytes:\n for child in self.children:\n data = child.decode(data)\n return data\n\n def flush(self) -> bytes:\n data = b\"\"\n for child in self.children:\n data = child.decode(data) + child.flush()\n return data\n\n\nclass TextDecoder:\n \"\"\"\n Handles incrementally decoding bytes into text\n \"\"\"\n\n def __init__(self, encoding: typing.Optional[str] = None):\n self.decoder: typing.Optional[codecs.IncrementalDecoder] = (\n None if encoding is None else codecs.getincrementaldecoder(encoding)()\n )\n self.detector = 
chardet.universaldetector.UniversalDetector()\n\n # This buffer is only needed if 'decoder' is 'None'\n # we want to trigger errors if data is getting added to\n # our internal buffer for some silly reason while\n # a decoder is discovered.\n self.buffer: typing.Optional[bytearray] = None if self.decoder else bytearray()\n\n def decode(self, data: bytes) -> str:\n try:\n if self.decoder is not None:\n text = self.decoder.decode(data)\n else:\n assert self.buffer is not None\n text = \"\"\n self.detector.feed(data)\n self.buffer += data\n\n # Should be more than enough data to process, we don't\n # want to buffer too long as chardet will wait until\n # detector.close() is used to give back common\n # encodings like 'utf-8'.\n if len(self.buffer) >= 4096:\n self.decoder = codecs.getincrementaldecoder(\n self._detector_result()\n )()\n text = self.decoder.decode(bytes(self.buffer), False)\n self.buffer = None\n\n return text\n except UnicodeDecodeError: # pragma: nocover\n raise DecodingError() from None\n\n def flush(self) -> str:\n try:\n if self.decoder is None:\n # Empty string case as chardet is guaranteed to not have a guess.\n assert self.buffer is not None\n if len(self.buffer) == 0:\n return \"\"\n return bytes(self.buffer).decode(self._detector_result())\n\n return self.decoder.decode(b\"\", True)\n except UnicodeDecodeError: # pragma: nocover\n raise DecodingError() from None\n\n def _detector_result(self) -> str:\n self.detector.close()\n result = self.detector.result[\"encoding\"]\n if not result: # pragma: nocover\n raise DecodingError(\"Unable to determine encoding of content\")\n\n return result\n\n\nclass LineDecoder:\n \"\"\"\n Handles incrementally reading lines from text.\n\n Uses universal line decoding, supporting any of `\\n`, `\\r`, or `\\r\\n`\n as line endings, normalizing to `\\n`.\n \"\"\"\n\n def __init__(self) -> None:\n self.buffer = \"\"\n\n def decode(self, text: str) -> typing.List[str]:\n lines = []\n\n if text.startswith(\"\\n\") and self.buffer and self.buffer[-1] == \"\\r\":\n # Handle the case where we have an \"\\r\\n\" split across\n # our previous input, and our new chunk.\n lines.append(self.buffer[:-1] + \"\\n\")\n self.buffer = \"\"\n text = text[1:]\n\n while text:\n num_chars = len(text)\n for idx in range(num_chars):\n char = text[idx]\n next_char = None if idx + 1 == num_chars else text[idx + 1]\n if char == \"\\n\":\n lines.append(self.buffer + text[: idx + 1])\n self.buffer = \"\"\n text = text[idx + 1 :]\n break\n elif char == \"\\r\" and next_char == \"\\n\":\n lines.append(self.buffer + text[:idx] + \"\\n\")\n self.buffer = \"\"\n text = text[idx + 2 :]\n break\n elif char == \"\\r\" and next_char is not None:\n lines.append(self.buffer + text[:idx] + \"\\n\")\n self.buffer = \"\"\n text = text[idx + 1 :]\n break\n elif next_char is None:\n self.buffer = text\n text = \"\"\n break\n\n return lines\n\n def flush(self) -> typing.List[str]:\n if self.buffer.endswith(\"\\r\"):\n # Handle the case where we had a trailing '\\r', which could have\n # been a '\\r\\n' pair.\n lines = [self.buffer[:-1] + \"\\n\"]\n elif self.buffer:\n lines = [self.buffer]\n else:\n lines = []\n self.buffer = \"\"\n return lines\n\n\nSUPPORTED_DECODERS = {\n \"identity\": IdentityDecoder,\n \"gzip\": GZipDecoder,\n \"deflate\": DeflateDecoder,\n \"br\": BrotliDecoder,\n}\n\n\nif brotli is None:\n SUPPORTED_DECODERS.pop(\"br\") # pragma: nocover\n", "path": "httpx/decoders.py"}], "after_files": [{"content": "\"\"\"\nHandlers for Content-Encoding.\n\nSee: 
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding\n\"\"\"\nimport codecs\nimport typing\nimport zlib\n\nimport chardet\n\nfrom .exceptions import DecodingError\n\ntry:\n import brotli\nexcept ImportError: # pragma: nocover\n brotli = None\n\n\nclass Decoder:\n def decode(self, data: bytes) -> bytes:\n raise NotImplementedError() # pragma: nocover\n\n def flush(self) -> bytes:\n raise NotImplementedError() # pragma: nocover\n\n\nclass IdentityDecoder(Decoder):\n \"\"\"\n Handle unencoded data.\n \"\"\"\n\n def decode(self, data: bytes) -> bytes:\n return data\n\n def flush(self) -> bytes:\n return b\"\"\n\n\nclass DeflateDecoder(Decoder):\n \"\"\"\n Handle 'deflate' decoding.\n\n See: https://stackoverflow.com/questions/1838699\n \"\"\"\n\n def __init__(self) -> None:\n self.first_attempt = True\n self.decompressor = zlib.decompressobj()\n\n def decode(self, data: bytes) -> bytes:\n was_first_attempt = self.first_attempt\n self.first_attempt = False\n try:\n return self.decompressor.decompress(data)\n except zlib.error as exc:\n if was_first_attempt:\n self.decompressor = zlib.decompressobj(-zlib.MAX_WBITS)\n return self.decode(data)\n raise DecodingError from exc\n\n def flush(self) -> bytes:\n try:\n return self.decompressor.flush()\n except zlib.error as exc: # pragma: nocover\n raise DecodingError from exc\n\n\nclass GZipDecoder(Decoder):\n \"\"\"\n Handle 'gzip' decoding.\n\n See: https://stackoverflow.com/questions/1838699\n \"\"\"\n\n def __init__(self) -> None:\n self.decompressor = zlib.decompressobj(zlib.MAX_WBITS | 16)\n\n def decode(self, data: bytes) -> bytes:\n try:\n return self.decompressor.decompress(data)\n except zlib.error as exc:\n raise DecodingError from exc\n\n def flush(self) -> bytes:\n try:\n return self.decompressor.flush()\n except zlib.error as exc: # pragma: nocover\n raise DecodingError from exc\n\n\nclass BrotliDecoder(Decoder):\n \"\"\"\n Handle 'brotli' decoding.\n\n Requires `pip install brotlipy`. See: https://brotlipy.readthedocs.io/\n or `pip install brotli`. See https://github.com/google/brotli\n Supports both 'brotlipy' and 'Brotli' packages since they share an import\n name. 
The top branches are for 'brotlipy' and bottom branches for 'Brotli'\n \"\"\"\n\n def __init__(self) -> None:\n assert (\n brotli is not None\n ), \"The 'brotlipy' or 'brotli' library must be installed to use 'BrotliDecoder'\"\n self.decompressor = brotli.Decompressor()\n self.seen_data = False\n\n def decode(self, data: bytes) -> bytes:\n if not data:\n return b\"\"\n self.seen_data = True\n try:\n if hasattr(self.decompressor, \"decompress\"):\n return self.decompressor.decompress(data)\n return self.decompressor.process(data) # pragma: nocover\n except brotli.error as exc:\n raise DecodingError from exc\n\n def flush(self) -> bytes:\n if not self.seen_data:\n return b\"\"\n try:\n if hasattr(self.decompressor, \"finish\"):\n self.decompressor.finish()\n return b\"\"\n except brotli.error as exc: # pragma: nocover\n raise DecodingError from exc\n\n\nclass MultiDecoder(Decoder):\n \"\"\"\n Handle the case where multiple encodings have been applied.\n \"\"\"\n\n def __init__(self, children: typing.Sequence[Decoder]) -> None:\n \"\"\"\n 'children' should be a sequence of decoders in the order in which\n each was applied.\n \"\"\"\n # Note that we reverse the order for decoding.\n self.children = list(reversed(children))\n\n def decode(self, data: bytes) -> bytes:\n for child in self.children:\n data = child.decode(data)\n return data\n\n def flush(self) -> bytes:\n data = b\"\"\n for child in self.children:\n data = child.decode(data) + child.flush()\n return data\n\n\nclass TextDecoder:\n \"\"\"\n Handles incrementally decoding bytes into text\n \"\"\"\n\n def __init__(self, encoding: typing.Optional[str] = None):\n self.decoder: typing.Optional[codecs.IncrementalDecoder] = (\n None if encoding is None else codecs.getincrementaldecoder(encoding)()\n )\n self.detector = chardet.universaldetector.UniversalDetector()\n\n # This buffer is only needed if 'decoder' is 'None'\n # we want to trigger errors if data is getting added to\n # our internal buffer for some silly reason while\n # a decoder is discovered.\n self.buffer: typing.Optional[bytearray] = None if self.decoder else bytearray()\n\n def decode(self, data: bytes) -> str:\n try:\n if self.decoder is not None:\n text = self.decoder.decode(data)\n else:\n assert self.buffer is not None\n text = \"\"\n self.detector.feed(data)\n self.buffer += data\n\n # Should be more than enough data to process, we don't\n # want to buffer too long as chardet will wait until\n # detector.close() is used to give back common\n # encodings like 'utf-8'.\n if len(self.buffer) >= 4096:\n self.decoder = codecs.getincrementaldecoder(\n self._detector_result()\n )()\n text = self.decoder.decode(bytes(self.buffer), False)\n self.buffer = None\n\n return text\n except UnicodeDecodeError: # pragma: nocover\n raise DecodingError() from None\n\n def flush(self) -> str:\n try:\n if self.decoder is None:\n # Empty string case as chardet is guaranteed to not have a guess.\n assert self.buffer is not None\n if len(self.buffer) == 0:\n return \"\"\n return bytes(self.buffer).decode(self._detector_result())\n\n return self.decoder.decode(b\"\", True)\n except UnicodeDecodeError: # pragma: nocover\n raise DecodingError() from None\n\n def _detector_result(self) -> str:\n self.detector.close()\n result = self.detector.result[\"encoding\"]\n if not result: # pragma: nocover\n raise DecodingError(\"Unable to determine encoding of content\")\n\n return result\n\n\nclass LineDecoder:\n \"\"\"\n Handles incrementally reading lines from text.\n\n Uses universal line decoding, 
supporting any of `\\n`, `\\r`, or `\\r\\n`\n as line endings, normalizing to `\\n`.\n \"\"\"\n\n def __init__(self) -> None:\n self.buffer = \"\"\n\n def decode(self, text: str) -> typing.List[str]:\n lines = []\n\n if text.startswith(\"\\n\") and self.buffer and self.buffer[-1] == \"\\r\":\n # Handle the case where we have an \"\\r\\n\" split across\n # our previous input, and our new chunk.\n lines.append(self.buffer[:-1] + \"\\n\")\n self.buffer = \"\"\n text = text[1:]\n\n while text:\n num_chars = len(text)\n for idx in range(num_chars):\n char = text[idx]\n next_char = None if idx + 1 == num_chars else text[idx + 1]\n if char == \"\\n\":\n lines.append(self.buffer + text[: idx + 1])\n self.buffer = \"\"\n text = text[idx + 1 :]\n break\n elif char == \"\\r\" and next_char == \"\\n\":\n lines.append(self.buffer + text[:idx] + \"\\n\")\n self.buffer = \"\"\n text = text[idx + 2 :]\n break\n elif char == \"\\r\" and next_char is not None:\n lines.append(self.buffer + text[:idx] + \"\\n\")\n self.buffer = \"\"\n text = text[idx + 1 :]\n break\n elif next_char is None:\n self.buffer = text\n text = \"\"\n break\n\n return lines\n\n def flush(self) -> typing.List[str]:\n if self.buffer.endswith(\"\\r\"):\n # Handle the case where we had a trailing '\\r', which could have\n # been a '\\r\\n' pair.\n lines = [self.buffer[:-1] + \"\\n\"]\n elif self.buffer:\n lines = [self.buffer]\n else:\n lines = []\n self.buffer = \"\"\n return lines\n\n\nSUPPORTED_DECODERS = {\n \"identity\": IdentityDecoder,\n \"gzip\": GZipDecoder,\n \"deflate\": DeflateDecoder,\n \"br\": BrotliDecoder,\n}\n\n\nif brotli is None:\n SUPPORTED_DECODERS.pop(\"br\") # pragma: nocover\n", "path": "httpx/decoders.py"}]}
| 3,644 | 200 |
gh_patches_debug_7233
|
rasdani/github-patches
|
git_diff
|
graspologic-org__graspologic-431
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
remove * import in simulations
https://github.com/neurodata/graspy/blob/master/graspy/simulations/__init__.py
should not be using * import here
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `graspy/simulations/__init__.py`
Content:
```
1 # Copyright (c) Microsoft Corporation and contributors.
2 # Licensed under the MIT License.
3
4 from .simulations import *
5 from .simulations_corr import *
6 from .rdpg_corr import *
7
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/graspy/simulations/__init__.py b/graspy/simulations/__init__.py
--- a/graspy/simulations/__init__.py
+++ b/graspy/simulations/__init__.py
@@ -1,6 +1,19 @@
# Copyright (c) Microsoft Corporation and contributors.
# Licensed under the MIT License.
-from .simulations import *
-from .simulations_corr import *
-from .rdpg_corr import *
+from .simulations import sample_edges, er_np, er_nm, sbm, rdpg, p_from_latent
+from .simulations_corr import sample_edges_corr, er_corr, sbm_corr
+from .rdpg_corr import rdpg_corr
+
+__all__ = [
+ "sample_edges",
+ "er_np",
+ "er_nm",
+ "sbm",
+ "rdpg",
+ "p_from_latent",
+ "sample_edges_corr",
+ "er_corr",
+ "sbm_corr",
+ "rdpg_corr",
+]
|
{"golden_diff": "diff --git a/graspy/simulations/__init__.py b/graspy/simulations/__init__.py\n--- a/graspy/simulations/__init__.py\n+++ b/graspy/simulations/__init__.py\n@@ -1,6 +1,19 @@\n # Copyright (c) Microsoft Corporation and contributors.\n # Licensed under the MIT License.\n \n-from .simulations import *\n-from .simulations_corr import *\n-from .rdpg_corr import *\n+from .simulations import sample_edges, er_np, er_nm, sbm, rdpg, p_from_latent\n+from .simulations_corr import sample_edges_corr, er_corr, sbm_corr\n+from .rdpg_corr import rdpg_corr\n+\n+__all__ = [\n+ \"sample_edges\",\n+ \"er_np\",\n+ \"er_nm\",\n+ \"sbm\",\n+ \"rdpg\",\n+ \"p_from_latent\",\n+ \"sample_edges_corr\",\n+ \"er_corr\",\n+ \"sbm_corr\",\n+ \"rdpg_corr\",\n+]\n", "issue": "remove * import in simulations\nhttps://github.com/neurodata/graspy/blob/master/graspy/simulations/__init__.py\r\n\r\nshould not be using * import here\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation and contributors.\n# Licensed under the MIT License.\n\nfrom .simulations import *\nfrom .simulations_corr import *\nfrom .rdpg_corr import *\n", "path": "graspy/simulations/__init__.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation and contributors.\n# Licensed under the MIT License.\n\nfrom .simulations import sample_edges, er_np, er_nm, sbm, rdpg, p_from_latent\nfrom .simulations_corr import sample_edges_corr, er_corr, sbm_corr\nfrom .rdpg_corr import rdpg_corr\n\n__all__ = [\n \"sample_edges\",\n \"er_np\",\n \"er_nm\",\n \"sbm\",\n \"rdpg\",\n \"p_from_latent\",\n \"sample_edges_corr\",\n \"er_corr\",\n \"sbm_corr\",\n \"rdpg_corr\",\n]\n", "path": "graspy/simulations/__init__.py"}]}
| 346 | 230 |
gh_patches_debug_34287
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-4091
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Details has not been printed
**Describe the issue**
I'm trying to add details to a custom policy, but it has not been printed.
Run: On MacOS and on Github Action [email protected]
Version: 2.2.168
**Additional context**
MANDATORY_TAGS = [
    "Application",
    "Env",
    "Team",
]


class MandatoryTags(BaseResourceCheck):

    def __init__(self):
        name = "Ensure all resources has madatory tags"
        id = "CUSTOM_AWS_001"
        supported_resources = ['aws_kms_key']
        categories = [CheckCategories.GENERAL_SECURITY]
        guideline = "Check page blah blah"
        super().__init__(name=name, id=id, categories=categories,
                         supported_resources=supported_resources, guideline=guideline)

    def scan_resource_conf(self, conf):
        if 'tags_all' in conf.keys():
            resource_tags = list(conf['tags_all'][0].keys())
            if not all(tag in resource_tags for tag in MANDATORY_TAGS):
                self.details.append("default_tags need to be set on provider level")
                return CheckResult.FAILED
        return CheckResult.PASSED


check = MandatoryTags()
** output **
` "check_id": "CUSTOM_AWS_001",
"bc_check_id": null,
"check_name": "Ensure all resources has mandatory tags",
"check_result": {
"result": "FAILED",
"evaluated_keys": []
},
"code_block": [...],
"file_path": "/plan_fmt.json",
"file_abs_path": "/private/tmp/platform-checkov-custom-policies/plan_fmt.json",
"repo_file_path": "/plan_fmt.json",
"file_line_range": [...],
"resource": "aws_kms_key.default",
"evaluations": null,
"check_class": "CheckMandatoryTags",
"fixed_definition": null,
"entity_tags": null,
"caller_file_path": null,
"caller_file_line_range": null,
"resource_address": "aws_kms_key.default",
"severity": null,
"bc_category": null,
"benchmarks": null,
"description": null,
"short_description": null,
"vulnerability_details": null,
"connected_node": null,
"guideline": "",
"details": [],
"check_len": null
},`
In the output, it shows the details list as empty.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/plan_runner.py`
Content:
```
1 from __future__ import annotations
2
3 import logging
4 import os
5 import platform
6
7 from typing import Type, Optional
8
9 from checkov.common.graph.checks_infra.registry import BaseRegistry
10 from checkov.common.graph.db_connectors.networkx.networkx_db_connector import NetworkxConnector
11 from checkov.terraform.graph_builder.graph_components.block_types import BlockType
12 from checkov.terraform.graph_manager import TerraformGraphManager
13 from checkov.terraform.graph_builder.local_graph import TerraformLocalGraph
14 from checkov.common.checks_infra.registry import get_graph_checks_registry
15 from checkov.common.graph.graph_builder.graph_components.attribute_names import CustomAttributes
16 from checkov.common.output.record import Record
17 from checkov.common.util.secrets import omit_secret_value_from_checks, omit_secret_value_from_definitions
18
19 from checkov.common.bridgecrew.check_type import CheckType
20 from checkov.common.output.report import Report
21 from checkov.common.runners.base_runner import CHECKOV_CREATE_GRAPH
22 from checkov.runner_filter import RunnerFilter
23 from checkov.terraform.checks.resource.registry import resource_registry
24 from checkov.terraform.context_parsers.registry import parser_registry
25 from checkov.terraform.plan_utils import create_definitions, build_definitions_context
26 from checkov.terraform.runner import Runner as TerraformRunner, merge_reports
27 from checkov.terraform.deep_analysis_plan_graph_manager import DeepAnalysisGraphManager
28
29 # set of check IDs with lifecycle condition
30 TF_LIFECYCLE_CHECK_IDS = {
31 "CKV_AWS_217",
32 "CKV_AWS_233",
33 "CKV_AWS_237",
34 "CKV_GCP_82",
35 }
36
37 RESOURCE_ATTRIBUTES_TO_OMIT = {
38 'aws_db_instance': ['password'],
39 'aws_secretsmanager_secret_version': ['secret_string'],
40 'aws_ssm_parameter': ['value'],
41 'azurerm_container_registry': ['admin_password'],
42 'azurerm_key_vault_secret': ['value'],
43 'azurerm_linux_virtual_machine': ['admin_password'],
44 'azurerm_mssql_managed_instance_vulnerability_assessment': ['storage_container_path'],
45 'azurerm_mssql_server': ['administrator_login_password'],
46 'azurerm_mssql_server_vulnerability_assessment': ['storage_container_path'],
47 'azurerm_redis_cache': ['primary_access_key', 'secondary_access_key', 'primary_connection_string',
48 'secondary_connection_string'],
49 'azurerm_sql_server': ['administrator_login_password'],
50 'azurerm_sql_managed_instance': ['administrator_login_password'],
51 'azurerm_storage_account': ['primary_access_key', 'secondary_access_key', 'primary_blob_connection_string',
52 'secondary_blob_connection_string', 'primary_blob_endpoint', 'primary_blob_host',
53 'secondary_blob_endpoint', 'secondary_blob_host', 'primary_connection_string',
54 'secondary_connection_string'],
55 'azurerm_synapse_workspace_vulnerability_assessment': ['storage_container_path'],
56 'azurerm_synapse_sql_pool_vulnerability_assessment': ['storage_container_path'],
57 'azurerm_virtual_machine': ['admin_password'],
58 'azurerm_windows_virtual_machine': ['admin_password'],
59 'google_kms_secret_ciphertext': ['plaintext']
60 }
61
62
63 class Runner(TerraformRunner):
64 check_type = CheckType.TERRAFORM_PLAN # noqa: CCE003 # a static attribute
65
66 def __init__(self, graph_class: Type[TerraformLocalGraph] = TerraformLocalGraph,
67 graph_manager: TerraformGraphManager | None = None,
68 db_connector: NetworkxConnector | None = None,
69 external_registries: list[BaseRegistry] | None = None,
70 source: str = "Terraform") -> None:
71 super().__init__(
72 graph_class=graph_class,
73 graph_manager=graph_manager,
74 db_connector=db_connector or NetworkxConnector(),
75 external_registries=external_registries,
76 source=source,
77 )
78 self.file_extensions = ['.json'] # override what gets set from the TF runner
79 self.definitions = None
80 self.context = None
81 self.graph_registry = get_graph_checks_registry(super().check_type)
82 self.deep_analysis = False
83 self.repo_root_for_plan_enrichment = []
84 self.tf_plan_local_graph = None
85
86 block_type_registries = { # noqa: CCE003 # a static attribute
87 'resource': resource_registry,
88 }
89
90 def run(
91 self,
92 root_folder: str | None = None,
93 external_checks_dir: list[str] | None = None,
94 files: list[str] | None = None,
95 runner_filter: RunnerFilter | None = None,
96 collect_skip_comments: bool = True
97 ) -> Report | list[Report]:
98 runner_filter = runner_filter or RunnerFilter()
99 self.deep_analysis = runner_filter.deep_analysis
100 if runner_filter.repo_root_for_plan_enrichment:
101 self.repo_root_for_plan_enrichment = runner_filter.repo_root_for_plan_enrichment[0]
102 report = Report(self.check_type)
103 parsing_errors: dict[str, str] = {}
104 tf_local_graph: Optional[TerraformLocalGraph] = None
105 if self.definitions is None or self.context is None:
106 self.definitions, definitions_raw = create_definitions(root_folder, files, runner_filter, parsing_errors)
107 self.context = build_definitions_context(self.definitions, definitions_raw)
108 if CHECKOV_CREATE_GRAPH:
109 censored_definitions = omit_secret_value_from_definitions(definitions=self.definitions,
110 resource_attributes_to_omit=RESOURCE_ATTRIBUTES_TO_OMIT)
111 self.tf_plan_local_graph = self.graph_manager.build_graph_from_definitions(censored_definitions, render_variables=False)
112 for vertex in self.tf_plan_local_graph.vertices:
113 if vertex.block_type == BlockType.RESOURCE:
114 report.add_resource(f'{vertex.path}:{vertex.id}')
115 self.graph_manager.save_graph(self.tf_plan_local_graph)
116 if self._should_run_deep_analysis:
117 tf_local_graph = self._create_terraform_graph()
118
119 if external_checks_dir:
120 for directory in external_checks_dir:
121 resource_registry.load_external_checks(directory)
122 self.graph_registry.load_external_checks(directory)
123 if not root_folder:
124 root_folder = os.path.split(os.path.commonprefix(files))[0]
125 self.check_tf_definition(report, root_folder, runner_filter)
126 report.add_parsing_errors(parsing_errors.keys())
127
128 if self.definitions:
129 graph_report = self._get_graph_report(root_folder, runner_filter, tf_local_graph)
130 merge_reports(report, graph_report)
131
132 if runner_filter.run_image_referencer:
133 image_report = self.check_container_image_references(
134 graph_connector=self.graph_manager.get_reader_endpoint(),
135 root_path=root_folder,
136 runner_filter=runner_filter,
137 )
138
139 if image_report:
140 # due too many tests failing only return a list, if there is an image report
141 return [report, image_report]
142
143 return report
144
145 def _get_graph_report(self, root_folder: str, runner_filter: RunnerFilter, tf_local_graph: Optional[TerraformLocalGraph]) -> Report:
146 if self._should_run_deep_analysis and tf_local_graph:
147 deep_analysis_graph_manager = DeepAnalysisGraphManager(tf_local_graph, self.tf_plan_local_graph)
148 deep_analysis_graph_manager.enrich_tf_graph_attributes()
149 self.graph_manager.save_graph(tf_local_graph)
150 graph_report = self.get_graph_checks_report(root_folder, runner_filter)
151 deep_analysis_graph_manager.filter_report(graph_report)
152 return graph_report
153 return self.get_graph_checks_report(root_folder, runner_filter)
154
155 def _create_terraform_graph(self) -> TerraformLocalGraph:
156 graph_manager = TerraformGraphManager(db_connector=NetworkxConnector())
157 tf_local_graph, _ = graph_manager.build_graph_from_source_directory(self.repo_root_for_plan_enrichment,
158 render_variables=True)
159 self.graph_manager = graph_manager
160 return tf_local_graph
161
162 def check_tf_definition(self, report, root_folder, runner_filter, collect_skip_comments=True):
163 for full_file_path, definition in self.definitions.items():
164 if platform.system() == "Windows":
165 temp = os.path.split(full_file_path)[0]
166 scanned_file = f"/{os.path.relpath(full_file_path,temp)}"
167 else:
168 scanned_file = f"/{os.path.relpath(full_file_path, root_folder)}"
169 logging.debug(f"Scanning file: {scanned_file}")
170 for block_type in definition.keys():
171 if block_type in self.block_type_registries.keys():
172 self.run_block(definition[block_type], None, full_file_path, root_folder, report, scanned_file,
173 block_type, runner_filter)
174
175 def run_block(self, entities,
176 definition_context,
177 full_file_path, root_folder, report, scanned_file,
178 block_type, runner_filter=None, entity_context_path_header=None,
179 module_referrer: str | None = None):
180 registry = self.block_type_registries[block_type]
181 if registry:
182 for entity in entities:
183 context_parser = parser_registry.context_parsers[block_type]
184 definition_path = context_parser.get_entity_context_path(entity)
185 entity_id = ".".join(definition_path)
186 # Entity can exist only once per dir, for file as well
187 entity_context = self.get_entity_context(definition_path, full_file_path)
188 entity_lines_range = [entity_context.get('start_line'), entity_context.get('end_line')]
189 entity_code_lines = entity_context.get('code_lines')
190 entity_address = entity_context.get('address')
191 _, _, entity_config = registry.extract_entity_details(entity)
192
193 results = registry.scan(scanned_file, entity, [], runner_filter, report_type=CheckType.TERRAFORM_PLAN)
194 for check, check_result in results.items():
195 if check.id in TF_LIFECYCLE_CHECK_IDS:
196 # can't be evaluated in TF plan
197 continue
198 censored_code_lines = omit_secret_value_from_checks(check=check,
199 check_result=check_result,
200 entity_code_lines=entity_code_lines,
201 entity_config=entity_config,
202 resource_attributes_to_omit=RESOURCE_ATTRIBUTES_TO_OMIT)
203 record = Record(check_id=check.id, bc_check_id=check.bc_id, check_name=check.name,
204 check_result=check_result,
205 code_block=censored_code_lines, file_path=scanned_file,
206 file_line_range=entity_lines_range,
207 resource=entity_id, resource_address=entity_address, evaluations=None,
208 check_class=check.__class__.__module__, file_abs_path=full_file_path,
209 severity=check.severity)
210 record.set_guideline(check.guideline)
211 report.add_record(record=record)
212
213 def get_entity_context_and_evaluations(self, entity):
214 raw_context = self.get_entity_context(entity[CustomAttributes.BLOCK_NAME].split("."),
215 entity[CustomAttributes.FILE_PATH])
216 raw_context['definition_path'] = entity[CustomAttributes.BLOCK_NAME].split('.')
217 return raw_context, None
218
219 def get_entity_context(self, definition_path, full_file_path):
220 entity_id = ".".join(definition_path)
221 return self.context.get(full_file_path, {}).get(entity_id)
222
223 @property
224 def _should_run_deep_analysis(self) -> bool:
225 return self.deep_analysis and self.repo_root_for_plan_enrichment and self.tf_plan_local_graph
226
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/checkov/terraform/plan_runner.py b/checkov/terraform/plan_runner.py
--- a/checkov/terraform/plan_runner.py
+++ b/checkov/terraform/plan_runner.py
@@ -195,18 +195,30 @@
if check.id in TF_LIFECYCLE_CHECK_IDS:
# can't be evaluated in TF plan
continue
- censored_code_lines = omit_secret_value_from_checks(check=check,
- check_result=check_result,
- entity_code_lines=entity_code_lines,
- entity_config=entity_config,
- resource_attributes_to_omit=RESOURCE_ATTRIBUTES_TO_OMIT)
- record = Record(check_id=check.id, bc_check_id=check.bc_id, check_name=check.name,
- check_result=check_result,
- code_block=censored_code_lines, file_path=scanned_file,
- file_line_range=entity_lines_range,
- resource=entity_id, resource_address=entity_address, evaluations=None,
- check_class=check.__class__.__module__, file_abs_path=full_file_path,
- severity=check.severity)
+
+ censored_code_lines = omit_secret_value_from_checks(
+ check=check,
+ check_result=check_result,
+ entity_code_lines=entity_code_lines,
+ entity_config=entity_config,
+ resource_attributes_to_omit=RESOURCE_ATTRIBUTES_TO_OMIT
+ )
+ record = Record(
+ check_id=check.id,
+ bc_check_id=check.bc_id,
+ check_name=check.name,
+ check_result=check_result,
+ code_block=censored_code_lines,
+ file_path=scanned_file,
+ file_line_range=entity_lines_range,
+ resource=entity_id,
+ resource_address=entity_address,
+ evaluations=None,
+ check_class=check.__class__.__module__,
+ file_abs_path=full_file_path,
+ severity=check.severity,
+ details=check.details,
+ )
record.set_guideline(check.guideline)
report.add_record(record=record)
|
{"golden_diff": "diff --git a/checkov/terraform/plan_runner.py b/checkov/terraform/plan_runner.py\n--- a/checkov/terraform/plan_runner.py\n+++ b/checkov/terraform/plan_runner.py\n@@ -195,18 +195,30 @@\n if check.id in TF_LIFECYCLE_CHECK_IDS:\n # can't be evaluated in TF plan\n continue\n- censored_code_lines = omit_secret_value_from_checks(check=check,\n- check_result=check_result,\n- entity_code_lines=entity_code_lines,\n- entity_config=entity_config,\n- resource_attributes_to_omit=RESOURCE_ATTRIBUTES_TO_OMIT)\n- record = Record(check_id=check.id, bc_check_id=check.bc_id, check_name=check.name,\n- check_result=check_result,\n- code_block=censored_code_lines, file_path=scanned_file,\n- file_line_range=entity_lines_range,\n- resource=entity_id, resource_address=entity_address, evaluations=None,\n- check_class=check.__class__.__module__, file_abs_path=full_file_path,\n- severity=check.severity)\n+\n+ censored_code_lines = omit_secret_value_from_checks(\n+ check=check,\n+ check_result=check_result,\n+ entity_code_lines=entity_code_lines,\n+ entity_config=entity_config,\n+ resource_attributes_to_omit=RESOURCE_ATTRIBUTES_TO_OMIT\n+ )\n+ record = Record(\n+ check_id=check.id,\n+ bc_check_id=check.bc_id,\n+ check_name=check.name,\n+ check_result=check_result,\n+ code_block=censored_code_lines,\n+ file_path=scanned_file,\n+ file_line_range=entity_lines_range,\n+ resource=entity_id,\n+ resource_address=entity_address,\n+ evaluations=None,\n+ check_class=check.__class__.__module__,\n+ file_abs_path=full_file_path,\n+ severity=check.severity,\n+ details=check.details,\n+ )\n record.set_guideline(check.guideline)\n report.add_record(record=record)\n", "issue": "Details has not been printed\n**Describe the issue**\r\nI'm trying to add details to a custom policy, but it has not been printed.\r\nRun: On MacOS and on Github Action [email protected]\r\nVersion: 2.2.168\r\n**Additional context**\r\n\r\n MANDATORY_TAGS = [\r\n \"Application\",\r\n\t \"Env\",\r\n\t \"Team\",\r\n ]\r\n \r\n \r\n class MandatoryTags(BaseResourceCheck):\r\n\r\n def __init__(self):\r\n name = \"Ensure all resources has madatory tags\"\r\n id = \"CUSTOM_AWS_001\"\r\n supported_resources = ['aws_kms_key']\r\n categories = [CheckCategories.GENERAL_SECURITY]\r\n guideline = \"Check page blah blah\"\r\n super().__init__(name=name, id=id, categories=categories,\r\n supported_resources=supported_resources, guideline=guideline)\r\n \r\n def scan_resource_conf(self, conf):\r\n if 'tags_all' in conf.keys():\r\n resource_tags = list(conf['tags_all'][0].keys())\r\n if not all(tag in resource_tags for tag in MANDATORY_TAGS):\r\n self.details.append(\"default_tags need to be set on provider level\")\r\n return CheckResult.FAILED\r\n return CheckResult.PASSED\r\n \r\n \r\n \r\n check = MandatoryTags()\r\n \r\n ** output **\r\n ` \"check_id\": \"CUSTOM_AWS_001\",\r\n \"bc_check_id\": null,\r\n \"check_name\": \"Ensure all resources has mandatory tags\",\r\n \"check_result\": {\r\n \"result\": \"FAILED\",\r\n \"evaluated_keys\": []\r\n },\r\n \"code_block\": [...],\r\n \"file_path\": \"/plan_fmt.json\",\r\n \"file_abs_path\": \"/private/tmp/platform-checkov-custom-policies/plan_fmt.json\",\r\n \"repo_file_path\": \"/plan_fmt.json\",\r\n \"file_line_range\": [...],\r\n \"resource\": \"aws_kms_key.default\",\r\n \"evaluations\": null,\r\n \"check_class\": \"CheckMandatoryTags\",\r\n \"fixed_definition\": null,\r\n \"entity_tags\": null,\r\n \"caller_file_path\": null,\r\n \"caller_file_line_range\": null,\r\n \"resource_address\": 
\"aws_kms_key.default\",\r\n \"severity\": null,\r\n \"bc_category\": null,\r\n \"benchmarks\": null,\r\n \"description\": null,\r\n \"short_description\": null,\r\n \"vulnerability_details\": null,\r\n \"connected_node\": null,\r\n \"guideline\": \"\",\r\n \"details\": [],\r\n \"check_len\": null\r\n },`\r\n\r\nIn the output, it shows the details list as empty.\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport logging\nimport os\nimport platform\n\nfrom typing import Type, Optional\n\nfrom checkov.common.graph.checks_infra.registry import BaseRegistry\nfrom checkov.common.graph.db_connectors.networkx.networkx_db_connector import NetworkxConnector\nfrom checkov.terraform.graph_builder.graph_components.block_types import BlockType\nfrom checkov.terraform.graph_manager import TerraformGraphManager\nfrom checkov.terraform.graph_builder.local_graph import TerraformLocalGraph\nfrom checkov.common.checks_infra.registry import get_graph_checks_registry\nfrom checkov.common.graph.graph_builder.graph_components.attribute_names import CustomAttributes\nfrom checkov.common.output.record import Record\nfrom checkov.common.util.secrets import omit_secret_value_from_checks, omit_secret_value_from_definitions\n\nfrom checkov.common.bridgecrew.check_type import CheckType\nfrom checkov.common.output.report import Report\nfrom checkov.common.runners.base_runner import CHECKOV_CREATE_GRAPH\nfrom checkov.runner_filter import RunnerFilter\nfrom checkov.terraform.checks.resource.registry import resource_registry\nfrom checkov.terraform.context_parsers.registry import parser_registry\nfrom checkov.terraform.plan_utils import create_definitions, build_definitions_context\nfrom checkov.terraform.runner import Runner as TerraformRunner, merge_reports\nfrom checkov.terraform.deep_analysis_plan_graph_manager import DeepAnalysisGraphManager\n\n# set of check IDs with lifecycle condition\nTF_LIFECYCLE_CHECK_IDS = {\n \"CKV_AWS_217\",\n \"CKV_AWS_233\",\n \"CKV_AWS_237\",\n \"CKV_GCP_82\",\n}\n\nRESOURCE_ATTRIBUTES_TO_OMIT = {\n 'aws_db_instance': ['password'],\n 'aws_secretsmanager_secret_version': ['secret_string'],\n 'aws_ssm_parameter': ['value'],\n 'azurerm_container_registry': ['admin_password'],\n 'azurerm_key_vault_secret': ['value'],\n 'azurerm_linux_virtual_machine': ['admin_password'],\n 'azurerm_mssql_managed_instance_vulnerability_assessment': ['storage_container_path'],\n 'azurerm_mssql_server': ['administrator_login_password'],\n 'azurerm_mssql_server_vulnerability_assessment': ['storage_container_path'],\n 'azurerm_redis_cache': ['primary_access_key', 'secondary_access_key', 'primary_connection_string',\n 'secondary_connection_string'],\n 'azurerm_sql_server': ['administrator_login_password'],\n 'azurerm_sql_managed_instance': ['administrator_login_password'],\n 'azurerm_storage_account': ['primary_access_key', 'secondary_access_key', 'primary_blob_connection_string',\n 'secondary_blob_connection_string', 'primary_blob_endpoint', 'primary_blob_host',\n 'secondary_blob_endpoint', 'secondary_blob_host', 'primary_connection_string',\n 'secondary_connection_string'],\n 'azurerm_synapse_workspace_vulnerability_assessment': ['storage_container_path'],\n 'azurerm_synapse_sql_pool_vulnerability_assessment': ['storage_container_path'],\n 'azurerm_virtual_machine': ['admin_password'],\n 'azurerm_windows_virtual_machine': ['admin_password'],\n 'google_kms_secret_ciphertext': ['plaintext']\n}\n\n\nclass Runner(TerraformRunner):\n check_type = CheckType.TERRAFORM_PLAN # noqa: CCE003 # 
a static attribute\n\n def __init__(self, graph_class: Type[TerraformLocalGraph] = TerraformLocalGraph,\n graph_manager: TerraformGraphManager | None = None,\n db_connector: NetworkxConnector | None = None,\n external_registries: list[BaseRegistry] | None = None,\n source: str = \"Terraform\") -> None:\n super().__init__(\n graph_class=graph_class,\n graph_manager=graph_manager,\n db_connector=db_connector or NetworkxConnector(),\n external_registries=external_registries,\n source=source,\n )\n self.file_extensions = ['.json'] # override what gets set from the TF runner\n self.definitions = None\n self.context = None\n self.graph_registry = get_graph_checks_registry(super().check_type)\n self.deep_analysis = False\n self.repo_root_for_plan_enrichment = []\n self.tf_plan_local_graph = None\n\n block_type_registries = { # noqa: CCE003 # a static attribute\n 'resource': resource_registry,\n }\n\n def run(\n self,\n root_folder: str | None = None,\n external_checks_dir: list[str] | None = None,\n files: list[str] | None = None,\n runner_filter: RunnerFilter | None = None,\n collect_skip_comments: bool = True\n ) -> Report | list[Report]:\n runner_filter = runner_filter or RunnerFilter()\n self.deep_analysis = runner_filter.deep_analysis\n if runner_filter.repo_root_for_plan_enrichment:\n self.repo_root_for_plan_enrichment = runner_filter.repo_root_for_plan_enrichment[0]\n report = Report(self.check_type)\n parsing_errors: dict[str, str] = {}\n tf_local_graph: Optional[TerraformLocalGraph] = None\n if self.definitions is None or self.context is None:\n self.definitions, definitions_raw = create_definitions(root_folder, files, runner_filter, parsing_errors)\n self.context = build_definitions_context(self.definitions, definitions_raw)\n if CHECKOV_CREATE_GRAPH:\n censored_definitions = omit_secret_value_from_definitions(definitions=self.definitions,\n resource_attributes_to_omit=RESOURCE_ATTRIBUTES_TO_OMIT)\n self.tf_plan_local_graph = self.graph_manager.build_graph_from_definitions(censored_definitions, render_variables=False)\n for vertex in self.tf_plan_local_graph.vertices:\n if vertex.block_type == BlockType.RESOURCE:\n report.add_resource(f'{vertex.path}:{vertex.id}')\n self.graph_manager.save_graph(self.tf_plan_local_graph)\n if self._should_run_deep_analysis:\n tf_local_graph = self._create_terraform_graph()\n\n if external_checks_dir:\n for directory in external_checks_dir:\n resource_registry.load_external_checks(directory)\n self.graph_registry.load_external_checks(directory)\n if not root_folder:\n root_folder = os.path.split(os.path.commonprefix(files))[0]\n self.check_tf_definition(report, root_folder, runner_filter)\n report.add_parsing_errors(parsing_errors.keys())\n\n if self.definitions:\n graph_report = self._get_graph_report(root_folder, runner_filter, tf_local_graph)\n merge_reports(report, graph_report)\n\n if runner_filter.run_image_referencer:\n image_report = self.check_container_image_references(\n graph_connector=self.graph_manager.get_reader_endpoint(),\n root_path=root_folder,\n runner_filter=runner_filter,\n )\n\n if image_report:\n # due too many tests failing only return a list, if there is an image report\n return [report, image_report]\n\n return report\n\n def _get_graph_report(self, root_folder: str, runner_filter: RunnerFilter, tf_local_graph: Optional[TerraformLocalGraph]) -> Report:\n if self._should_run_deep_analysis and tf_local_graph:\n deep_analysis_graph_manager = DeepAnalysisGraphManager(tf_local_graph, self.tf_plan_local_graph)\n 
deep_analysis_graph_manager.enrich_tf_graph_attributes()\n self.graph_manager.save_graph(tf_local_graph)\n graph_report = self.get_graph_checks_report(root_folder, runner_filter)\n deep_analysis_graph_manager.filter_report(graph_report)\n return graph_report\n return self.get_graph_checks_report(root_folder, runner_filter)\n\n def _create_terraform_graph(self) -> TerraformLocalGraph:\n graph_manager = TerraformGraphManager(db_connector=NetworkxConnector())\n tf_local_graph, _ = graph_manager.build_graph_from_source_directory(self.repo_root_for_plan_enrichment,\n render_variables=True)\n self.graph_manager = graph_manager\n return tf_local_graph\n\n def check_tf_definition(self, report, root_folder, runner_filter, collect_skip_comments=True):\n for full_file_path, definition in self.definitions.items():\n if platform.system() == \"Windows\":\n temp = os.path.split(full_file_path)[0]\n scanned_file = f\"/{os.path.relpath(full_file_path,temp)}\"\n else:\n scanned_file = f\"/{os.path.relpath(full_file_path, root_folder)}\"\n logging.debug(f\"Scanning file: {scanned_file}\")\n for block_type in definition.keys():\n if block_type in self.block_type_registries.keys():\n self.run_block(definition[block_type], None, full_file_path, root_folder, report, scanned_file,\n block_type, runner_filter)\n\n def run_block(self, entities,\n definition_context,\n full_file_path, root_folder, report, scanned_file,\n block_type, runner_filter=None, entity_context_path_header=None,\n module_referrer: str | None = None):\n registry = self.block_type_registries[block_type]\n if registry:\n for entity in entities:\n context_parser = parser_registry.context_parsers[block_type]\n definition_path = context_parser.get_entity_context_path(entity)\n entity_id = \".\".join(definition_path)\n # Entity can exist only once per dir, for file as well\n entity_context = self.get_entity_context(definition_path, full_file_path)\n entity_lines_range = [entity_context.get('start_line'), entity_context.get('end_line')]\n entity_code_lines = entity_context.get('code_lines')\n entity_address = entity_context.get('address')\n _, _, entity_config = registry.extract_entity_details(entity)\n\n results = registry.scan(scanned_file, entity, [], runner_filter, report_type=CheckType.TERRAFORM_PLAN)\n for check, check_result in results.items():\n if check.id in TF_LIFECYCLE_CHECK_IDS:\n # can't be evaluated in TF plan\n continue\n censored_code_lines = omit_secret_value_from_checks(check=check,\n check_result=check_result,\n entity_code_lines=entity_code_lines,\n entity_config=entity_config,\n resource_attributes_to_omit=RESOURCE_ATTRIBUTES_TO_OMIT)\n record = Record(check_id=check.id, bc_check_id=check.bc_id, check_name=check.name,\n check_result=check_result,\n code_block=censored_code_lines, file_path=scanned_file,\n file_line_range=entity_lines_range,\n resource=entity_id, resource_address=entity_address, evaluations=None,\n check_class=check.__class__.__module__, file_abs_path=full_file_path,\n severity=check.severity)\n record.set_guideline(check.guideline)\n report.add_record(record=record)\n\n def get_entity_context_and_evaluations(self, entity):\n raw_context = self.get_entity_context(entity[CustomAttributes.BLOCK_NAME].split(\".\"),\n entity[CustomAttributes.FILE_PATH])\n raw_context['definition_path'] = entity[CustomAttributes.BLOCK_NAME].split('.')\n return raw_context, None\n\n def get_entity_context(self, definition_path, full_file_path):\n entity_id = \".\".join(definition_path)\n return self.context.get(full_file_path, 
{}).get(entity_id)\n\n @property\n def _should_run_deep_analysis(self) -> bool:\n return self.deep_analysis and self.repo_root_for_plan_enrichment and self.tf_plan_local_graph\n", "path": "checkov/terraform/plan_runner.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport logging\nimport os\nimport platform\n\nfrom typing import Type, Optional\n\nfrom checkov.common.graph.checks_infra.registry import BaseRegistry\nfrom checkov.common.graph.db_connectors.networkx.networkx_db_connector import NetworkxConnector\nfrom checkov.terraform.graph_builder.graph_components.block_types import BlockType\nfrom checkov.terraform.graph_manager import TerraformGraphManager\nfrom checkov.terraform.graph_builder.local_graph import TerraformLocalGraph\nfrom checkov.common.checks_infra.registry import get_graph_checks_registry\nfrom checkov.common.graph.graph_builder.graph_components.attribute_names import CustomAttributes\nfrom checkov.common.output.record import Record\nfrom checkov.common.util.secrets import omit_secret_value_from_checks, omit_secret_value_from_definitions\n\nfrom checkov.common.bridgecrew.check_type import CheckType\nfrom checkov.common.output.report import Report\nfrom checkov.common.runners.base_runner import CHECKOV_CREATE_GRAPH\nfrom checkov.runner_filter import RunnerFilter\nfrom checkov.terraform.checks.resource.registry import resource_registry\nfrom checkov.terraform.context_parsers.registry import parser_registry\nfrom checkov.terraform.plan_utils import create_definitions, build_definitions_context\nfrom checkov.terraform.runner import Runner as TerraformRunner, merge_reports\nfrom checkov.terraform.deep_analysis_plan_graph_manager import DeepAnalysisGraphManager\n\n# set of check IDs with lifecycle condition\nTF_LIFECYCLE_CHECK_IDS = {\n \"CKV_AWS_217\",\n \"CKV_AWS_233\",\n \"CKV_AWS_237\",\n \"CKV_GCP_82\",\n}\n\nRESOURCE_ATTRIBUTES_TO_OMIT = {\n 'aws_db_instance': ['password'],\n 'aws_secretsmanager_secret_version': ['secret_string'],\n 'aws_ssm_parameter': ['value'],\n 'azurerm_container_registry': ['admin_password'],\n 'azurerm_key_vault_secret': ['value'],\n 'azurerm_linux_virtual_machine': ['admin_password'],\n 'azurerm_mssql_managed_instance_vulnerability_assessment': ['storage_container_path'],\n 'azurerm_mssql_server': ['administrator_login_password'],\n 'azurerm_mssql_server_vulnerability_assessment': ['storage_container_path'],\n 'azurerm_redis_cache': ['primary_access_key', 'secondary_access_key', 'primary_connection_string',\n 'secondary_connection_string'],\n 'azurerm_sql_server': ['administrator_login_password'],\n 'azurerm_sql_managed_instance': ['administrator_login_password'],\n 'azurerm_storage_account': ['primary_access_key', 'secondary_access_key', 'primary_blob_connection_string',\n 'secondary_blob_connection_string', 'primary_blob_endpoint', 'primary_blob_host',\n 'secondary_blob_endpoint', 'secondary_blob_host', 'primary_connection_string',\n 'secondary_connection_string'],\n 'azurerm_synapse_workspace_vulnerability_assessment': ['storage_container_path'],\n 'azurerm_synapse_sql_pool_vulnerability_assessment': ['storage_container_path'],\n 'azurerm_virtual_machine': ['admin_password'],\n 'azurerm_windows_virtual_machine': ['admin_password'],\n 'google_kms_secret_ciphertext': ['plaintext']\n}\n\n\nclass Runner(TerraformRunner):\n check_type = CheckType.TERRAFORM_PLAN # noqa: CCE003 # a static attribute\n\n def __init__(self, graph_class: Type[TerraformLocalGraph] = TerraformLocalGraph,\n graph_manager: TerraformGraphManager | 
None = None,\n db_connector: NetworkxConnector | None = None,\n external_registries: list[BaseRegistry] | None = None,\n source: str = \"Terraform\") -> None:\n super().__init__(\n graph_class=graph_class,\n graph_manager=graph_manager,\n db_connector=db_connector or NetworkxConnector(),\n external_registries=external_registries,\n source=source,\n )\n self.file_extensions = ['.json'] # override what gets set from the TF runner\n self.definitions = None\n self.context = None\n self.graph_registry = get_graph_checks_registry(super().check_type)\n self.deep_analysis = False\n self.repo_root_for_plan_enrichment = []\n self.tf_plan_local_graph = None\n\n block_type_registries = { # noqa: CCE003 # a static attribute\n 'resource': resource_registry,\n }\n\n def run(\n self,\n root_folder: str | None = None,\n external_checks_dir: list[str] | None = None,\n files: list[str] | None = None,\n runner_filter: RunnerFilter | None = None,\n collect_skip_comments: bool = True\n ) -> Report | list[Report]:\n runner_filter = runner_filter or RunnerFilter()\n self.deep_analysis = runner_filter.deep_analysis\n if runner_filter.repo_root_for_plan_enrichment:\n self.repo_root_for_plan_enrichment = runner_filter.repo_root_for_plan_enrichment[0]\n report = Report(self.check_type)\n parsing_errors: dict[str, str] = {}\n tf_local_graph: Optional[TerraformLocalGraph] = None\n if self.definitions is None or self.context is None:\n self.definitions, definitions_raw = create_definitions(root_folder, files, runner_filter, parsing_errors)\n self.context = build_definitions_context(self.definitions, definitions_raw)\n if CHECKOV_CREATE_GRAPH:\n censored_definitions = omit_secret_value_from_definitions(definitions=self.definitions,\n resource_attributes_to_omit=RESOURCE_ATTRIBUTES_TO_OMIT)\n self.tf_plan_local_graph = self.graph_manager.build_graph_from_definitions(censored_definitions, render_variables=False)\n for vertex in self.tf_plan_local_graph.vertices:\n if vertex.block_type == BlockType.RESOURCE:\n report.add_resource(f'{vertex.path}:{vertex.id}')\n self.graph_manager.save_graph(self.tf_plan_local_graph)\n if self._should_run_deep_analysis:\n tf_local_graph = self._create_terraform_graph()\n\n if external_checks_dir:\n for directory in external_checks_dir:\n resource_registry.load_external_checks(directory)\n self.graph_registry.load_external_checks(directory)\n if not root_folder:\n root_folder = os.path.split(os.path.commonprefix(files))[0]\n self.check_tf_definition(report, root_folder, runner_filter)\n report.add_parsing_errors(parsing_errors.keys())\n\n if self.definitions:\n graph_report = self._get_graph_report(root_folder, runner_filter, tf_local_graph)\n merge_reports(report, graph_report)\n\n if runner_filter.run_image_referencer:\n image_report = self.check_container_image_references(\n graph_connector=self.graph_manager.get_reader_endpoint(),\n root_path=root_folder,\n runner_filter=runner_filter,\n )\n\n if image_report:\n # due too many tests failing only return a list, if there is an image report\n return [report, image_report]\n\n return report\n\n def _get_graph_report(self, root_folder: str, runner_filter: RunnerFilter, tf_local_graph: Optional[TerraformLocalGraph]) -> Report:\n if self._should_run_deep_analysis and tf_local_graph:\n deep_analysis_graph_manager = DeepAnalysisGraphManager(tf_local_graph, self.tf_plan_local_graph)\n deep_analysis_graph_manager.enrich_tf_graph_attributes()\n self.graph_manager.save_graph(tf_local_graph)\n graph_report = self.get_graph_checks_report(root_folder, 
runner_filter)\n deep_analysis_graph_manager.filter_report(graph_report)\n return graph_report\n return self.get_graph_checks_report(root_folder, runner_filter)\n\n def _create_terraform_graph(self) -> TerraformLocalGraph:\n graph_manager = TerraformGraphManager(db_connector=NetworkxConnector())\n tf_local_graph, _ = graph_manager.build_graph_from_source_directory(self.repo_root_for_plan_enrichment,\n render_variables=True)\n self.graph_manager = graph_manager\n return tf_local_graph\n\n def check_tf_definition(self, report, root_folder, runner_filter, collect_skip_comments=True):\n for full_file_path, definition in self.definitions.items():\n if platform.system() == \"Windows\":\n temp = os.path.split(full_file_path)[0]\n scanned_file = f\"/{os.path.relpath(full_file_path,temp)}\"\n else:\n scanned_file = f\"/{os.path.relpath(full_file_path, root_folder)}\"\n logging.debug(f\"Scanning file: {scanned_file}\")\n for block_type in definition.keys():\n if block_type in self.block_type_registries.keys():\n self.run_block(definition[block_type], None, full_file_path, root_folder, report, scanned_file,\n block_type, runner_filter)\n\n def run_block(self, entities,\n definition_context,\n full_file_path, root_folder, report, scanned_file,\n block_type, runner_filter=None, entity_context_path_header=None,\n module_referrer: str | None = None):\n registry = self.block_type_registries[block_type]\n if registry:\n for entity in entities:\n context_parser = parser_registry.context_parsers[block_type]\n definition_path = context_parser.get_entity_context_path(entity)\n entity_id = \".\".join(definition_path)\n # Entity can exist only once per dir, for file as well\n entity_context = self.get_entity_context(definition_path, full_file_path)\n entity_lines_range = [entity_context.get('start_line'), entity_context.get('end_line')]\n entity_code_lines = entity_context.get('code_lines')\n entity_address = entity_context.get('address')\n _, _, entity_config = registry.extract_entity_details(entity)\n\n results = registry.scan(scanned_file, entity, [], runner_filter, report_type=CheckType.TERRAFORM_PLAN)\n for check, check_result in results.items():\n if check.id in TF_LIFECYCLE_CHECK_IDS:\n # can't be evaluated in TF plan\n continue\n\n censored_code_lines = omit_secret_value_from_checks(\n check=check,\n check_result=check_result,\n entity_code_lines=entity_code_lines,\n entity_config=entity_config,\n resource_attributes_to_omit=RESOURCE_ATTRIBUTES_TO_OMIT\n )\n record = Record(\n check_id=check.id,\n bc_check_id=check.bc_id,\n check_name=check.name,\n check_result=check_result,\n code_block=censored_code_lines,\n file_path=scanned_file,\n file_line_range=entity_lines_range,\n resource=entity_id,\n resource_address=entity_address,\n evaluations=None,\n check_class=check.__class__.__module__,\n file_abs_path=full_file_path,\n severity=check.severity,\n details=check.details,\n )\n record.set_guideline(check.guideline)\n report.add_record(record=record)\n\n def get_entity_context_and_evaluations(self, entity):\n raw_context = self.get_entity_context(entity[CustomAttributes.BLOCK_NAME].split(\".\"),\n entity[CustomAttributes.FILE_PATH])\n raw_context['definition_path'] = entity[CustomAttributes.BLOCK_NAME].split('.')\n return raw_context, None\n\n def get_entity_context(self, definition_path, full_file_path):\n entity_id = \".\".join(definition_path)\n return self.context.get(full_file_path, {}).get(entity_id)\n\n @property\n def _should_run_deep_analysis(self) -> bool:\n return self.deep_analysis and 
self.repo_root_for_plan_enrichment and self.tf_plan_local_graph\n", "path": "checkov/terraform/plan_runner.py"}]}
| 3,768 | 450 |
gh_patches_debug_62674
|
rasdani/github-patches
|
git_diff
|
oppia__oppia-1713
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add an OutputContains rule to the CodeRepl interaction.
We've had a request to add an OutputContains rule to the CodeRepl interaction.
The use case is as follows: the student will type in the body of a function, and their code will be checked by calling the function on several inputs and printing the results. We don't want to stop the student from printing their own stuff from the function first, though, hence the idea of checking to see whether a substring of the student's output matches the expected output.
Note that this is a straightforward starter project. The files to modify are extensions/interactions/CodeRepl/CodeRepl.js (see codeReplRulesService) and the corresponding test suite in extensions/interactions/CodeRepl/CodeReplRulesServiceSpec.js.
/cc @anuzis
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `extensions/rules/code_evaluation.py`
Content:
```
1 # coding: utf-8
2 #
3 # Copyright 2014 The Oppia Authors. All Rights Reserved.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, softwar
12 # distributed under the License is distributed on an "AS-IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 """Rules for CodeEvaluation objects."""
18
19 from extensions.rules import base
20
21
22 class CodeEquals(base.CodeEvaluationRule):
23 description = 'has code equal to {{x|CodeString}}'
24
25
26 class CodeContains(base.CodeEvaluationRule):
27 description = 'has code that contains {{x|CodeString}}'
28
29
30 class CodeDoesNotContain(base.CodeEvaluationRule):
31 description = 'has code that does not contain {{x|CodeString}}'
32
33
34 class OutputEquals(base.CodeEvaluationRule):
35 description = 'has output equal to {{x|CodeString}}'
36
37
38 class ResultsInError(base.CodeEvaluationRule):
39 description = 'results in an error when run'
40
41
42 class ErrorContains(base.CodeEvaluationRule):
43 description = (
44 'has error message that contains {{x|UnicodeString}}')
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/extensions/rules/code_evaluation.py b/extensions/rules/code_evaluation.py
--- a/extensions/rules/code_evaluation.py
+++ b/extensions/rules/code_evaluation.py
@@ -30,6 +30,8 @@
class CodeDoesNotContain(base.CodeEvaluationRule):
description = 'has code that does not contain {{x|CodeString}}'
+class OutputContains(base.CodeEvaluationRule):
+ description = 'has output that contains {{x|CodeString}}'
class OutputEquals(base.CodeEvaluationRule):
description = 'has output equal to {{x|CodeString}}'
|
{"golden_diff": "diff --git a/extensions/rules/code_evaluation.py b/extensions/rules/code_evaluation.py\n--- a/extensions/rules/code_evaluation.py\n+++ b/extensions/rules/code_evaluation.py\n@@ -30,6 +30,8 @@\n class CodeDoesNotContain(base.CodeEvaluationRule):\n description = 'has code that does not contain {{x|CodeString}}'\n \n+class OutputContains(base.CodeEvaluationRule):\n+ description = 'has output that contains {{x|CodeString}}'\n \n class OutputEquals(base.CodeEvaluationRule):\n description = 'has output equal to {{x|CodeString}}'\n", "issue": "Add an OutputContains rule to the CodeRepl interaction.\nWe've had a request to add an OutputContains rule to the CodeRepl interaction.\n\nThe use case is as follows: the student will type in the body of a function, and their code will be checked by calling the function on several inputs and printing the results. We don't want to stop the student from printing their own stuff from the function first, though, hence the idea of checking to see whether a substring of the student's output matches the expected output.\n\nNote that this is a straightforward starter project. The files to modify are extensions/interactions/CodeRepl/CodeRepl.js (see codeReplRulesService) and the corresponding test suite in extensions/interactions/CodeRepl/CodeReplRulesServiceSpec.js.\n\n/cc @anuzis \n\n", "before_files": [{"content": "# coding: utf-8\n#\n# Copyright 2014 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, softwar\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Rules for CodeEvaluation objects.\"\"\"\n\nfrom extensions.rules import base\n\n\nclass CodeEquals(base.CodeEvaluationRule):\n description = 'has code equal to {{x|CodeString}}'\n\n\nclass CodeContains(base.CodeEvaluationRule):\n description = 'has code that contains {{x|CodeString}}'\n\n\nclass CodeDoesNotContain(base.CodeEvaluationRule):\n description = 'has code that does not contain {{x|CodeString}}'\n\n\nclass OutputEquals(base.CodeEvaluationRule):\n description = 'has output equal to {{x|CodeString}}'\n\n\nclass ResultsInError(base.CodeEvaluationRule):\n description = 'results in an error when run'\n\n\nclass ErrorContains(base.CodeEvaluationRule):\n description = (\n 'has error message that contains {{x|UnicodeString}}')\n", "path": "extensions/rules/code_evaluation.py"}], "after_files": [{"content": "# coding: utf-8\n#\n# Copyright 2014 The Oppia Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, softwar\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Rules for CodeEvaluation objects.\"\"\"\n\nfrom extensions.rules import base\n\n\nclass CodeEquals(base.CodeEvaluationRule):\n description = 'has code equal to {{x|CodeString}}'\n\n\nclass CodeContains(base.CodeEvaluationRule):\n description = 'has code that contains {{x|CodeString}}'\n\n\nclass CodeDoesNotContain(base.CodeEvaluationRule):\n description = 'has code that does not contain {{x|CodeString}}'\n\nclass OutputContains(base.CodeEvaluationRule):\n description = 'has output that contains {{x|CodeString}}'\n\nclass OutputEquals(base.CodeEvaluationRule):\n description = 'has output equal to {{x|CodeString}}'\n\n\nclass ResultsInError(base.CodeEvaluationRule):\n description = 'results in an error when run'\n\n\nclass ErrorContains(base.CodeEvaluationRule):\n description = (\n 'has error message that contains {{x|UnicodeString}}')\n", "path": "extensions/rules/code_evaluation.py"}]}
| 824 | 121 |
gh_patches_debug_22767
|
rasdani/github-patches
|
git_diff
|
alltheplaces__alltheplaces-4224
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pizza Hut Spider returns some closed outlets
It looks like the GB Pizza Hut spider "pizza_hut_gb" is returning a number of outlets that have closed. These are evident when the website either redirects to https://www.pizzahut.co.uk/restaurants/find or https://www.pizzahut.co.uk/restaurants/error/filenotfound . It seems that Pizza Hut are leaving up the https://www.pizzahut.co.uk/huts/uk-2/... web page after the outlet has closed, presumably for SEO reasons. These pages still contain the old location and web address, which the spider then picks up.
Examples include https://www.pizzahut.co.uk/huts/uk-2/437-ayr/ and https://www.pizzahut.co.uk/huts/uk-2/390-barrow/ .
I think these closed outlets can probably be removed from the dataset returned by looking at the openingHours LD field on the /huts/uk-2/ pages. The closed outlets seem to always have "openingHours":[]. The open branches have some sensible content there.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/pizza_hut_gb.py`
Content:
```
1 from scrapy.spiders import SitemapSpider
2
3 from locations.spiders.vapestore_gb import clean_address
4 from locations.structured_data_spider import StructuredDataSpider
5
6
7 class PizzaHutGB(SitemapSpider, StructuredDataSpider):
8 name = "pizza_hut_gb"
9 item_attributes = {"brand": "Pizza Hut", "brand_wikidata": "Q191615"}
10 sitemap_urls = ["https://www.pizzahut.co.uk/sitemap.xml"]
11 sitemap_rules = [
12 (r"https:\/\/www\.pizzahut\.co\.uk\/huts\/[-\w]+\/([-.\w]+)\/$", "parse_sd")
13 ]
14 wanted_types = ["FastFoodRestaurant"]
15
16 def inspect_item(self, item, response):
17 item["street_address"] = clean_address(item["street_address"])
18
19 if item["website"].startswith("https://www.pizzahut.co.uk/huts/"):
20 item["brand"] = "Pizza Hut Delivery"
21 item["brand_wikidata"] = "Q107293079"
22
23 yield item
24
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/locations/spiders/pizza_hut_gb.py b/locations/spiders/pizza_hut_gb.py
--- a/locations/spiders/pizza_hut_gb.py
+++ b/locations/spiders/pizza_hut_gb.py
@@ -7,17 +7,19 @@
class PizzaHutGB(SitemapSpider, StructuredDataSpider):
name = "pizza_hut_gb"
item_attributes = {"brand": "Pizza Hut", "brand_wikidata": "Q191615"}
+ PIZZA_HUT_DELIVERY = {"brand": "Pizza Hut Delivery", "brand_wikidata": "Q107293079"}
sitemap_urls = ["https://www.pizzahut.co.uk/sitemap.xml"]
sitemap_rules = [
(r"https:\/\/www\.pizzahut\.co\.uk\/huts\/[-\w]+\/([-.\w]+)\/$", "parse_sd")
]
- wanted_types = ["FastFoodRestaurant"]
- def inspect_item(self, item, response):
+ def post_process_item(self, item, response, ld_data, **kwargs):
item["street_address"] = clean_address(item["street_address"])
if item["website"].startswith("https://www.pizzahut.co.uk/huts/"):
- item["brand"] = "Pizza Hut Delivery"
- item["brand_wikidata"] = "Q107293079"
+ item.update(self.PIZZA_HUT_DELIVERY)
+
+ if not item["opening_hours"]:
+ return
yield item
|
{"golden_diff": "diff --git a/locations/spiders/pizza_hut_gb.py b/locations/spiders/pizza_hut_gb.py\n--- a/locations/spiders/pizza_hut_gb.py\n+++ b/locations/spiders/pizza_hut_gb.py\n@@ -7,17 +7,19 @@\n class PizzaHutGB(SitemapSpider, StructuredDataSpider):\n name = \"pizza_hut_gb\"\n item_attributes = {\"brand\": \"Pizza Hut\", \"brand_wikidata\": \"Q191615\"}\n+ PIZZA_HUT_DELIVERY = {\"brand\": \"Pizza Hut Delivery\", \"brand_wikidata\": \"Q107293079\"}\n sitemap_urls = [\"https://www.pizzahut.co.uk/sitemap.xml\"]\n sitemap_rules = [\n (r\"https:\\/\\/www\\.pizzahut\\.co\\.uk\\/huts\\/[-\\w]+\\/([-.\\w]+)\\/$\", \"parse_sd\")\n ]\n- wanted_types = [\"FastFoodRestaurant\"]\n \n- def inspect_item(self, item, response):\n+ def post_process_item(self, item, response, ld_data, **kwargs):\n item[\"street_address\"] = clean_address(item[\"street_address\"])\n \n if item[\"website\"].startswith(\"https://www.pizzahut.co.uk/huts/\"):\n- item[\"brand\"] = \"Pizza Hut Delivery\"\n- item[\"brand_wikidata\"] = \"Q107293079\"\n+ item.update(self.PIZZA_HUT_DELIVERY)\n+\n+ if not item[\"opening_hours\"]:\n+ return\n \n yield item\n", "issue": "Pizza Hut Spider returns some closed outlets\nIt looks like the GB Pizza Hut spider \"pizza_hut_gb\" is returning a number of outlets that have closed. These are evident when the website either redirects to https://www.pizzahut.co.uk/restaurants/find or https://www.pizzahut.co.uk/restaurants/error/filenotfound . It seems that Pizza Hut are leaving up the https://www.pizzahut.co.uk/huts/uk-2/... web page after the outlet has closed, presumably for SEO reasons. These pages still contain the old location and web address, which the spider then picks up.\r\n\r\nExamples include https://www.pizzahut.co.uk/huts/uk-2/437-ayr/ and https://www.pizzahut.co.uk/huts/uk-2/390-barrow/ .\r\n\r\nI think these closed outlets can probably be removed from the dataset returned by looking at the openingHours LD field on the /huts/uk-2/ pages. The closed outlets seem to always have \"openingHours\":[]. 
The open branches have some sensible content there.\n", "before_files": [{"content": "from scrapy.spiders import SitemapSpider\n\nfrom locations.spiders.vapestore_gb import clean_address\nfrom locations.structured_data_spider import StructuredDataSpider\n\n\nclass PizzaHutGB(SitemapSpider, StructuredDataSpider):\n name = \"pizza_hut_gb\"\n item_attributes = {\"brand\": \"Pizza Hut\", \"brand_wikidata\": \"Q191615\"}\n sitemap_urls = [\"https://www.pizzahut.co.uk/sitemap.xml\"]\n sitemap_rules = [\n (r\"https:\\/\\/www\\.pizzahut\\.co\\.uk\\/huts\\/[-\\w]+\\/([-.\\w]+)\\/$\", \"parse_sd\")\n ]\n wanted_types = [\"FastFoodRestaurant\"]\n\n def inspect_item(self, item, response):\n item[\"street_address\"] = clean_address(item[\"street_address\"])\n\n if item[\"website\"].startswith(\"https://www.pizzahut.co.uk/huts/\"):\n item[\"brand\"] = \"Pizza Hut Delivery\"\n item[\"brand_wikidata\"] = \"Q107293079\"\n\n yield item\n", "path": "locations/spiders/pizza_hut_gb.py"}], "after_files": [{"content": "from scrapy.spiders import SitemapSpider\n\nfrom locations.spiders.vapestore_gb import clean_address\nfrom locations.structured_data_spider import StructuredDataSpider\n\n\nclass PizzaHutGB(SitemapSpider, StructuredDataSpider):\n name = \"pizza_hut_gb\"\n item_attributes = {\"brand\": \"Pizza Hut\", \"brand_wikidata\": \"Q191615\"}\n PIZZA_HUT_DELIVERY = {\"brand\": \"Pizza Hut Delivery\", \"brand_wikidata\": \"Q107293079\"}\n sitemap_urls = [\"https://www.pizzahut.co.uk/sitemap.xml\"]\n sitemap_rules = [\n (r\"https:\\/\\/www\\.pizzahut\\.co\\.uk\\/huts\\/[-\\w]+\\/([-.\\w]+)\\/$\", \"parse_sd\")\n ]\n\n def post_process_item(self, item, response, ld_data, **kwargs):\n item[\"street_address\"] = clean_address(item[\"street_address\"])\n\n if item[\"website\"].startswith(\"https://www.pizzahut.co.uk/huts/\"):\n item.update(self.PIZZA_HUT_DELIVERY)\n\n if not item[\"opening_hours\"]:\n return\n\n yield item\n", "path": "locations/spiders/pizza_hut_gb.py"}]}
| 775 | 352 |
gh_patches_debug_11835
|
rasdani/github-patches
|
git_diff
|
PokemonGoF__PokemonGo-Bot-3511
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
IOError: [Errno 25] Inappropriate ioctl for device
Running in a docker container based on the DockerFile:
```
Traceback (most recent call last):
File "/usr/src/app/pokecli.py", line 530, in <module>
main()
File "/usr/src/app/pokecli.py", line 96, in main
bot.tick()
File "/usr/src/app/pokemongo_bot/__init__.py", line 472, in tick
if worker.work() == WorkerResult.RUNNING:
File "/usr/src/app/pokemongo_bot/cell_workers/catch_visible_pokemon.py", line 45, in work
'expiration_timestamp_ms': pokemon['expiration_timestamp_ms'],
File "/usr/src/app/pokemongo_bot/base_task.py", line 28, in emit_event
data=data
File "/usr/src/app/pokemongo_bot/event_manager.py", line 65, in emit
handler.handle_event(event, sender, level, formatted_msg, data)
File "/usr/src/app/pokemongo_bot/event_handlers/colored_logging_handler.py", line 130, in handle_event
terminal_width = self._terminal_width()
File "/usr/src/app/pokemongo_bot/event_handlers/colored_logging_handler.py", line 171, in _terminal_width
struct.pack(str('HHHH'), 0, 0, 0, 0)))
IOError: [Errno 25] Inappropriate ioctl for device
```
Git Commit: 9ae1b785f0836d8769d011626d2bc071ed338bff
This happens after encountering a pokemon.
Looks like it is now having a problem printing colors to the screen.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pokemongo_bot/event_handlers/colored_logging_handler.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import unicode_literals
3 import time
4 import sys
5 import struct
6
7 from pokemongo_bot.event_manager import EventHandler
8
9 class ColoredLoggingHandler(EventHandler):
10 EVENT_COLOR_MAP = {
11 'api_error': 'red',
12 'bot_exit': 'red',
13 'bot_start': 'green',
14 'config_error': 'red',
15 'egg_already_incubating': 'yellow',
16 'egg_hatched': 'green',
17 'future_pokemon_release': 'yellow',
18 'incubate': 'green',
19 'incubator_already_used': 'yellow',
20 'inventory_full': 'yellow',
21 'item_discard_fail': 'red',
22 'item_discarded': 'green',
23 'keep_best_release': 'green',
24 'level_up': 'green',
25 'level_up_reward': 'green',
26 'location_cache_error': 'yellow',
27 'location_cache_ignored': 'yellow',
28 'login_failed': 'red',
29 'login_successful': 'green',
30 'lucky_egg_error': 'red',
31 'move_to_map_pokemon_encounter': 'green',
32 'move_to_map_pokemon_fail': 'red',
33 'next_egg_incubates': 'yellow',
34 'next_sleep': 'green',
35 'no_pokeballs': 'red',
36 'pokemon_appeared': 'yellow',
37 'pokemon_capture_failed': 'red',
38 'pokemon_caught': 'blue',
39 'pokemon_evolved': 'green',
40 'pokemon_fled': 'red',
41 'pokemon_inventory_full': 'red',
42 'pokemon_nickname_invalid': 'red',
43 'pokemon_not_in_range': 'yellow',
44 'pokemon_release': 'green',
45 'pokemon_vanished': 'red',
46 'pokestop_empty': 'yellow',
47 'pokestop_searching_too_often': 'yellow',
48 'rename_pokemon': 'green',
49 'skip_evolve': 'yellow',
50 'softban': 'red',
51 'spun_pokestop': 'cyan',
52 'threw_berry_failed': 'red',
53 'unknown_spin_result': 'red',
54 'unset_pokemon_nickname': 'red',
55 'vip_pokemon': 'red',
56
57 # event names for 'white' still here to remember that these events are already determined its color.
58 'arrived_at_cluster': 'white',
59 'arrived_at_fort': 'white',
60 'bot_sleep': 'white',
61 'catchable_pokemon': 'white',
62 'found_cluster': 'white',
63 'incubate_try': 'white',
64 'load_cached_location': 'white',
65 'location_found': 'white',
66 'login_started': 'white',
67 'lured_pokemon_found': 'white',
68 'move_to_map_pokemon_move_towards': 'white',
69 'move_to_map_pokemon_teleport_back': 'white',
70 'move_to_map_pokemon_updated_map': 'white',
71 'moving_to_fort': 'white',
72 'moving_to_lured_fort': 'white',
73 'pokemon_catch_rate': 'white',
74 'pokemon_evolve_fail': 'white',
75 'pokestop_on_cooldown': 'white',
76 'pokestop_out_of_range': 'white',
77 'polyline_request': 'white',
78 'position_update': 'white',
79 'set_start_location': 'white',
80 'softban_fix': 'white',
81 'softban_fix_done': 'white',
82 'spun_fort': 'white',
83 'threw_berry': 'white',
84 'threw_pokeball': 'white',
85 'used_lucky_egg': 'white'
86 }
87 CONTINUOUS_EVENT_NAMES = [
88 'catchable_pokemon',
89 'moving_to_lured_fort',
90 'spun_fort'
91 ]
92 COLOR_CODE = {
93 'red': '91',
94 'green': '92',
95 'yellow': '93',
96 'blue': '94',
97 'cyan': '96'
98 }
99
100 def __init__(self):
101 self._last_event = None
102 try:
103 # this `try ... except` is for ImportError on Windows
104 import fcntl
105 import termios
106 self._ioctl = fcntl.ioctl
107 self._TIOCGWINSZ = termios.TIOCGWINSZ
108 except ImportError:
109 self._ioctl = None
110 self._TIOCGWINSZ = None
111
112 def handle_event(self, event, sender, level, formatted_msg, data):
113 # Prepare message string
114 message = None
115 if formatted_msg:
116 try:
117 message = formatted_msg.decode('utf-8')
118 except UnicodeEncodeError:
119 message = formatted_msg
120 else:
121 message = '{}'.format(str(data))
122
123 # Replace message if necessary
124 if event == 'catchable_pokemon':
125 message = 'Something rustles nearby!'
126
127 # Truncate previous line if same event continues
128 if event in ColoredLoggingHandler.CONTINUOUS_EVENT_NAMES and self._last_event == event:
129 # Filling with "' ' * terminal_width" in order to completely clear last line
130 terminal_width = self._terminal_width()
131 if terminal_width:
132 sys.stdout.write('\r{}\r'.format(' ' * terminal_width))
133 else:
134 sys.stdout.write('\r')
135 else:
136 sys.stdout.write("\n")
137
138 color_name = None
139 if event in ColoredLoggingHandler.EVENT_COLOR_MAP:
140 color_name = ColoredLoggingHandler.EVENT_COLOR_MAP[event]
141
142 # Change color if necessary
143 if event == 'egg_hatched' and data.get('pokemon', 'error') == 'error':
144 # `egg_hatched` event will be dispatched in both cases: hatched pokemon info is successfully taken or not.
145 # change color from 'green' to 'red' in case of error.
146 color_name = 'red'
147
148 if color_name in ColoredLoggingHandler.COLOR_CODE:
149 sys.stdout.write(
150 '[{time}] \033[{color}m{message}\033[0m'.format(
151 time=time.strftime("%H:%M:%S"),
152 color=ColoredLoggingHandler.COLOR_CODE[color_name],
153 message=message
154 )
155 )
156 else:
157 sys.stdout.write('[{time}] {message}'.format(
158 time=time.strftime("%H:%M:%S"),
159 message=message
160 ))
161
162 sys.stdout.flush()
163 self._last_event = event
164
165 def _terminal_width(self):
166 if self._ioctl is None or self._TIOCGWINSZ is None:
167 return None
168
169 h, w, hp, wp = struct.unpack(str('HHHH'),
170 self._ioctl(0, self._TIOCGWINSZ,
171 struct.pack(str('HHHH'), 0, 0, 0, 0)))
172 return w
173
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pokemongo_bot/event_handlers/colored_logging_handler.py b/pokemongo_bot/event_handlers/colored_logging_handler.py
--- a/pokemongo_bot/event_handlers/colored_logging_handler.py
+++ b/pokemongo_bot/event_handlers/colored_logging_handler.py
@@ -125,7 +125,7 @@
message = 'Something rustles nearby!'
# Truncate previous line if same event continues
- if event in ColoredLoggingHandler.CONTINUOUS_EVENT_NAMES and self._last_event == event:
+ if event in ColoredLoggingHandler.CONTINUOUS_EVENT_NAMES and self._last_event == event and sys.stdout.isatty():
# Filling with "' ' * terminal_width" in order to completely clear last line
terminal_width = self._terminal_width()
if terminal_width:
|
{"golden_diff": "diff --git a/pokemongo_bot/event_handlers/colored_logging_handler.py b/pokemongo_bot/event_handlers/colored_logging_handler.py\n--- a/pokemongo_bot/event_handlers/colored_logging_handler.py\n+++ b/pokemongo_bot/event_handlers/colored_logging_handler.py\n@@ -125,7 +125,7 @@\n message = 'Something rustles nearby!'\n \n # Truncate previous line if same event continues\n- if event in ColoredLoggingHandler.CONTINUOUS_EVENT_NAMES and self._last_event == event:\n+ if event in ColoredLoggingHandler.CONTINUOUS_EVENT_NAMES and self._last_event == event and sys.stdout.isatty():\n # Filling with \"' ' * terminal_width\" in order to completely clear last line\n terminal_width = self._terminal_width()\n if terminal_width:\n", "issue": "IOError: [Errno 25] Inappropriate ioctl for device\nRunning in a docker container based on the DockerFile:\n\n```\nTraceback (most recent call last):\n File \"/usr/src/app/pokecli.py\", line 530, in <module>\n main()\n File \"/usr/src/app/pokecli.py\", line 96, in main\n bot.tick()\n File \"/usr/src/app/pokemongo_bot/__init__.py\", line 472, in tick\n if worker.work() == WorkerResult.RUNNING:\n File \"/usr/src/app/pokemongo_bot/cell_workers/catch_visible_pokemon.py\", line 45, in work\n 'expiration_timestamp_ms': pokemon['expiration_timestamp_ms'],\n File \"/usr/src/app/pokemongo_bot/base_task.py\", line 28, in emit_event\n data=data\n File \"/usr/src/app/pokemongo_bot/event_manager.py\", line 65, in emit\n handler.handle_event(event, sender, level, formatted_msg, data)\n File \"/usr/src/app/pokemongo_bot/event_handlers/colored_logging_handler.py\", line 130, in handle_event\n terminal_width = self._terminal_width()\n File \"/usr/src/app/pokemongo_bot/event_handlers/colored_logging_handler.py\", line 171, in _terminal_width\n struct.pack(str('HHHH'), 0, 0, 0, 0)))\nIOError: [Errno 25] Inappropriate ioctl for device\n```\n\nGit Commit: 9ae1b785f0836d8769d011626d2bc071ed338bff\n\nThis happens after encountering a pokemon.\nLooks like it is now having a problem printing colors to the screen.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport time\nimport sys\nimport struct\n\nfrom pokemongo_bot.event_manager import EventHandler\n\nclass ColoredLoggingHandler(EventHandler):\n EVENT_COLOR_MAP = {\n 'api_error': 'red',\n 'bot_exit': 'red',\n 'bot_start': 'green',\n 'config_error': 'red',\n 'egg_already_incubating': 'yellow',\n 'egg_hatched': 'green',\n 'future_pokemon_release': 'yellow',\n 'incubate': 'green',\n 'incubator_already_used': 'yellow',\n 'inventory_full': 'yellow',\n 'item_discard_fail': 'red',\n 'item_discarded': 'green',\n 'keep_best_release': 'green',\n 'level_up': 'green',\n 'level_up_reward': 'green',\n 'location_cache_error': 'yellow',\n 'location_cache_ignored': 'yellow',\n 'login_failed': 'red',\n 'login_successful': 'green',\n 'lucky_egg_error': 'red',\n 'move_to_map_pokemon_encounter': 'green',\n 'move_to_map_pokemon_fail': 'red',\n 'next_egg_incubates': 'yellow',\n 'next_sleep': 'green',\n 'no_pokeballs': 'red',\n 'pokemon_appeared': 'yellow',\n 'pokemon_capture_failed': 'red',\n 'pokemon_caught': 'blue',\n 'pokemon_evolved': 'green',\n 'pokemon_fled': 'red',\n 'pokemon_inventory_full': 'red',\n 'pokemon_nickname_invalid': 'red',\n 'pokemon_not_in_range': 'yellow',\n 'pokemon_release': 'green',\n 'pokemon_vanished': 'red',\n 'pokestop_empty': 'yellow',\n 'pokestop_searching_too_often': 'yellow',\n 'rename_pokemon': 'green',\n 'skip_evolve': 'yellow',\n 'softban': 'red',\n 
'spun_pokestop': 'cyan',\n 'threw_berry_failed': 'red',\n 'unknown_spin_result': 'red',\n 'unset_pokemon_nickname': 'red',\n 'vip_pokemon': 'red',\n\n # event names for 'white' still here to remember that these events are already determined its color.\n 'arrived_at_cluster': 'white',\n 'arrived_at_fort': 'white',\n 'bot_sleep': 'white',\n 'catchable_pokemon': 'white',\n 'found_cluster': 'white',\n 'incubate_try': 'white',\n 'load_cached_location': 'white',\n 'location_found': 'white',\n 'login_started': 'white',\n 'lured_pokemon_found': 'white',\n 'move_to_map_pokemon_move_towards': 'white',\n 'move_to_map_pokemon_teleport_back': 'white',\n 'move_to_map_pokemon_updated_map': 'white',\n 'moving_to_fort': 'white',\n 'moving_to_lured_fort': 'white',\n 'pokemon_catch_rate': 'white',\n 'pokemon_evolve_fail': 'white',\n 'pokestop_on_cooldown': 'white',\n 'pokestop_out_of_range': 'white',\n 'polyline_request': 'white',\n 'position_update': 'white',\n 'set_start_location': 'white',\n 'softban_fix': 'white',\n 'softban_fix_done': 'white',\n 'spun_fort': 'white',\n 'threw_berry': 'white',\n 'threw_pokeball': 'white',\n 'used_lucky_egg': 'white'\n }\n CONTINUOUS_EVENT_NAMES = [\n 'catchable_pokemon',\n 'moving_to_lured_fort',\n 'spun_fort'\n ]\n COLOR_CODE = {\n 'red': '91',\n 'green': '92',\n 'yellow': '93',\n 'blue': '94',\n 'cyan': '96'\n }\n\n def __init__(self):\n self._last_event = None\n try:\n # this `try ... except` is for ImportError on Windows\n import fcntl\n import termios\n self._ioctl = fcntl.ioctl\n self._TIOCGWINSZ = termios.TIOCGWINSZ\n except ImportError:\n self._ioctl = None\n self._TIOCGWINSZ = None\n\n def handle_event(self, event, sender, level, formatted_msg, data):\n # Prepare message string\n message = None\n if formatted_msg:\n try:\n message = formatted_msg.decode('utf-8')\n except UnicodeEncodeError:\n message = formatted_msg\n else:\n message = '{}'.format(str(data))\n\n # Replace message if necessary\n if event == 'catchable_pokemon':\n message = 'Something rustles nearby!'\n\n # Truncate previous line if same event continues\n if event in ColoredLoggingHandler.CONTINUOUS_EVENT_NAMES and self._last_event == event:\n # Filling with \"' ' * terminal_width\" in order to completely clear last line\n terminal_width = self._terminal_width()\n if terminal_width:\n sys.stdout.write('\\r{}\\r'.format(' ' * terminal_width))\n else:\n sys.stdout.write('\\r')\n else:\n sys.stdout.write(\"\\n\")\n\n color_name = None\n if event in ColoredLoggingHandler.EVENT_COLOR_MAP:\n color_name = ColoredLoggingHandler.EVENT_COLOR_MAP[event]\n\n # Change color if necessary\n if event == 'egg_hatched' and data.get('pokemon', 'error') == 'error':\n # `egg_hatched` event will be dispatched in both cases: hatched pokemon info is successfully taken or not.\n # change color from 'green' to 'red' in case of error.\n color_name = 'red'\n\n if color_name in ColoredLoggingHandler.COLOR_CODE:\n sys.stdout.write(\n '[{time}] \\033[{color}m{message}\\033[0m'.format(\n time=time.strftime(\"%H:%M:%S\"),\n color=ColoredLoggingHandler.COLOR_CODE[color_name],\n message=message\n )\n )\n else:\n sys.stdout.write('[{time}] {message}'.format(\n time=time.strftime(\"%H:%M:%S\"),\n message=message\n ))\n\n sys.stdout.flush()\n self._last_event = event\n\n def _terminal_width(self):\n if self._ioctl is None or self._TIOCGWINSZ is None:\n return None\n\n h, w, hp, wp = struct.unpack(str('HHHH'),\n self._ioctl(0, self._TIOCGWINSZ,\n struct.pack(str('HHHH'), 0, 0, 0, 0)))\n return w\n", "path": 
"pokemongo_bot/event_handlers/colored_logging_handler.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport time\nimport sys\nimport struct\n\nfrom pokemongo_bot.event_manager import EventHandler\n\nclass ColoredLoggingHandler(EventHandler):\n EVENT_COLOR_MAP = {\n 'api_error': 'red',\n 'bot_exit': 'red',\n 'bot_start': 'green',\n 'config_error': 'red',\n 'egg_already_incubating': 'yellow',\n 'egg_hatched': 'green',\n 'future_pokemon_release': 'yellow',\n 'incubate': 'green',\n 'incubator_already_used': 'yellow',\n 'inventory_full': 'yellow',\n 'item_discard_fail': 'red',\n 'item_discarded': 'green',\n 'keep_best_release': 'green',\n 'level_up': 'green',\n 'level_up_reward': 'green',\n 'location_cache_error': 'yellow',\n 'location_cache_ignored': 'yellow',\n 'login_failed': 'red',\n 'login_successful': 'green',\n 'lucky_egg_error': 'red',\n 'move_to_map_pokemon_encounter': 'green',\n 'move_to_map_pokemon_fail': 'red',\n 'next_egg_incubates': 'yellow',\n 'next_sleep': 'green',\n 'no_pokeballs': 'red',\n 'pokemon_appeared': 'yellow',\n 'pokemon_capture_failed': 'red',\n 'pokemon_caught': 'blue',\n 'pokemon_evolved': 'green',\n 'pokemon_fled': 'red',\n 'pokemon_inventory_full': 'red',\n 'pokemon_nickname_invalid': 'red',\n 'pokemon_not_in_range': 'yellow',\n 'pokemon_release': 'green',\n 'pokemon_vanished': 'red',\n 'pokestop_empty': 'yellow',\n 'pokestop_searching_too_often': 'yellow',\n 'rename_pokemon': 'green',\n 'skip_evolve': 'yellow',\n 'softban': 'red',\n 'spun_pokestop': 'cyan',\n 'threw_berry_failed': 'red',\n 'unknown_spin_result': 'red',\n 'unset_pokemon_nickname': 'red',\n 'vip_pokemon': 'red',\n\n # event names for 'white' still here to remember that these events are already determined its color.\n 'arrived_at_cluster': 'white',\n 'arrived_at_fort': 'white',\n 'bot_sleep': 'white',\n 'catchable_pokemon': 'white',\n 'found_cluster': 'white',\n 'incubate_try': 'white',\n 'load_cached_location': 'white',\n 'location_found': 'white',\n 'login_started': 'white',\n 'lured_pokemon_found': 'white',\n 'move_to_map_pokemon_move_towards': 'white',\n 'move_to_map_pokemon_teleport_back': 'white',\n 'move_to_map_pokemon_updated_map': 'white',\n 'moving_to_fort': 'white',\n 'moving_to_lured_fort': 'white',\n 'pokemon_catch_rate': 'white',\n 'pokemon_evolve_fail': 'white',\n 'pokestop_on_cooldown': 'white',\n 'pokestop_out_of_range': 'white',\n 'polyline_request': 'white',\n 'position_update': 'white',\n 'set_start_location': 'white',\n 'softban_fix': 'white',\n 'softban_fix_done': 'white',\n 'spun_fort': 'white',\n 'threw_berry': 'white',\n 'threw_pokeball': 'white',\n 'used_lucky_egg': 'white'\n }\n CONTINUOUS_EVENT_NAMES = [\n 'catchable_pokemon',\n 'moving_to_lured_fort',\n 'spun_fort'\n ]\n COLOR_CODE = {\n 'red': '91',\n 'green': '92',\n 'yellow': '93',\n 'blue': '94',\n 'cyan': '96'\n }\n\n def __init__(self):\n self._last_event = None\n try:\n # this `try ... 
except` is for ImportError on Windows\n import fcntl\n import termios\n self._ioctl = fcntl.ioctl\n self._TIOCGWINSZ = termios.TIOCGWINSZ\n except ImportError:\n self._ioctl = None\n self._TIOCGWINSZ = None\n\n def handle_event(self, event, sender, level, formatted_msg, data):\n # Prepare message string\n message = None\n if formatted_msg:\n try:\n message = formatted_msg.decode('utf-8')\n except UnicodeEncodeError:\n message = formatted_msg\n else:\n message = '{}'.format(str(data))\n\n # Replace message if necessary\n if event == 'catchable_pokemon':\n message = 'Something rustles nearby!'\n\n # Truncate previous line if same event continues\n if event in ColoredLoggingHandler.CONTINUOUS_EVENT_NAMES and self._last_event == event and sys.stdout.isatty():\n # Filling with \"' ' * terminal_width\" in order to completely clear last line\n terminal_width = self._terminal_width()\n if terminal_width:\n sys.stdout.write('\\r{}\\r'.format(' ' * terminal_width))\n else:\n sys.stdout.write('\\r')\n else:\n sys.stdout.write(\"\\n\")\n\n color_name = None\n if event in ColoredLoggingHandler.EVENT_COLOR_MAP:\n color_name = ColoredLoggingHandler.EVENT_COLOR_MAP[event]\n\n # Change color if necessary\n if event == 'egg_hatched' and data.get('pokemon', 'error') == 'error':\n # `egg_hatched` event will be dispatched in both cases: hatched pokemon info is successfully taken or not.\n # change color from 'green' to 'red' in case of error.\n color_name = 'red'\n\n if color_name in ColoredLoggingHandler.COLOR_CODE:\n sys.stdout.write(\n '[{time}] \\033[{color}m{message}\\033[0m'.format(\n time=time.strftime(\"%H:%M:%S\"),\n color=ColoredLoggingHandler.COLOR_CODE[color_name],\n message=message\n )\n )\n else:\n sys.stdout.write('[{time}] {message}'.format(\n time=time.strftime(\"%H:%M:%S\"),\n message=message\n ))\n\n sys.stdout.flush()\n self._last_event = event\n\n def _terminal_width(self):\n if self._ioctl is None or self._TIOCGWINSZ is None:\n return None\n\n h, w, hp, wp = struct.unpack(str('HHHH'),\n self._ioctl(0, self._TIOCGWINSZ,\n struct.pack(str('HHHH'), 0, 0, 0, 0)))\n return w\n", "path": "pokemongo_bot/event_handlers/colored_logging_handler.py"}]}
| 2,637 | 181 |
gh_patches_debug_15139
|
rasdani/github-patches
|
git_diff
|
acl-org__acl-anthology-1453
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make volumes adhere to the flat directory structure
All our files are relatively flat, but we still have nested structures for volumes (in the build dir under `anthology/papers/`).
This looks like an oversight from a previous change and noone has complained yet.
Make volumes adhere to the flat directory structure
All our files are relatively flat, but we still have nested structures for volumes (in the build dir under `anthology/papers/`).
This looks like an oversight from a previous change and noone has complained yet.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bin/create_hugo_pages.py`
Content:
```
1 #!/usr/bin/env python3
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright 2019 Marcel Bollmann <[email protected]>
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
17
18 """Usage: create_hugo_pages.py [--dir=DIR] [-c] [--debug]
19
20 Creates page stubs for the full anthology based on the YAML data files.
21
22 This script can only be run after create_hugo_yaml.py!
23
24 Options:
25 --dir=DIR Hugo project directory. [default: {scriptdir}/../build/]
26 --debug Output debug-level log messages.
27 -c, --clean Delete existing files in target directory before generation.
28 -h, --help Display this helpful text.
29 """
30
31 from docopt import docopt
32 from glob import glob
33 from slugify import slugify
34 from tqdm import tqdm
35 import logging as log
36 import os
37 import shutil
38 import yaml
39
40 try:
41 from yaml import CLoader as Loader
42 except ImportError:
43 log.info("Can't load yaml C bindings, reverting to slow pure Python version")
44 from yaml import Loader
45
46 from anthology.utils import SeverityTracker
47
48
49 def check_directory(cdir, clean=False):
50 if not os.path.isdir(cdir) and not os.path.exists(cdir):
51 os.mkdir(cdir)
52 return True
53 entries = os.listdir(cdir)
54 if "_index.md" in entries:
55 entries.remove("_index.md")
56 if entries and not clean:
57 log.critical("Directory already exists and has content files: {}".format(cdir))
58 log.info(
59 "Call this script with the -c/--clean flag to automatically DELETE existing files"
60 )
61 return False
62 for entry in entries:
63 entry = "{}/{}".format(cdir, entry)
64 if os.path.isdir(entry):
65 shutil.rmtree(entry)
66 else:
67 os.remove(entry)
68 return True
69
70
71 def create_papers(srcdir, clean=False):
72 """Creates page stubs for all papers in the Anthology."""
73 log.info("Creating stubs for papers...")
74 if not check_directory("{}/content/papers".format(srcdir), clean=clean):
75 return
76
77 # Go through all paper volumes
78 for yamlfile in tqdm(glob("{}/data/papers/*.yaml".format(srcdir))):
79 log.debug("Processing {}".format(yamlfile))
80 with open(yamlfile, "r") as f:
81 data = yaml.load(f, Loader=Loader)
82 # Create a paper stub for each entry in the volume
83 for anthology_id, entry in data.items():
84 paper_dir = "{}/content/papers/{}".format(srcdir, anthology_id.split("-")[0])
85 if not os.path.exists(paper_dir):
86 os.makedirs(paper_dir)
87 with open("{}/{}.md".format(paper_dir, anthology_id), "w") as f:
88 print("---", file=f)
89 yaml.dump(
90 {"anthology_id": anthology_id, "title": entry["title"]},
91 default_flow_style=False,
92 stream=f,
93 )
94 print("---", file=f)
95
96
97 def create_volumes(srcdir, clean=False):
98 """Creates page stubs for all proceedings volumes in the Anthology."""
99 log.info("Creating stubs for volumes...")
100 if not check_directory("{}/content/volumes".format(srcdir), clean=clean):
101 return
102
103 yamlfile = "{}/data/volumes.yaml".format(srcdir)
104 log.debug("Processing {}".format(yamlfile))
105 with open(yamlfile, "r") as f:
106 data = yaml.load(f, Loader=Loader)
107 # Create a paper stub for each proceedings volume
108 for anthology_id, entry in data.items():
109 with open("{}/content/volumes/{}.md".format(srcdir, anthology_id), "w") as f:
110 print("---", file=f)
111 paper_dir = "/papers/{}/{}/".format(anthology_id.split("-")[0], anthology_id)
112 yaml.dump(
113 {
114 "anthology_id": anthology_id,
115 "title": entry["title"],
116 "aliases": [
117 paper_dir,
118 ],
119 },
120 default_flow_style=False,
121 stream=f,
122 )
123 print("---", file=f)
124
125 return data
126
127
128 def create_people(srcdir, clean=False):
129 """Creates page stubs for all authors/editors in the Anthology."""
130 log.info("Creating stubs for people...")
131 if not check_directory("{}/content/people".format(srcdir), clean=clean):
132 return
133
134 for yamlfile in tqdm(glob("{}/data/people/*.yaml".format(srcdir))):
135 log.debug("Processing {}".format(yamlfile))
136 with open(yamlfile, "r") as f:
137 data = yaml.load(f, Loader=Loader)
138 # Create a page stub for each person
139 for name, entry in data.items():
140 person_dir = "{}/content/people/{}".format(srcdir, name[0])
141 if not os.path.exists(person_dir):
142 os.makedirs(person_dir)
143 yaml_data = {"name": name, "title": entry["full"], "lastname": entry["last"]}
144 with open("{}/{}.md".format(person_dir, name), "w") as f:
145 print("---", file=f)
146 # "lastname" is dumped to allow sorting by it in Hugo
147 yaml.dump(yaml_data, default_flow_style=False, stream=f)
148 print("---", file=f)
149
150 return data
151
152
153 def create_venues_and_events(srcdir, clean=False):
154 """Creates page stubs for all venues and events in the Anthology."""
155 yamlfile = "{}/data/venues.yaml".format(srcdir)
156 log.debug("Processing {}".format(yamlfile))
157 with open(yamlfile, "r") as f:
158 data = yaml.load(f, Loader=Loader)
159
160 log.info("Creating stubs for venues...")
161 if not check_directory("{}/content/venues".format(srcdir), clean=clean):
162 return
163 # Create a paper stub for each venue (e.g. ACL)
164 for venue, venue_data in data.items():
165 venue_str = venue_data["slug"]
166 with open("{}/content/venues/{}.md".format(srcdir, venue_str), "w") as f:
167 print("---", file=f)
168 yaml_data = {"venue": venue, "title": venue_data["name"]}
169 yaml.dump(yaml_data, default_flow_style=False, stream=f)
170 print("---", file=f)
171
172 log.info("Creating stubs for events...")
173 if not check_directory("{}/content/events".format(srcdir), clean=clean):
174 return
175 # Create a paper stub for each event (= venue + year, e.g. ACL 2018)
176 for venue, venue_data in data.items():
177 venue_str = venue_data["slug"]
178 for year in venue_data["volumes_by_year"]:
179 with open(
180 "{}/content/events/{}-{}.md".format(srcdir, venue_str, year), "w"
181 ) as f:
182 print("---", file=f)
183 yaml_data = {
184 "venue": venue,
185 "year": year,
186 "title": "{} ({})".format(venue_data["name"], year),
187 }
188 yaml.dump(yaml_data, default_flow_style=False, stream=f)
189 print("---", file=f)
190
191
192 def create_sigs(srcdir, clean=False):
193 """Creates page stubs for all SIGs in the Anthology."""
194 yamlfile = "{}/data/sigs.yaml".format(srcdir)
195 log.debug("Processing {}".format(yamlfile))
196 with open(yamlfile, "r") as f:
197 data = yaml.load(f, Loader=Loader)
198
199 log.info("Creating stubs for SIGs...")
200 if not check_directory("{}/content/sigs".format(srcdir), clean=clean):
201 return
202 # Create a paper stub for each SIGS (e.g. SIGMORPHON)
203 for sig, sig_data in data.items():
204 sig_str = sig_data["slug"]
205 with open("{}/content/sigs/{}.md".format(srcdir, sig_str), "w") as f:
206 print("---", file=f)
207 yaml.dump(
208 {
209 "acronym": sig,
210 "short_acronym": sig[3:] if sig.startswith("SIG") else sig,
211 "title": sig_data["name"],
212 },
213 default_flow_style=False,
214 stream=f,
215 )
216 print("---", file=f)
217
218
219 if __name__ == "__main__":
220 args = docopt(__doc__)
221 scriptdir = os.path.dirname(os.path.abspath(__file__))
222 if "{scriptdir}" in args["--dir"]:
223 args["--dir"] = args["--dir"].format(scriptdir=scriptdir)
224 dir_ = os.path.abspath(args["--dir"])
225
226 log_level = log.DEBUG if args["--debug"] else log.INFO
227 log.basicConfig(format="%(levelname)-8s %(message)s", level=log_level)
228 tracker = SeverityTracker()
229 log.getLogger().addHandler(tracker)
230
231 create_papers(dir_, clean=args["--clean"])
232 create_volumes(dir_, clean=args["--clean"])
233 create_people(dir_, clean=args["--clean"])
234 create_venues_and_events(dir_, clean=args["--clean"])
235 create_sigs(dir_, clean=args["--clean"])
236
237 if tracker.highest >= log.ERROR:
238 exit(1)
239
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bin/create_hugo_pages.py b/bin/create_hugo_pages.py
--- a/bin/create_hugo_pages.py
+++ b/bin/create_hugo_pages.py
@@ -108,14 +108,10 @@
for anthology_id, entry in data.items():
with open("{}/content/volumes/{}.md".format(srcdir, anthology_id), "w") as f:
print("---", file=f)
- paper_dir = "/papers/{}/{}/".format(anthology_id.split("-")[0], anthology_id)
yaml.dump(
{
"anthology_id": anthology_id,
"title": entry["title"],
- "aliases": [
- paper_dir,
- ],
},
default_flow_style=False,
stream=f,
|
{"golden_diff": "diff --git a/bin/create_hugo_pages.py b/bin/create_hugo_pages.py\n--- a/bin/create_hugo_pages.py\n+++ b/bin/create_hugo_pages.py\n@@ -108,14 +108,10 @@\n for anthology_id, entry in data.items():\n with open(\"{}/content/volumes/{}.md\".format(srcdir, anthology_id), \"w\") as f:\n print(\"---\", file=f)\n- paper_dir = \"/papers/{}/{}/\".format(anthology_id.split(\"-\")[0], anthology_id)\n yaml.dump(\n {\n \"anthology_id\": anthology_id,\n \"title\": entry[\"title\"],\n- \"aliases\": [\n- paper_dir,\n- ],\n },\n default_flow_style=False,\n stream=f,\n", "issue": "Make volumes adhere to the flat directory structure\nAll our files are relatively flat, but we still have nested structures for volumes (in the build dir under `anthology/papers/`).\r\n\r\nThis looks like an oversight from a previous change and noone has complained yet.\nMake volumes adhere to the flat directory structure\nAll our files are relatively flat, but we still have nested structures for volumes (in the build dir under `anthology/papers/`).\r\n\r\nThis looks like an oversight from a previous change and noone has complained yet.\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Marcel Bollmann <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Usage: create_hugo_pages.py [--dir=DIR] [-c] [--debug]\n\nCreates page stubs for the full anthology based on the YAML data files.\n\nThis script can only be run after create_hugo_yaml.py!\n\nOptions:\n --dir=DIR Hugo project directory. 
[default: {scriptdir}/../build/]\n --debug Output debug-level log messages.\n -c, --clean Delete existing files in target directory before generation.\n -h, --help Display this helpful text.\n\"\"\"\n\nfrom docopt import docopt\nfrom glob import glob\nfrom slugify import slugify\nfrom tqdm import tqdm\nimport logging as log\nimport os\nimport shutil\nimport yaml\n\ntry:\n from yaml import CLoader as Loader\nexcept ImportError:\n log.info(\"Can't load yaml C bindings, reverting to slow pure Python version\")\n from yaml import Loader\n\nfrom anthology.utils import SeverityTracker\n\n\ndef check_directory(cdir, clean=False):\n if not os.path.isdir(cdir) and not os.path.exists(cdir):\n os.mkdir(cdir)\n return True\n entries = os.listdir(cdir)\n if \"_index.md\" in entries:\n entries.remove(\"_index.md\")\n if entries and not clean:\n log.critical(\"Directory already exists and has content files: {}\".format(cdir))\n log.info(\n \"Call this script with the -c/--clean flag to automatically DELETE existing files\"\n )\n return False\n for entry in entries:\n entry = \"{}/{}\".format(cdir, entry)\n if os.path.isdir(entry):\n shutil.rmtree(entry)\n else:\n os.remove(entry)\n return True\n\n\ndef create_papers(srcdir, clean=False):\n \"\"\"Creates page stubs for all papers in the Anthology.\"\"\"\n log.info(\"Creating stubs for papers...\")\n if not check_directory(\"{}/content/papers\".format(srcdir), clean=clean):\n return\n\n # Go through all paper volumes\n for yamlfile in tqdm(glob(\"{}/data/papers/*.yaml\".format(srcdir))):\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n # Create a paper stub for each entry in the volume\n for anthology_id, entry in data.items():\n paper_dir = \"{}/content/papers/{}\".format(srcdir, anthology_id.split(\"-\")[0])\n if not os.path.exists(paper_dir):\n os.makedirs(paper_dir)\n with open(\"{}/{}.md\".format(paper_dir, anthology_id), \"w\") as f:\n print(\"---\", file=f)\n yaml.dump(\n {\"anthology_id\": anthology_id, \"title\": entry[\"title\"]},\n default_flow_style=False,\n stream=f,\n )\n print(\"---\", file=f)\n\n\ndef create_volumes(srcdir, clean=False):\n \"\"\"Creates page stubs for all proceedings volumes in the Anthology.\"\"\"\n log.info(\"Creating stubs for volumes...\")\n if not check_directory(\"{}/content/volumes\".format(srcdir), clean=clean):\n return\n\n yamlfile = \"{}/data/volumes.yaml\".format(srcdir)\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n # Create a paper stub for each proceedings volume\n for anthology_id, entry in data.items():\n with open(\"{}/content/volumes/{}.md\".format(srcdir, anthology_id), \"w\") as f:\n print(\"---\", file=f)\n paper_dir = \"/papers/{}/{}/\".format(anthology_id.split(\"-\")[0], anthology_id)\n yaml.dump(\n {\n \"anthology_id\": anthology_id,\n \"title\": entry[\"title\"],\n \"aliases\": [\n paper_dir,\n ],\n },\n default_flow_style=False,\n stream=f,\n )\n print(\"---\", file=f)\n\n return data\n\n\ndef create_people(srcdir, clean=False):\n \"\"\"Creates page stubs for all authors/editors in the Anthology.\"\"\"\n log.info(\"Creating stubs for people...\")\n if not check_directory(\"{}/content/people\".format(srcdir), clean=clean):\n return\n\n for yamlfile in tqdm(glob(\"{}/data/people/*.yaml\".format(srcdir))):\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n # Create a page stub for 
each person\n for name, entry in data.items():\n person_dir = \"{}/content/people/{}\".format(srcdir, name[0])\n if not os.path.exists(person_dir):\n os.makedirs(person_dir)\n yaml_data = {\"name\": name, \"title\": entry[\"full\"], \"lastname\": entry[\"last\"]}\n with open(\"{}/{}.md\".format(person_dir, name), \"w\") as f:\n print(\"---\", file=f)\n # \"lastname\" is dumped to allow sorting by it in Hugo\n yaml.dump(yaml_data, default_flow_style=False, stream=f)\n print(\"---\", file=f)\n\n return data\n\n\ndef create_venues_and_events(srcdir, clean=False):\n \"\"\"Creates page stubs for all venues and events in the Anthology.\"\"\"\n yamlfile = \"{}/data/venues.yaml\".format(srcdir)\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n\n log.info(\"Creating stubs for venues...\")\n if not check_directory(\"{}/content/venues\".format(srcdir), clean=clean):\n return\n # Create a paper stub for each venue (e.g. ACL)\n for venue, venue_data in data.items():\n venue_str = venue_data[\"slug\"]\n with open(\"{}/content/venues/{}.md\".format(srcdir, venue_str), \"w\") as f:\n print(\"---\", file=f)\n yaml_data = {\"venue\": venue, \"title\": venue_data[\"name\"]}\n yaml.dump(yaml_data, default_flow_style=False, stream=f)\n print(\"---\", file=f)\n\n log.info(\"Creating stubs for events...\")\n if not check_directory(\"{}/content/events\".format(srcdir), clean=clean):\n return\n # Create a paper stub for each event (= venue + year, e.g. ACL 2018)\n for venue, venue_data in data.items():\n venue_str = venue_data[\"slug\"]\n for year in venue_data[\"volumes_by_year\"]:\n with open(\n \"{}/content/events/{}-{}.md\".format(srcdir, venue_str, year), \"w\"\n ) as f:\n print(\"---\", file=f)\n yaml_data = {\n \"venue\": venue,\n \"year\": year,\n \"title\": \"{} ({})\".format(venue_data[\"name\"], year),\n }\n yaml.dump(yaml_data, default_flow_style=False, stream=f)\n print(\"---\", file=f)\n\n\ndef create_sigs(srcdir, clean=False):\n \"\"\"Creates page stubs for all SIGs in the Anthology.\"\"\"\n yamlfile = \"{}/data/sigs.yaml\".format(srcdir)\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n\n log.info(\"Creating stubs for SIGs...\")\n if not check_directory(\"{}/content/sigs\".format(srcdir), clean=clean):\n return\n # Create a paper stub for each SIGS (e.g. 
SIGMORPHON)\n for sig, sig_data in data.items():\n sig_str = sig_data[\"slug\"]\n with open(\"{}/content/sigs/{}.md\".format(srcdir, sig_str), \"w\") as f:\n print(\"---\", file=f)\n yaml.dump(\n {\n \"acronym\": sig,\n \"short_acronym\": sig[3:] if sig.startswith(\"SIG\") else sig,\n \"title\": sig_data[\"name\"],\n },\n default_flow_style=False,\n stream=f,\n )\n print(\"---\", file=f)\n\n\nif __name__ == \"__main__\":\n args = docopt(__doc__)\n scriptdir = os.path.dirname(os.path.abspath(__file__))\n if \"{scriptdir}\" in args[\"--dir\"]:\n args[\"--dir\"] = args[\"--dir\"].format(scriptdir=scriptdir)\n dir_ = os.path.abspath(args[\"--dir\"])\n\n log_level = log.DEBUG if args[\"--debug\"] else log.INFO\n log.basicConfig(format=\"%(levelname)-8s %(message)s\", level=log_level)\n tracker = SeverityTracker()\n log.getLogger().addHandler(tracker)\n\n create_papers(dir_, clean=args[\"--clean\"])\n create_volumes(dir_, clean=args[\"--clean\"])\n create_people(dir_, clean=args[\"--clean\"])\n create_venues_and_events(dir_, clean=args[\"--clean\"])\n create_sigs(dir_, clean=args[\"--clean\"])\n\n if tracker.highest >= log.ERROR:\n exit(1)\n", "path": "bin/create_hugo_pages.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Marcel Bollmann <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Usage: create_hugo_pages.py [--dir=DIR] [-c] [--debug]\n\nCreates page stubs for the full anthology based on the YAML data files.\n\nThis script can only be run after create_hugo_yaml.py!\n\nOptions:\n --dir=DIR Hugo project directory. 
[default: {scriptdir}/../build/]\n --debug Output debug-level log messages.\n -c, --clean Delete existing files in target directory before generation.\n -h, --help Display this helpful text.\n\"\"\"\n\nfrom docopt import docopt\nfrom glob import glob\nfrom slugify import slugify\nfrom tqdm import tqdm\nimport logging as log\nimport os\nimport shutil\nimport yaml\n\ntry:\n from yaml import CLoader as Loader\nexcept ImportError:\n log.info(\"Can't load yaml C bindings, reverting to slow pure Python version\")\n from yaml import Loader\n\nfrom anthology.utils import SeverityTracker\n\n\ndef check_directory(cdir, clean=False):\n if not os.path.isdir(cdir) and not os.path.exists(cdir):\n os.mkdir(cdir)\n return True\n entries = os.listdir(cdir)\n if \"_index.md\" in entries:\n entries.remove(\"_index.md\")\n if entries and not clean:\n log.critical(\"Directory already exists and has content files: {}\".format(cdir))\n log.info(\n \"Call this script with the -c/--clean flag to automatically DELETE existing files\"\n )\n return False\n for entry in entries:\n entry = \"{}/{}\".format(cdir, entry)\n if os.path.isdir(entry):\n shutil.rmtree(entry)\n else:\n os.remove(entry)\n return True\n\n\ndef create_papers(srcdir, clean=False):\n \"\"\"Creates page stubs for all papers in the Anthology.\"\"\"\n log.info(\"Creating stubs for papers...\")\n if not check_directory(\"{}/content/papers\".format(srcdir), clean=clean):\n return\n\n # Go through all paper volumes\n for yamlfile in tqdm(glob(\"{}/data/papers/*.yaml\".format(srcdir))):\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n # Create a paper stub for each entry in the volume\n for anthology_id, entry in data.items():\n paper_dir = \"{}/content/papers/{}\".format(srcdir, anthology_id.split(\"-\")[0])\n if not os.path.exists(paper_dir):\n os.makedirs(paper_dir)\n with open(\"{}/{}.md\".format(paper_dir, anthology_id), \"w\") as f:\n print(\"---\", file=f)\n yaml.dump(\n {\"anthology_id\": anthology_id, \"title\": entry[\"title\"]},\n default_flow_style=False,\n stream=f,\n )\n print(\"---\", file=f)\n\n\ndef create_volumes(srcdir, clean=False):\n \"\"\"Creates page stubs for all proceedings volumes in the Anthology.\"\"\"\n log.info(\"Creating stubs for volumes...\")\n if not check_directory(\"{}/content/volumes\".format(srcdir), clean=clean):\n return\n\n yamlfile = \"{}/data/volumes.yaml\".format(srcdir)\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n # Create a paper stub for each proceedings volume\n for anthology_id, entry in data.items():\n with open(\"{}/content/volumes/{}.md\".format(srcdir, anthology_id), \"w\") as f:\n print(\"---\", file=f)\n yaml.dump(\n {\n \"anthology_id\": anthology_id,\n \"title\": entry[\"title\"],\n },\n default_flow_style=False,\n stream=f,\n )\n print(\"---\", file=f)\n\n return data\n\n\ndef create_people(srcdir, clean=False):\n \"\"\"Creates page stubs for all authors/editors in the Anthology.\"\"\"\n log.info(\"Creating stubs for people...\")\n if not check_directory(\"{}/content/people\".format(srcdir), clean=clean):\n return\n\n for yamlfile in tqdm(glob(\"{}/data/people/*.yaml\".format(srcdir))):\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n # Create a page stub for each person\n for name, entry in data.items():\n person_dir = \"{}/content/people/{}\".format(srcdir, name[0])\n if not 
os.path.exists(person_dir):\n os.makedirs(person_dir)\n yaml_data = {\"name\": name, \"title\": entry[\"full\"], \"lastname\": entry[\"last\"]}\n with open(\"{}/{}.md\".format(person_dir, name), \"w\") as f:\n print(\"---\", file=f)\n # \"lastname\" is dumped to allow sorting by it in Hugo\n yaml.dump(yaml_data, default_flow_style=False, stream=f)\n print(\"---\", file=f)\n\n return data\n\n\ndef create_venues_and_events(srcdir, clean=False):\n \"\"\"Creates page stubs for all venues and events in the Anthology.\"\"\"\n yamlfile = \"{}/data/venues.yaml\".format(srcdir)\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n\n log.info(\"Creating stubs for venues...\")\n if not check_directory(\"{}/content/venues\".format(srcdir), clean=clean):\n return\n # Create a paper stub for each venue (e.g. ACL)\n for venue, venue_data in data.items():\n venue_str = venue_data[\"slug\"]\n with open(\"{}/content/venues/{}.md\".format(srcdir, venue_str), \"w\") as f:\n print(\"---\", file=f)\n yaml_data = {\"venue\": venue, \"title\": venue_data[\"name\"]}\n yaml.dump(yaml_data, default_flow_style=False, stream=f)\n print(\"---\", file=f)\n\n log.info(\"Creating stubs for events...\")\n if not check_directory(\"{}/content/events\".format(srcdir), clean=clean):\n return\n # Create a paper stub for each event (= venue + year, e.g. ACL 2018)\n for venue, venue_data in data.items():\n venue_str = venue_data[\"slug\"]\n for year in venue_data[\"volumes_by_year\"]:\n with open(\n \"{}/content/events/{}-{}.md\".format(srcdir, venue_str, year), \"w\"\n ) as f:\n print(\"---\", file=f)\n yaml_data = {\n \"venue\": venue,\n \"year\": year,\n \"title\": \"{} ({})\".format(venue_data[\"name\"], year),\n }\n yaml.dump(yaml_data, default_flow_style=False, stream=f)\n print(\"---\", file=f)\n\n\ndef create_sigs(srcdir, clean=False):\n \"\"\"Creates page stubs for all SIGs in the Anthology.\"\"\"\n yamlfile = \"{}/data/sigs.yaml\".format(srcdir)\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n\n log.info(\"Creating stubs for SIGs...\")\n if not check_directory(\"{}/content/sigs\".format(srcdir), clean=clean):\n return\n # Create a paper stub for each SIGS (e.g. SIGMORPHON)\n for sig, sig_data in data.items():\n sig_str = sig_data[\"slug\"]\n with open(\"{}/content/sigs/{}.md\".format(srcdir, sig_str), \"w\") as f:\n print(\"---\", file=f)\n yaml.dump(\n {\n \"acronym\": sig,\n \"short_acronym\": sig[3:] if sig.startswith(\"SIG\") else sig,\n \"title\": sig_data[\"name\"],\n },\n default_flow_style=False,\n stream=f,\n )\n print(\"---\", file=f)\n\n\nif __name__ == \"__main__\":\n args = docopt(__doc__)\n scriptdir = os.path.dirname(os.path.abspath(__file__))\n if \"{scriptdir}\" in args[\"--dir\"]:\n args[\"--dir\"] = args[\"--dir\"].format(scriptdir=scriptdir)\n dir_ = os.path.abspath(args[\"--dir\"])\n\n log_level = log.DEBUG if args[\"--debug\"] else log.INFO\n log.basicConfig(format=\"%(levelname)-8s %(message)s\", level=log_level)\n tracker = SeverityTracker()\n log.getLogger().addHandler(tracker)\n\n create_papers(dir_, clean=args[\"--clean\"])\n create_volumes(dir_, clean=args[\"--clean\"])\n create_people(dir_, clean=args[\"--clean\"])\n create_venues_and_events(dir_, clean=args[\"--clean\"])\n create_sigs(dir_, clean=args[\"--clean\"])\n\n if tracker.highest >= log.ERROR:\n exit(1)\n", "path": "bin/create_hugo_pages.py"}]}
| 3,094 | 167 |
gh_patches_debug_3009
|
rasdani/github-patches
|
git_diff
|
ethereum__web3.py-475
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
web3.auto raises unclear exception if no client is live
* Version: 4.0.0-beta.1
* OS: linux
### What was wrong?
If no client is live, I expect w3 to return as `None` in this case, but instead I get an exception.
```
from web3.auto import w3
```
cc @Sebohe
> ~/code/ethtoken/ethtoken/main.py in eip20_token(address, w3, **kwargs)
> 23 '''
> 24 if w3 is None:
> ---> 25 from web3.auto import w3
> 26 if w3 is None:
> 27 raise RuntimeError("Could not auto-detect web3 connection, please supply it as arg w3")
>
> ~/code/ethtoken/venv/lib/python3.5/site-packages/web3/auto/__init__.py in <module>()
> 2
> 3 for connector in ('ipc', 'http'):
> ----> 4 connection = importlib.import_module('web3.auto.' + connector)
> 5 if connection.w3:
> 6 w3 = connection.w3
>
> /usr/lib/python3.5/importlib/__init__.py in import_module(name, package)
> 124 break
> 125 level += 1
> --> 126 return _bootstrap._gcd_import(name[level:], package, level)
> 127
> 128
>
> ~/code/ethtoken/venv/lib/python3.5/site-packages/web3/auto/ipc.py in <module>()
> 14
> 15
> ---> 16 w3 = connect()
>
> ~/code/ethtoken/venv/lib/python3.5/site-packages/web3/auto/ipc.py in connect()
> 8 def connect():
> 9 w3 = Web3(IPCProvider(get_default_ipc_path()))
> ---> 10 if w3.isConnected():
> 11 return w3
> 12
>
> ~/code/ethtoken/venv/lib/python3.5/site-packages/web3/main.py in isConnected(self)
> 155 def isConnected(self):
> 156 for provider in self.providers:
> --> 157 if provider.isConnected():
> 158 return True
> 159 else:
>
> ~/code/ethtoken/venv/lib/python3.5/site-packages/web3/providers/base.py in isConnected(self)
> 73 def isConnected(self):
> 74 try:
> ---> 75 response = self.make_request('web3_clientVersion', [])
> 76 except IOError:
> 77 return False
>
> ~/code/ethtoken/venv/lib/python3.5/site-packages/web3/providers/ipc.py in make_request(self, method, params)
> 139 request = self.encode_rpc_request(method, params)
> 140
> --> 141 with self._lock, self._socket as sock:
> 142 sock.sendall(request)
> 143 raw_response = b""
>
> ~/code/ethtoken/venv/lib/python3.5/site-packages/web3/providers/ipc.py in __enter__(self)
> 37 def __enter__(self):
> 38 if not self.sock:
> ---> 39 self.sock = get_ipc_socket(self.ipc_path)
> 40 return self.sock
> 41
>
> ~/code/ethtoken/venv/lib/python3.5/site-packages/web3/providers/ipc.py in get_ipc_socket(ipc_path, timeout)
> 24 else:
> 25 sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
> ---> 26 sock.connect(ipc_path)
> 27 sock.settimeout(timeout)
> 28 return sock
>
> TypeError: a bytes-like object is required, not 'NoneType'
### How can it be fixed?
* Add a new test to verify the situation, and prevent regressions
* `isConnected` should short-circuit with something like: `if self.ipc_path is None: return False`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `web3/providers/ipc.py`
Content:
```
1 import os
2 import socket
3 import sys
4 import threading
5
6 try:
7 from json import JSONDecodeError
8 except ImportError:
9 JSONDecodeError = ValueError
10
11 from web3.utils.threads import (
12 Timeout,
13 )
14
15 from .base import JSONBaseProvider
16
17
18 def get_ipc_socket(ipc_path, timeout=0.1):
19 if sys.platform == 'win32':
20 # On Windows named pipe is used. Simulate socket with it.
21 from web3.utils.windows import NamedPipe
22
23 return NamedPipe(ipc_path)
24 else:
25 sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
26 sock.connect(ipc_path)
27 sock.settimeout(timeout)
28 return sock
29
30
31 class PersistantSocket(object):
32 sock = None
33
34 def __init__(self, ipc_path):
35 self.ipc_path = ipc_path
36
37 def __enter__(self):
38 if not self.sock:
39 self.sock = get_ipc_socket(self.ipc_path)
40 return self.sock
41
42 def __exit__(self, exc_type, exc_value, traceback):
43 # only close the socket if there was an error
44 if exc_value is not None:
45 try:
46 self.sock.close()
47 except Exception:
48 pass
49 self.sock = None
50
51
52 def get_default_ipc_path(testnet=False):
53 if testnet:
54 testnet = "testnet"
55 else:
56 testnet = ""
57
58 if sys.platform == 'darwin':
59 ipc_path = os.path.expanduser(os.path.join(
60 "~",
61 "Library",
62 "Ethereum",
63 testnet,
64 "geth.ipc"
65 ))
66 if os.path.exists(ipc_path):
67 return ipc_path
68
69 ipc_path = os.path.expanduser(os.path.join(
70 "~",
71 "Library",
72 "Application Support",
73 "io.parity.ethereum",
74 "jsonrpc.ipc"
75 ))
76 if os.path.exists(ipc_path):
77 return ipc_path
78
79 elif sys.platform.startswith('linux'):
80 ipc_path = os.path.expanduser(os.path.join(
81 "~",
82 ".ethereum",
83 testnet,
84 "geth.ipc"
85 ))
86 if os.path.exists(ipc_path):
87 return ipc_path
88
89 ipc_path = os.path.expanduser(os.path.join(
90 "~",
91 ".local",
92 "share",
93 "io.parity.ethereum",
94 "jsonrpc.ipc"
95 ))
96 if os.path.exists(ipc_path):
97 return ipc_path
98
99 elif sys.platform == 'win32':
100 ipc_path = os.path.join(
101 "\\\\",
102 ".",
103 "pipe",
104 "geth.ipc"
105 )
106 if os.path.exists(ipc_path):
107 return ipc_path
108
109 ipc_path = os.path.join(
110 "\\\\",
111 ".",
112 "pipe",
113 "jsonrpc.ipc"
114 )
115 if os.path.exists(ipc_path):
116 return ipc_path
117
118 else:
119 raise ValueError(
120 "Unsupported platform '{0}'. Only darwin/linux2/win32 are "
121 "supported. You must specify the ipc_path".format(sys.platform)
122 )
123
124
125 class IPCProvider(JSONBaseProvider):
126 _socket = None
127
128 def __init__(self, ipc_path=None, testnet=False, *args, **kwargs):
129 if ipc_path is None:
130 self.ipc_path = get_default_ipc_path(testnet)
131 else:
132 self.ipc_path = ipc_path
133
134 self._lock = threading.Lock()
135 self._socket = PersistantSocket(self.ipc_path)
136 super(IPCProvider, self).__init__(*args, **kwargs)
137
138 def make_request(self, method, params):
139 request = self.encode_rpc_request(method, params)
140
141 with self._lock, self._socket as sock:
142 sock.sendall(request)
143 raw_response = b""
144 with Timeout(10) as timeout:
145 while True:
146 try:
147 raw_response += sock.recv(4096)
148 except socket.timeout:
149 timeout.sleep(0)
150 continue
151 if raw_response == b"":
152 timeout.sleep(0)
153 else:
154 try:
155 response = self.decode_rpc_response(raw_response)
156 except JSONDecodeError:
157 timeout.sleep(0)
158 continue
159 else:
160 return response
161
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/web3/providers/ipc.py b/web3/providers/ipc.py
--- a/web3/providers/ipc.py
+++ b/web3/providers/ipc.py
@@ -35,6 +35,9 @@
self.ipc_path = ipc_path
def __enter__(self):
+ if not self.ipc_path:
+ raise FileNotFoundError("cannot connect to IPC socket at path: %r" % self.ipc_path)
+
if not self.sock:
self.sock = get_ipc_socket(self.ipc_path)
return self.sock
|
{"golden_diff": "diff --git a/web3/providers/ipc.py b/web3/providers/ipc.py\n--- a/web3/providers/ipc.py\n+++ b/web3/providers/ipc.py\n@@ -35,6 +35,9 @@\n self.ipc_path = ipc_path\n \n def __enter__(self):\n+ if not self.ipc_path:\n+ raise FileNotFoundError(\"cannot connect to IPC socket at path: %r\" % self.ipc_path)\n+\n if not self.sock:\n self.sock = get_ipc_socket(self.ipc_path)\n return self.sock\n", "issue": "web3.auto raises unclear exception if no client is live\n* Version: 4.0.0-beta.1\r\n* OS: linux\r\n\r\n\r\n### What was wrong?\r\n\r\nIf no client is live, I expect w3 to return as `None` in this case, but instead I get an exception.\r\n```\r\nfrom web3.auto import w3\r\n```\r\n\r\ncc @Sebohe \r\n\r\n> ~/code/ethtoken/ethtoken/main.py in eip20_token(address, w3, **kwargs)\r\n> 23 '''\r\n> 24 if w3 is None:\r\n> ---> 25 from web3.auto import w3\r\n> 26 if w3 is None:\r\n> 27 raise RuntimeError(\"Could not auto-detect web3 connection, please supply it as arg w3\")\r\n> \r\n> ~/code/ethtoken/venv/lib/python3.5/site-packages/web3/auto/__init__.py in <module>()\r\n> 2 \r\n> 3 for connector in ('ipc', 'http'):\r\n> ----> 4 connection = importlib.import_module('web3.auto.' + connector)\r\n> 5 if connection.w3:\r\n> 6 w3 = connection.w3\r\n> \r\n> /usr/lib/python3.5/importlib/__init__.py in import_module(name, package)\r\n> 124 break\r\n> 125 level += 1\r\n> --> 126 return _bootstrap._gcd_import(name[level:], package, level)\r\n> 127 \r\n> 128 \r\n> \r\n> ~/code/ethtoken/venv/lib/python3.5/site-packages/web3/auto/ipc.py in <module>()\r\n> 14 \r\n> 15 \r\n> ---> 16 w3 = connect()\r\n> \r\n> ~/code/ethtoken/venv/lib/python3.5/site-packages/web3/auto/ipc.py in connect()\r\n> 8 def connect():\r\n> 9 w3 = Web3(IPCProvider(get_default_ipc_path()))\r\n> ---> 10 if w3.isConnected():\r\n> 11 return w3\r\n> 12 \r\n> \r\n> ~/code/ethtoken/venv/lib/python3.5/site-packages/web3/main.py in isConnected(self)\r\n> 155 def isConnected(self):\r\n> 156 for provider in self.providers:\r\n> --> 157 if provider.isConnected():\r\n> 158 return True\r\n> 159 else:\r\n> \r\n> ~/code/ethtoken/venv/lib/python3.5/site-packages/web3/providers/base.py in isConnected(self)\r\n> 73 def isConnected(self):\r\n> 74 try:\r\n> ---> 75 response = self.make_request('web3_clientVersion', [])\r\n> 76 except IOError:\r\n> 77 return False\r\n> \r\n> ~/code/ethtoken/venv/lib/python3.5/site-packages/web3/providers/ipc.py in make_request(self, method, params)\r\n> 139 request = self.encode_rpc_request(method, params)\r\n> 140 \r\n> --> 141 with self._lock, self._socket as sock:\r\n> 142 sock.sendall(request)\r\n> 143 raw_response = b\"\"\r\n> \r\n> ~/code/ethtoken/venv/lib/python3.5/site-packages/web3/providers/ipc.py in __enter__(self)\r\n> 37 def __enter__(self):\r\n> 38 if not self.sock:\r\n> ---> 39 self.sock = get_ipc_socket(self.ipc_path)\r\n> 40 return self.sock\r\n> 41 \r\n> \r\n> ~/code/ethtoken/venv/lib/python3.5/site-packages/web3/providers/ipc.py in get_ipc_socket(ipc_path, timeout)\r\n> 24 else:\r\n> 25 sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\r\n> ---> 26 sock.connect(ipc_path)\r\n> 27 sock.settimeout(timeout)\r\n> 28 return sock\r\n> \r\n> TypeError: a bytes-like object is required, not 'NoneType'\r\n\r\n\r\n### How can it be fixed?\r\n\r\n* Add a new test to verify the situation, and prevent regressions\r\n* `isConnected` should short-circuit with something like: `if self.ipc_path is None: return False`\r\n\n", "before_files": [{"content": "import os\nimport socket\nimport sys\nimport 
threading\n\ntry:\n from json import JSONDecodeError\nexcept ImportError:\n JSONDecodeError = ValueError\n\nfrom web3.utils.threads import (\n Timeout,\n)\n\nfrom .base import JSONBaseProvider\n\n\ndef get_ipc_socket(ipc_path, timeout=0.1):\n if sys.platform == 'win32':\n # On Windows named pipe is used. Simulate socket with it.\n from web3.utils.windows import NamedPipe\n\n return NamedPipe(ipc_path)\n else:\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n sock.connect(ipc_path)\n sock.settimeout(timeout)\n return sock\n\n\nclass PersistantSocket(object):\n sock = None\n\n def __init__(self, ipc_path):\n self.ipc_path = ipc_path\n\n def __enter__(self):\n if not self.sock:\n self.sock = get_ipc_socket(self.ipc_path)\n return self.sock\n\n def __exit__(self, exc_type, exc_value, traceback):\n # only close the socket if there was an error\n if exc_value is not None:\n try:\n self.sock.close()\n except Exception:\n pass\n self.sock = None\n\n\ndef get_default_ipc_path(testnet=False):\n if testnet:\n testnet = \"testnet\"\n else:\n testnet = \"\"\n\n if sys.platform == 'darwin':\n ipc_path = os.path.expanduser(os.path.join(\n \"~\",\n \"Library\",\n \"Ethereum\",\n testnet,\n \"geth.ipc\"\n ))\n if os.path.exists(ipc_path):\n return ipc_path\n\n ipc_path = os.path.expanduser(os.path.join(\n \"~\",\n \"Library\",\n \"Application Support\",\n \"io.parity.ethereum\",\n \"jsonrpc.ipc\"\n ))\n if os.path.exists(ipc_path):\n return ipc_path\n\n elif sys.platform.startswith('linux'):\n ipc_path = os.path.expanduser(os.path.join(\n \"~\",\n \".ethereum\",\n testnet,\n \"geth.ipc\"\n ))\n if os.path.exists(ipc_path):\n return ipc_path\n\n ipc_path = os.path.expanduser(os.path.join(\n \"~\",\n \".local\",\n \"share\",\n \"io.parity.ethereum\",\n \"jsonrpc.ipc\"\n ))\n if os.path.exists(ipc_path):\n return ipc_path\n\n elif sys.platform == 'win32':\n ipc_path = os.path.join(\n \"\\\\\\\\\",\n \".\",\n \"pipe\",\n \"geth.ipc\"\n )\n if os.path.exists(ipc_path):\n return ipc_path\n\n ipc_path = os.path.join(\n \"\\\\\\\\\",\n \".\",\n \"pipe\",\n \"jsonrpc.ipc\"\n )\n if os.path.exists(ipc_path):\n return ipc_path\n\n else:\n raise ValueError(\n \"Unsupported platform '{0}'. Only darwin/linux2/win32 are \"\n \"supported. 
You must specify the ipc_path\".format(sys.platform)\n )\n\n\nclass IPCProvider(JSONBaseProvider):\n _socket = None\n\n def __init__(self, ipc_path=None, testnet=False, *args, **kwargs):\n if ipc_path is None:\n self.ipc_path = get_default_ipc_path(testnet)\n else:\n self.ipc_path = ipc_path\n\n self._lock = threading.Lock()\n self._socket = PersistantSocket(self.ipc_path)\n super(IPCProvider, self).__init__(*args, **kwargs)\n\n def make_request(self, method, params):\n request = self.encode_rpc_request(method, params)\n\n with self._lock, self._socket as sock:\n sock.sendall(request)\n raw_response = b\"\"\n with Timeout(10) as timeout:\n while True:\n try:\n raw_response += sock.recv(4096)\n except socket.timeout:\n timeout.sleep(0)\n continue\n if raw_response == b\"\":\n timeout.sleep(0)\n else:\n try:\n response = self.decode_rpc_response(raw_response)\n except JSONDecodeError:\n timeout.sleep(0)\n continue\n else:\n return response\n", "path": "web3/providers/ipc.py"}], "after_files": [{"content": "import os\nimport socket\nimport sys\nimport threading\n\ntry:\n from json import JSONDecodeError\nexcept ImportError:\n JSONDecodeError = ValueError\n\nfrom web3.utils.threads import (\n Timeout,\n)\n\nfrom .base import JSONBaseProvider\n\n\ndef get_ipc_socket(ipc_path, timeout=0.1):\n if sys.platform == 'win32':\n # On Windows named pipe is used. Simulate socket with it.\n from web3.utils.windows import NamedPipe\n\n return NamedPipe(ipc_path)\n else:\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n sock.connect(ipc_path)\n sock.settimeout(timeout)\n return sock\n\n\nclass PersistantSocket(object):\n sock = None\n\n def __init__(self, ipc_path):\n self.ipc_path = ipc_path\n\n def __enter__(self):\n if not self.ipc_path:\n raise FileNotFoundError(\"cannot connect to IPC socket at path: %r\" % self.ipc_path)\n\n if not self.sock:\n self.sock = get_ipc_socket(self.ipc_path)\n return self.sock\n\n def __exit__(self, exc_type, exc_value, traceback):\n # only close the socket if there was an error\n if exc_value is not None:\n try:\n self.sock.close()\n except Exception:\n pass\n self.sock = None\n\n\ndef get_default_ipc_path(testnet=False):\n if testnet:\n testnet = \"testnet\"\n else:\n testnet = \"\"\n\n if sys.platform == 'darwin':\n ipc_path = os.path.expanduser(os.path.join(\n \"~\",\n \"Library\",\n \"Ethereum\",\n testnet,\n \"geth.ipc\"\n ))\n if os.path.exists(ipc_path):\n return ipc_path\n\n ipc_path = os.path.expanduser(os.path.join(\n \"~\",\n \"Library\",\n \"Application Support\",\n \"io.parity.ethereum\",\n \"jsonrpc.ipc\"\n ))\n if os.path.exists(ipc_path):\n return ipc_path\n\n elif sys.platform.startswith('linux'):\n ipc_path = os.path.expanduser(os.path.join(\n \"~\",\n \".ethereum\",\n testnet,\n \"geth.ipc\"\n ))\n if os.path.exists(ipc_path):\n return ipc_path\n\n ipc_path = os.path.expanduser(os.path.join(\n \"~\",\n \".local\",\n \"share\",\n \"io.parity.ethereum\",\n \"jsonrpc.ipc\"\n ))\n if os.path.exists(ipc_path):\n return ipc_path\n\n elif sys.platform == 'win32':\n ipc_path = os.path.join(\n \"\\\\\\\\\",\n \".\",\n \"pipe\",\n \"geth.ipc\"\n )\n if os.path.exists(ipc_path):\n return ipc_path\n\n ipc_path = os.path.join(\n \"\\\\\\\\\",\n \".\",\n \"pipe\",\n \"jsonrpc.ipc\"\n )\n if os.path.exists(ipc_path):\n return ipc_path\n\n else:\n raise ValueError(\n \"Unsupported platform '{0}'. Only darwin/linux2/win32 are \"\n \"supported. 
You must specify the ipc_path\".format(sys.platform)\n )\n\n\nclass IPCProvider(JSONBaseProvider):\n _socket = None\n\n def __init__(self, ipc_path=None, testnet=False, *args, **kwargs):\n if ipc_path is None:\n self.ipc_path = get_default_ipc_path(testnet)\n else:\n self.ipc_path = ipc_path\n\n self._lock = threading.Lock()\n self._socket = PersistantSocket(self.ipc_path)\n super(IPCProvider, self).__init__(*args, **kwargs)\n\n def make_request(self, method, params):\n request = self.encode_rpc_request(method, params)\n\n with self._lock, self._socket as sock:\n sock.sendall(request)\n raw_response = b\"\"\n with Timeout(10) as timeout:\n while True:\n try:\n raw_response += sock.recv(4096)\n except socket.timeout:\n timeout.sleep(0)\n continue\n if raw_response == b\"\":\n timeout.sleep(0)\n else:\n try:\n response = self.decode_rpc_response(raw_response)\n except JSONDecodeError:\n timeout.sleep(0)\n continue\n else:\n return response\n", "path": "web3/providers/ipc.py"}]}
| 2,544 | 119 |
gh_patches_debug_42992
|
rasdani/github-patches
|
git_diff
|
conda-forge__staged-recipes-1395
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add falcon
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `.CI/create_feedstocks.py`
Content:
```
1 #!/usr/bin/env python
2 """
3 Convert all recipes into feedstocks.
4
5 This script is to be run in a TravisCI context, with all secret environment variables defined (BINSTAR_TOKEN, GH_TOKEN)
6 Such as:
7
8 export GH_TOKEN=$(cat ~/.conda-smithy/github.token)
9
10 """
11 from __future__ import print_function
12
13 from conda_build.metadata import MetaData
14 from conda_smithy.github import gh_token
15 from contextlib import contextmanager
16 from datetime import datetime
17 from github import Github, GithubException, Team
18 import os.path
19 from random import choice
20 import shutil
21 import subprocess
22 import tempfile
23
24
25 # Enable DEBUG to run the diagnostics, without actually creating new feedstocks.
26 DEBUG = False
27
28
29 superlative = ['awesome', 'slick', 'formidable', 'awe-inspiring', 'breathtaking',
30 'magnificent', 'wonderous', 'stunning', 'astonishing', 'superb',
31 'splendid', 'impressive', 'unbeatable', 'excellent', 'top', 'outstanding',
32 'exalted', 'standout', 'smashing']
33
34
35 def list_recipes():
36 recipe_directory_name = 'recipes'
37 if os.path.isdir(recipe_directory_name):
38 recipes = os.listdir(recipe_directory_name)
39 else:
40 recipes = []
41
42 for recipe_dir in recipes:
43 # We don't list the "example" feedstock. It is an example, and is there
44 # to be helpful.
45 if recipe_dir.startswith('example'):
46 continue
47 path = os.path.abspath(os.path.join(recipe_directory_name, recipe_dir))
48 yield path, recipe_dir
49
50
51 @contextmanager
52 def tmp_dir(*args, **kwargs):
53 temp_dir = tempfile.mkdtemp(*args, **kwargs)
54 try:
55 yield temp_dir
56 finally:
57 shutil.rmtree(temp_dir)
58
59
60 def repo_exists(organization, name):
61 token = gh_token()
62 gh = Github(token)
63 # Use the organization provided.
64 org = gh.get_organization(organization)
65 try:
66 org.get_repo(name)
67 return True
68 except GithubException as e:
69 if e.status == 404:
70 return False
71 raise
72
73
74 def create_team(org, name, description, repo_names):
75 # PyGithub creates secret teams, and has no way of turning that off! :(
76 post_parameters = {
77 "name": name,
78 "description": description,
79 "privacy": "closed",
80 "permission": "push",
81 "repo_names": repo_names
82 }
83 headers, data = org._requester.requestJsonAndCheck(
84 "POST",
85 org.url + "/teams",
86 input=post_parameters
87 )
88 return Team.Team(org._requester, headers, data, completed=True)
89
90 def print_rate_limiting_info(gh):
91 # Compute some info about our GitHub API Rate Limit.
92 # Note that it doesn't count against our limit to
93 # get this info. So, we should be doing this regularly
94 # to better know when it is going to run out. Also,
95 # this will help us better understand where we are
96 # spending it and how to better optimize it.
97
98 # Get GitHub API Rate Limit usage and total
99 gh_api_remaining, gh_api_total = gh.rate_limiting
100
101 # Compute time until GitHub API Rate Limit reset
102 gh_api_reset_time = gh.rate_limiting_resettime
103 gh_api_reset_time = datetime.utcfromtimestamp(gh_api_reset_time)
104 gh_api_reset_time -= datetime.utcnow()
105
106 print("")
107 print("GitHub API Rate Limit Info:")
108 print("---------------------------")
109 print("Currently remaining {remaining} out of {total}.".format(remaining=gh_api_remaining, total=gh_api_total))
110 print("Will reset in {time}.".format(time=gh_api_reset_time))
111 print("")
112
113
114
115 if __name__ == '__main__':
116 is_merged_pr = (os.environ.get('TRAVIS_BRANCH') == 'master' and os.environ.get('TRAVIS_PULL_REQUEST') == 'false')
117
118 smithy_conf = os.path.expanduser('~/.conda-smithy')
119 if not os.path.exists(smithy_conf):
120 os.mkdir(smithy_conf)
121
122 def write_token(name, token):
123 with open(os.path.join(smithy_conf, name + '.token'), 'w') as fh:
124 fh.write(token)
125 if 'APPVEYOR_TOKEN' in os.environ:
126 write_token('appveyor', os.environ['APPVEYOR_TOKEN'])
127 if 'CIRCLE_TOKEN' in os.environ:
128 write_token('circle', os.environ['CIRCLE_TOKEN'])
129 gh = None
130 if 'GH_TOKEN' in os.environ:
131 write_token('github', os.environ['GH_TOKEN'])
132 gh = Github(os.environ['GH_TOKEN'])
133
134 # Get our initial rate limit info.
135 print_rate_limiting_info(gh)
136
137
138 owner_info = ['--organization', 'conda-forge']
139
140 print('Calculating the recipes which need to be turned into feedstocks.')
141 removed_recipes = []
142 with tmp_dir('__feedstocks') as feedstocks_dir:
143 feedstock_dirs = []
144 for recipe_dir, name in list_recipes():
145 feedstock_dir = os.path.join(feedstocks_dir, name + '-feedstock')
146 os.mkdir(feedstock_dir)
147 print('Making feedstock for {}'.format(name))
148
149 subprocess.check_call(['conda', 'smithy', 'init', recipe_dir,
150 '--feedstock-directory', feedstock_dir])
151 if not is_merged_pr:
152 # We just want to check that conda-smithy is doing its thing without having any metadata issues.
153 continue
154
155 feedstock_dirs.append([feedstock_dir, name, recipe_dir])
156
157 subprocess.check_call(['git', 'remote', 'add', 'upstream_with_token',
158 'https://conda-forge-manager:{}@github.com/conda-forge/{}'.format(os.environ['GH_TOKEN'],
159 os.path.basename(feedstock_dir))],
160 cwd=feedstock_dir)
161
162 # Sometimes we already have the feedstock created. We need to deal with that case.
163 if repo_exists('conda-forge', os.path.basename(feedstock_dir)):
164 subprocess.check_call(['git', 'fetch', 'upstream_with_token'], cwd=feedstock_dir)
165 subprocess.check_call(['git', 'branch', '-m', 'master', 'old'], cwd=feedstock_dir)
166 try:
167 subprocess.check_call(['git', 'checkout', '-b', 'master', 'upstream_with_token/master'], cwd=feedstock_dir)
168 except subprocess.CalledProcessError:
169 # Sometimes, we have a repo, but there are no commits on it! Just catch that case.
170 subprocess.check_call(['git', 'checkout', '-b' 'master'], cwd=feedstock_dir)
171 else:
172 subprocess.check_call(['conda', 'smithy', 'register-github', feedstock_dir] + owner_info)
173
174 conda_forge = None
175 teams = None
176 if gh:
177 # Only get the org and teams if there is stuff to add.
178 if feedstock_dirs:
179 conda_forge = gh.get_organization('conda-forge')
180 teams = {team.name: team for team in conda_forge.get_teams()}
181
182 # Break the previous loop to allow the TravisCI registering to take place only once per function call.
183 # Without this, intermittent failiures to synch the TravisCI repos ensue.
184 all_maintainers = set()
185 for feedstock_dir, name, recipe_dir in feedstock_dirs:
186 subprocess.check_call(['conda', 'smithy', 'register-ci', '--feedstock_directory', feedstock_dir] + owner_info)
187
188 subprocess.check_call(['conda', 'smithy', 'rerender'], cwd=feedstock_dir)
189 subprocess.check_call(['git', 'commit', '-am', "Re-render the feedstock after CI registration."], cwd=feedstock_dir)
190 # Capture the output, as it may contain the GH_TOKEN.
191 out = subprocess.check_output(['git', 'push', 'upstream_with_token', 'master'], cwd=feedstock_dir,
192 stderr=subprocess.STDOUT)
193
194 # Add team members as maintainers.
195 if conda_forge:
196 meta = MetaData(recipe_dir)
197 maintainers = set(meta.meta.get('extra', {}).get('recipe-maintainers', []))
198 all_maintainers.update(maintainers)
199 team_name = name.lower()
200 repo_name = 'conda-forge/{}'.format(os.path.basename(feedstock_dir))
201
202 # Try to get team or create it if it doesn't exist.
203 team = teams.get(team_name)
204 if not team:
205 team = create_team(
206 conda_forge,
207 team_name,
208 'The {} {} contributors!'.format(choice(superlative), team_name),
209 repo_names=[repo_name]
210 )
211 teams[team_name] = team
212 current_maintainers = []
213 else:
214 current_maintainers = team.get_members()
215
216 # Add only the new maintainers to the team.
217 current_maintainers_handles = set([each_maintainers.login.lower() for each_maintainers in current_maintainers])
218 for new_maintainer in maintainers - current_maintainers_handles:
219 headers, data = team._requester.requestJsonAndCheck(
220 "PUT",
221 team.url + "/memberships/" + new_maintainer
222 )
223 # Mention any maintainers that need to be removed (unlikely here).
224 for old_maintainer in current_maintainers_handles - maintainers:
225 print("AN OLD MEMBER ({}) NEEDS TO BE REMOVED FROM {}".format(old_maintainer, repo_name))
226
227 # Remove this recipe from the repo.
228 removed_recipes.append(name)
229 if is_merged_pr:
230 subprocess.check_call(['git', 'rm', '-r', recipe_dir])
231
232 # Add new conda-forge members to all-members team. Welcome! :)
233 if conda_forge:
234 team_name = 'all-members'
235 team = teams.get(team_name)
236 if not team:
237 team = create_team(
238 conda_forge,
239 team_name,
240 'All of the awesome conda-forge contributors!',
241 []
242 )
243 teams[team_name] = team
244 current_members = []
245 else:
246 current_members = team.get_members()
247
248 # Add only the new members to the team.
249 current_members_handles = set([each_member.login.lower() for each_member in current_members])
250 for new_member in all_maintainers - current_members_handles:
251 print("Adding a new member ({}) to conda-forge. Welcome! :)".format(new_member))
252 headers, data = team._requester.requestJsonAndCheck(
253 "PUT",
254 team.url + "/memberships/" + new_member
255 )
256
257 # Commit any removed packages.
258 subprocess.check_call(['git', 'status'])
259 if removed_recipes:
260 subprocess.check_call(['git', 'checkout', os.environ.get('TRAVIS_BRANCH')])
261 msg = ('Removed recipe{s} ({}) after converting into feedstock{s}. '
262 '[ci skip]'.format(', '.join(removed_recipes),
263 s=('s' if len(removed_recipes) > 1 else '')))
264 if is_merged_pr:
265 # Capture the output, as it may contain the GH_TOKEN.
266 out = subprocess.check_output(['git', 'remote', 'add', 'upstream_with_token',
267 'https://conda-forge-manager:{}@github.com/conda-forge/staged-recipes'.format(os.environ['GH_TOKEN'])],
268 stderr=subprocess.STDOUT)
269 subprocess.check_call(['git', 'commit', '-m', msg])
270 # Capture the output, as it may contain the GH_TOKEN.
271 out = subprocess.check_output(['git', 'push', 'upstream_with_token', os.environ.get('TRAVIS_BRANCH')],
272 stderr=subprocess.STDOUT)
273 else:
274 print('Would git commit, with the following message: \n {}'.format(msg))
275
276 if gh:
277 # Get our final rate limit info.
278 print_rate_limiting_info(gh)
279
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/.CI/create_feedstocks.py b/.CI/create_feedstocks.py
--- a/.CI/create_feedstocks.py
+++ b/.CI/create_feedstocks.py
@@ -32,8 +32,8 @@
'exalted', 'standout', 'smashing']
+recipe_directory_name = 'recipes'
def list_recipes():
- recipe_directory_name = 'recipes'
if os.path.isdir(recipe_directory_name):
recipes = os.listdir(recipe_directory_name)
else:
@@ -138,7 +138,6 @@
owner_info = ['--organization', 'conda-forge']
print('Calculating the recipes which need to be turned into feedstocks.')
- removed_recipes = []
with tmp_dir('__feedstocks') as feedstocks_dir:
feedstock_dirs = []
for recipe_dir, name in list_recipes():
@@ -180,7 +179,7 @@
teams = {team.name: team for team in conda_forge.get_teams()}
# Break the previous loop to allow the TravisCI registering to take place only once per function call.
- # Without this, intermittent failiures to synch the TravisCI repos ensue.
+ # Without this, intermittent failures to synch the TravisCI repos ensue.
all_maintainers = set()
for feedstock_dir, name, recipe_dir in feedstock_dirs:
subprocess.check_call(['conda', 'smithy', 'register-ci', '--feedstock_directory', feedstock_dir] + owner_info)
@@ -225,7 +224,6 @@
print("AN OLD MEMBER ({}) NEEDS TO BE REMOVED FROM {}".format(old_maintainer, repo_name))
# Remove this recipe from the repo.
- removed_recipes.append(name)
if is_merged_pr:
subprocess.check_call(['git', 'rm', '-r', recipe_dir])
@@ -254,10 +252,34 @@
team.url + "/memberships/" + new_member
)
+ # Update status based on the remote.
+ subprocess.check_call(['git', 'stash', '--keep-index', '--include-untracked'])
+ subprocess.check_call(['git', 'fetch'])
+ subprocess.check_call(['git', 'rebase', '--autostash'])
+ subprocess.check_call(['git', 'add', '.'])
+ try:
+ subprocess.check_call(['git', 'stash', 'pop'])
+ except subprocess.CalledProcessError:
+ # In case there was nothing to stash.
+ # Finish quietly.
+ pass
+
+ # Generate a fresh listing of recipes removed.
+ # This gets pretty ugly as we parse `git status --porcelain`.
+ #
+ # * Each line we get back is a change to a file in the recipe directory.
+ # * We narrow the list down to recipes that are staged for deletion (ignores examples).
+ # * Then we clean up the list so that it only has the recipe names.
+ removed_recipes = subprocess.check_output(['git', 'status', '--porcelain', recipe_directory_name],
+ universal_newlines=True)
+ removed_recipes = removed_recipes.splitlines()
+ removed_recipes = filter(lambda _: _.startswith("D "), removed_recipes)
+ removed_recipes = list(map(lambda _ : _.replace("D", "", 1).lstrip(), removed_recipes))
+ removed_recipes = list(set(map(lambda _ : os.path.basename(os.path.dirname(_)), removed_recipes)))
+
# Commit any removed packages.
subprocess.check_call(['git', 'status'])
if removed_recipes:
- subprocess.check_call(['git', 'checkout', os.environ.get('TRAVIS_BRANCH')])
msg = ('Removed recipe{s} ({}) after converting into feedstock{s}. '
'[ci skip]'.format(', '.join(removed_recipes),
s=('s' if len(removed_recipes) > 1 else '')))
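
The diff above stops threading a `removed_recipes` list through the feedstock loop; instead it syncs the checkout (`stash`/`fetch`/`rebase`/`add`) and then derives the removed recipes directly from `git status --porcelain`. A minimal, self-contained sketch of just that parsing step — the directory name and sample output format are assumptions for illustration, not part of the patch:

```python
# Minimal sketch of the porcelain-parsing step introduced above; assumes a git
# checkout where converted recipes have been staged for deletion under recipes/.
import os
import subprocess

recipe_directory_name = "recipes"

status = subprocess.check_output(
    ["git", "status", "--porcelain", recipe_directory_name],
    universal_newlines=True,
)
# Each porcelain line looks like "D  recipes/foo/meta.yaml"; keep only staged
# deletions, strip the status prefix, then reduce each path to its recipe name.
deleted = [line for line in status.splitlines() if line.startswith("D ")]
paths = [line.replace("D", "", 1).lstrip() for line in deleted]
removed_recipes = sorted({os.path.basename(os.path.dirname(path)) for path in paths})
print(removed_recipes)
```

Recomputing the list from git's own index sidesteps the stale bookkeeping that the old in-loop `removed_recipes.append(name)` produced.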
|
{"golden_diff": "diff --git a/.CI/create_feedstocks.py b/.CI/create_feedstocks.py\n--- a/.CI/create_feedstocks.py\n+++ b/.CI/create_feedstocks.py\n@@ -32,8 +32,8 @@\n 'exalted', 'standout', 'smashing']\n \n \n+recipe_directory_name = 'recipes'\n def list_recipes():\n- recipe_directory_name = 'recipes'\n if os.path.isdir(recipe_directory_name):\n recipes = os.listdir(recipe_directory_name)\n else:\n@@ -138,7 +138,6 @@\n owner_info = ['--organization', 'conda-forge']\n \n print('Calculating the recipes which need to be turned into feedstocks.')\n- removed_recipes = []\n with tmp_dir('__feedstocks') as feedstocks_dir:\n feedstock_dirs = []\n for recipe_dir, name in list_recipes():\n@@ -180,7 +179,7 @@\n teams = {team.name: team for team in conda_forge.get_teams()}\n \n # Break the previous loop to allow the TravisCI registering to take place only once per function call.\n- # Without this, intermittent failiures to synch the TravisCI repos ensue.\n+ # Without this, intermittent failures to synch the TravisCI repos ensue.\n all_maintainers = set()\n for feedstock_dir, name, recipe_dir in feedstock_dirs:\n subprocess.check_call(['conda', 'smithy', 'register-ci', '--feedstock_directory', feedstock_dir] + owner_info)\n@@ -225,7 +224,6 @@\n print(\"AN OLD MEMBER ({}) NEEDS TO BE REMOVED FROM {}\".format(old_maintainer, repo_name))\n \n # Remove this recipe from the repo.\n- removed_recipes.append(name)\n if is_merged_pr:\n subprocess.check_call(['git', 'rm', '-r', recipe_dir])\n \n@@ -254,10 +252,34 @@\n team.url + \"/memberships/\" + new_member\n )\n \n+ # Update status based on the remote.\n+ subprocess.check_call(['git', 'stash', '--keep-index', '--include-untracked'])\n+ subprocess.check_call(['git', 'fetch'])\n+ subprocess.check_call(['git', 'rebase', '--autostash'])\n+ subprocess.check_call(['git', 'add', '.'])\n+ try:\n+ subprocess.check_call(['git', 'stash', 'pop'])\n+ except subprocess.CalledProcessError:\n+ # In case there was nothing to stash.\n+ # Finish quietly.\n+ pass\n+\n+ # Generate a fresh listing of recipes removed.\n+ # This gets pretty ugly as we parse `git status --porcelain`.\n+ #\n+ # * Each line we get back is a change to a file in the recipe directory.\n+ # * We narrow the list down to recipes that are staged for deletion (ignores examples).\n+ # * Then we clean up the list so that it only has the recipe names.\n+ removed_recipes = subprocess.check_output(['git', 'status', '--porcelain', recipe_directory_name],\n+ universal_newlines=True)\n+ removed_recipes = removed_recipes.splitlines()\n+ removed_recipes = filter(lambda _: _.startswith(\"D \"), removed_recipes)\n+ removed_recipes = list(map(lambda _ : _.replace(\"D\", \"\", 1).lstrip(), removed_recipes))\n+ removed_recipes = list(set(map(lambda _ : os.path.basename(os.path.dirname(_)), removed_recipes)))\n+\n # Commit any removed packages.\n subprocess.check_call(['git', 'status'])\n if removed_recipes:\n- subprocess.check_call(['git', 'checkout', os.environ.get('TRAVIS_BRANCH')])\n msg = ('Removed recipe{s} ({}) after converting into feedstock{s}. 
'\n '[ci skip]'.format(', '.join(removed_recipes),\n s=('s' if len(removed_recipes) > 1 else '')))\n", "issue": "Add falcon\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nConvert all recipes into feedstocks.\n\nThis script is to be run in a TravisCI context, with all secret environment variables defined (BINSTAR_TOKEN, GH_TOKEN)\nSuch as:\n\n export GH_TOKEN=$(cat ~/.conda-smithy/github.token)\n\n\"\"\"\nfrom __future__ import print_function\n\nfrom conda_build.metadata import MetaData\nfrom conda_smithy.github import gh_token\nfrom contextlib import contextmanager\nfrom datetime import datetime\nfrom github import Github, GithubException, Team\nimport os.path\nfrom random import choice\nimport shutil\nimport subprocess\nimport tempfile\n\n\n# Enable DEBUG to run the diagnostics, without actually creating new feedstocks.\nDEBUG = False\n\n\nsuperlative = ['awesome', 'slick', 'formidable', 'awe-inspiring', 'breathtaking',\n 'magnificent', 'wonderous', 'stunning', 'astonishing', 'superb',\n 'splendid', 'impressive', 'unbeatable', 'excellent', 'top', 'outstanding',\n 'exalted', 'standout', 'smashing']\n\n\ndef list_recipes():\n recipe_directory_name = 'recipes'\n if os.path.isdir(recipe_directory_name):\n recipes = os.listdir(recipe_directory_name)\n else:\n recipes = []\n\n for recipe_dir in recipes:\n # We don't list the \"example\" feedstock. It is an example, and is there\n # to be helpful.\n if recipe_dir.startswith('example'):\n continue\n path = os.path.abspath(os.path.join(recipe_directory_name, recipe_dir))\n yield path, recipe_dir\n\n\n@contextmanager\ndef tmp_dir(*args, **kwargs):\n temp_dir = tempfile.mkdtemp(*args, **kwargs)\n try:\n yield temp_dir\n finally:\n shutil.rmtree(temp_dir)\n\n\ndef repo_exists(organization, name):\n token = gh_token()\n gh = Github(token)\n # Use the organization provided.\n org = gh.get_organization(organization)\n try:\n org.get_repo(name)\n return True\n except GithubException as e:\n if e.status == 404:\n return False\n raise\n\n\ndef create_team(org, name, description, repo_names):\n # PyGithub creates secret teams, and has no way of turning that off! :(\n post_parameters = {\n \"name\": name,\n \"description\": description,\n \"privacy\": \"closed\",\n \"permission\": \"push\",\n \"repo_names\": repo_names\n }\n headers, data = org._requester.requestJsonAndCheck(\n \"POST\",\n org.url + \"/teams\",\n input=post_parameters\n )\n return Team.Team(org._requester, headers, data, completed=True)\n\ndef print_rate_limiting_info(gh):\n # Compute some info about our GitHub API Rate Limit.\n # Note that it doesn't count against our limit to\n # get this info. So, we should be doing this regularly\n # to better know when it is going to run out. 
Also,\n # this will help us better understand where we are\n # spending it and how to better optimize it.\n\n # Get GitHub API Rate Limit usage and total\n gh_api_remaining, gh_api_total = gh.rate_limiting\n\n # Compute time until GitHub API Rate Limit reset\n gh_api_reset_time = gh.rate_limiting_resettime\n gh_api_reset_time = datetime.utcfromtimestamp(gh_api_reset_time)\n gh_api_reset_time -= datetime.utcnow()\n\n print(\"\")\n print(\"GitHub API Rate Limit Info:\")\n print(\"---------------------------\")\n print(\"Currently remaining {remaining} out of {total}.\".format(remaining=gh_api_remaining, total=gh_api_total))\n print(\"Will reset in {time}.\".format(time=gh_api_reset_time))\n print(\"\")\n\n\n\nif __name__ == '__main__':\n is_merged_pr = (os.environ.get('TRAVIS_BRANCH') == 'master' and os.environ.get('TRAVIS_PULL_REQUEST') == 'false')\n\n smithy_conf = os.path.expanduser('~/.conda-smithy')\n if not os.path.exists(smithy_conf):\n os.mkdir(smithy_conf)\n\n def write_token(name, token):\n with open(os.path.join(smithy_conf, name + '.token'), 'w') as fh:\n fh.write(token)\n if 'APPVEYOR_TOKEN' in os.environ:\n write_token('appveyor', os.environ['APPVEYOR_TOKEN'])\n if 'CIRCLE_TOKEN' in os.environ:\n write_token('circle', os.environ['CIRCLE_TOKEN'])\n gh = None\n if 'GH_TOKEN' in os.environ:\n write_token('github', os.environ['GH_TOKEN'])\n gh = Github(os.environ['GH_TOKEN'])\n\n # Get our initial rate limit info.\n print_rate_limiting_info(gh)\n\n\n owner_info = ['--organization', 'conda-forge']\n\n print('Calculating the recipes which need to be turned into feedstocks.')\n removed_recipes = []\n with tmp_dir('__feedstocks') as feedstocks_dir:\n feedstock_dirs = []\n for recipe_dir, name in list_recipes():\n feedstock_dir = os.path.join(feedstocks_dir, name + '-feedstock')\n os.mkdir(feedstock_dir)\n print('Making feedstock for {}'.format(name))\n\n subprocess.check_call(['conda', 'smithy', 'init', recipe_dir,\n '--feedstock-directory', feedstock_dir])\n if not is_merged_pr:\n # We just want to check that conda-smithy is doing its thing without having any metadata issues.\n continue\n\n feedstock_dirs.append([feedstock_dir, name, recipe_dir])\n\n subprocess.check_call(['git', 'remote', 'add', 'upstream_with_token',\n 'https://conda-forge-manager:{}@github.com/conda-forge/{}'.format(os.environ['GH_TOKEN'],\n os.path.basename(feedstock_dir))],\n cwd=feedstock_dir)\n\n # Sometimes we already have the feedstock created. We need to deal with that case.\n if repo_exists('conda-forge', os.path.basename(feedstock_dir)):\n subprocess.check_call(['git', 'fetch', 'upstream_with_token'], cwd=feedstock_dir)\n subprocess.check_call(['git', 'branch', '-m', 'master', 'old'], cwd=feedstock_dir)\n try:\n subprocess.check_call(['git', 'checkout', '-b', 'master', 'upstream_with_token/master'], cwd=feedstock_dir)\n except subprocess.CalledProcessError:\n # Sometimes, we have a repo, but there are no commits on it! 
Just catch that case.\n subprocess.check_call(['git', 'checkout', '-b' 'master'], cwd=feedstock_dir)\n else:\n subprocess.check_call(['conda', 'smithy', 'register-github', feedstock_dir] + owner_info)\n\n conda_forge = None\n teams = None\n if gh:\n # Only get the org and teams if there is stuff to add.\n if feedstock_dirs:\n conda_forge = gh.get_organization('conda-forge')\n teams = {team.name: team for team in conda_forge.get_teams()}\n\n # Break the previous loop to allow the TravisCI registering to take place only once per function call.\n # Without this, intermittent failiures to synch the TravisCI repos ensue.\n all_maintainers = set()\n for feedstock_dir, name, recipe_dir in feedstock_dirs:\n subprocess.check_call(['conda', 'smithy', 'register-ci', '--feedstock_directory', feedstock_dir] + owner_info)\n\n subprocess.check_call(['conda', 'smithy', 'rerender'], cwd=feedstock_dir)\n subprocess.check_call(['git', 'commit', '-am', \"Re-render the feedstock after CI registration.\"], cwd=feedstock_dir)\n # Capture the output, as it may contain the GH_TOKEN.\n out = subprocess.check_output(['git', 'push', 'upstream_with_token', 'master'], cwd=feedstock_dir,\n stderr=subprocess.STDOUT)\n\n # Add team members as maintainers.\n if conda_forge:\n meta = MetaData(recipe_dir)\n maintainers = set(meta.meta.get('extra', {}).get('recipe-maintainers', []))\n all_maintainers.update(maintainers)\n team_name = name.lower()\n repo_name = 'conda-forge/{}'.format(os.path.basename(feedstock_dir))\n\n # Try to get team or create it if it doesn't exist.\n team = teams.get(team_name)\n if not team:\n team = create_team(\n conda_forge,\n team_name,\n 'The {} {} contributors!'.format(choice(superlative), team_name),\n repo_names=[repo_name]\n )\n teams[team_name] = team\n current_maintainers = []\n else:\n current_maintainers = team.get_members()\n\n # Add only the new maintainers to the team.\n current_maintainers_handles = set([each_maintainers.login.lower() for each_maintainers in current_maintainers])\n for new_maintainer in maintainers - current_maintainers_handles:\n headers, data = team._requester.requestJsonAndCheck(\n \"PUT\",\n team.url + \"/memberships/\" + new_maintainer\n )\n # Mention any maintainers that need to be removed (unlikely here).\n for old_maintainer in current_maintainers_handles - maintainers:\n print(\"AN OLD MEMBER ({}) NEEDS TO BE REMOVED FROM {}\".format(old_maintainer, repo_name))\n\n # Remove this recipe from the repo.\n removed_recipes.append(name)\n if is_merged_pr:\n subprocess.check_call(['git', 'rm', '-r', recipe_dir])\n\n # Add new conda-forge members to all-members team. Welcome! :)\n if conda_forge:\n team_name = 'all-members'\n team = teams.get(team_name)\n if not team:\n team = create_team(\n conda_forge,\n team_name,\n 'All of the awesome conda-forge contributors!',\n []\n )\n teams[team_name] = team\n current_members = []\n else:\n current_members = team.get_members()\n\n # Add only the new members to the team.\n current_members_handles = set([each_member.login.lower() for each_member in current_members])\n for new_member in all_maintainers - current_members_handles:\n print(\"Adding a new member ({}) to conda-forge. Welcome! 
:)\".format(new_member))\n headers, data = team._requester.requestJsonAndCheck(\n \"PUT\",\n team.url + \"/memberships/\" + new_member\n )\n\n # Commit any removed packages.\n subprocess.check_call(['git', 'status'])\n if removed_recipes:\n subprocess.check_call(['git', 'checkout', os.environ.get('TRAVIS_BRANCH')])\n msg = ('Removed recipe{s} ({}) after converting into feedstock{s}. '\n '[ci skip]'.format(', '.join(removed_recipes),\n s=('s' if len(removed_recipes) > 1 else '')))\n if is_merged_pr:\n # Capture the output, as it may contain the GH_TOKEN.\n out = subprocess.check_output(['git', 'remote', 'add', 'upstream_with_token',\n 'https://conda-forge-manager:{}@github.com/conda-forge/staged-recipes'.format(os.environ['GH_TOKEN'])],\n stderr=subprocess.STDOUT)\n subprocess.check_call(['git', 'commit', '-m', msg])\n # Capture the output, as it may contain the GH_TOKEN.\n out = subprocess.check_output(['git', 'push', 'upstream_with_token', os.environ.get('TRAVIS_BRANCH')],\n stderr=subprocess.STDOUT)\n else:\n print('Would git commit, with the following message: \\n {}'.format(msg))\n\n if gh:\n # Get our final rate limit info.\n print_rate_limiting_info(gh)\n", "path": ".CI/create_feedstocks.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nConvert all recipes into feedstocks.\n\nThis script is to be run in a TravisCI context, with all secret environment variables defined (BINSTAR_TOKEN, GH_TOKEN)\nSuch as:\n\n export GH_TOKEN=$(cat ~/.conda-smithy/github.token)\n\n\"\"\"\nfrom __future__ import print_function\n\nfrom conda_build.metadata import MetaData\nfrom conda_smithy.github import gh_token\nfrom contextlib import contextmanager\nfrom datetime import datetime\nfrom github import Github, GithubException, Team\nimport os.path\nfrom random import choice\nimport shutil\nimport subprocess\nimport tempfile\n\n\n# Enable DEBUG to run the diagnostics, without actually creating new feedstocks.\nDEBUG = False\n\n\nsuperlative = ['awesome', 'slick', 'formidable', 'awe-inspiring', 'breathtaking',\n 'magnificent', 'wonderous', 'stunning', 'astonishing', 'superb',\n 'splendid', 'impressive', 'unbeatable', 'excellent', 'top', 'outstanding',\n 'exalted', 'standout', 'smashing']\n\n\nrecipe_directory_name = 'recipes'\ndef list_recipes():\n if os.path.isdir(recipe_directory_name):\n recipes = os.listdir(recipe_directory_name)\n else:\n recipes = []\n\n for recipe_dir in recipes:\n # We don't list the \"example\" feedstock. It is an example, and is there\n # to be helpful.\n if recipe_dir.startswith('example'):\n continue\n path = os.path.abspath(os.path.join(recipe_directory_name, recipe_dir))\n yield path, recipe_dir\n\n\n@contextmanager\ndef tmp_dir(*args, **kwargs):\n temp_dir = tempfile.mkdtemp(*args, **kwargs)\n try:\n yield temp_dir\n finally:\n shutil.rmtree(temp_dir)\n\n\ndef repo_exists(organization, name):\n token = gh_token()\n gh = Github(token)\n # Use the organization provided.\n org = gh.get_organization(organization)\n try:\n org.get_repo(name)\n return True\n except GithubException as e:\n if e.status == 404:\n return False\n raise\n\n\ndef create_team(org, name, description, repo_names):\n # PyGithub creates secret teams, and has no way of turning that off! 
:(\n post_parameters = {\n \"name\": name,\n \"description\": description,\n \"privacy\": \"closed\",\n \"permission\": \"push\",\n \"repo_names\": repo_names\n }\n headers, data = org._requester.requestJsonAndCheck(\n \"POST\",\n org.url + \"/teams\",\n input=post_parameters\n )\n return Team.Team(org._requester, headers, data, completed=True)\n\ndef print_rate_limiting_info(gh):\n # Compute some info about our GitHub API Rate Limit.\n # Note that it doesn't count against our limit to\n # get this info. So, we should be doing this regularly\n # to better know when it is going to run out. Also,\n # this will help us better understand where we are\n # spending it and how to better optimize it.\n\n # Get GitHub API Rate Limit usage and total\n gh_api_remaining, gh_api_total = gh.rate_limiting\n\n # Compute time until GitHub API Rate Limit reset\n gh_api_reset_time = gh.rate_limiting_resettime\n gh_api_reset_time = datetime.utcfromtimestamp(gh_api_reset_time)\n gh_api_reset_time -= datetime.utcnow()\n\n print(\"\")\n print(\"GitHub API Rate Limit Info:\")\n print(\"---------------------------\")\n print(\"Currently remaining {remaining} out of {total}.\".format(remaining=gh_api_remaining, total=gh_api_total))\n print(\"Will reset in {time}.\".format(time=gh_api_reset_time))\n print(\"\")\n\n\n\nif __name__ == '__main__':\n is_merged_pr = (os.environ.get('TRAVIS_BRANCH') == 'master' and os.environ.get('TRAVIS_PULL_REQUEST') == 'false')\n\n smithy_conf = os.path.expanduser('~/.conda-smithy')\n if not os.path.exists(smithy_conf):\n os.mkdir(smithy_conf)\n\n def write_token(name, token):\n with open(os.path.join(smithy_conf, name + '.token'), 'w') as fh:\n fh.write(token)\n if 'APPVEYOR_TOKEN' in os.environ:\n write_token('appveyor', os.environ['APPVEYOR_TOKEN'])\n if 'CIRCLE_TOKEN' in os.environ:\n write_token('circle', os.environ['CIRCLE_TOKEN'])\n gh = None\n if 'GH_TOKEN' in os.environ:\n write_token('github', os.environ['GH_TOKEN'])\n gh = Github(os.environ['GH_TOKEN'])\n\n # Get our initial rate limit info.\n print_rate_limiting_info(gh)\n\n\n owner_info = ['--organization', 'conda-forge']\n\n print('Calculating the recipes which need to be turned into feedstocks.')\n with tmp_dir('__feedstocks') as feedstocks_dir:\n feedstock_dirs = []\n for recipe_dir, name in list_recipes():\n feedstock_dir = os.path.join(feedstocks_dir, name + '-feedstock')\n os.mkdir(feedstock_dir)\n print('Making feedstock for {}'.format(name))\n\n subprocess.check_call(['conda', 'smithy', 'init', recipe_dir,\n '--feedstock-directory', feedstock_dir])\n if not is_merged_pr:\n # We just want to check that conda-smithy is doing its thing without having any metadata issues.\n continue\n\n feedstock_dirs.append([feedstock_dir, name, recipe_dir])\n\n subprocess.check_call(['git', 'remote', 'add', 'upstream_with_token',\n 'https://conda-forge-manager:{}@github.com/conda-forge/{}'.format(os.environ['GH_TOKEN'],\n os.path.basename(feedstock_dir))],\n cwd=feedstock_dir)\n\n # Sometimes we already have the feedstock created. We need to deal with that case.\n if repo_exists('conda-forge', os.path.basename(feedstock_dir)):\n subprocess.check_call(['git', 'fetch', 'upstream_with_token'], cwd=feedstock_dir)\n subprocess.check_call(['git', 'branch', '-m', 'master', 'old'], cwd=feedstock_dir)\n try:\n subprocess.check_call(['git', 'checkout', '-b', 'master', 'upstream_with_token/master'], cwd=feedstock_dir)\n except subprocess.CalledProcessError:\n # Sometimes, we have a repo, but there are no commits on it! 
Just catch that case.\n subprocess.check_call(['git', 'checkout', '-b' 'master'], cwd=feedstock_dir)\n else:\n subprocess.check_call(['conda', 'smithy', 'register-github', feedstock_dir] + owner_info)\n\n conda_forge = None\n teams = None\n if gh:\n # Only get the org and teams if there is stuff to add.\n if feedstock_dirs:\n conda_forge = gh.get_organization('conda-forge')\n teams = {team.name: team for team in conda_forge.get_teams()}\n\n # Break the previous loop to allow the TravisCI registering to take place only once per function call.\n # Without this, intermittent failures to synch the TravisCI repos ensue.\n all_maintainers = set()\n for feedstock_dir, name, recipe_dir in feedstock_dirs:\n subprocess.check_call(['conda', 'smithy', 'register-ci', '--feedstock_directory', feedstock_dir] + owner_info)\n\n subprocess.check_call(['conda', 'smithy', 'rerender'], cwd=feedstock_dir)\n subprocess.check_call(['git', 'commit', '-am', \"Re-render the feedstock after CI registration.\"], cwd=feedstock_dir)\n # Capture the output, as it may contain the GH_TOKEN.\n out = subprocess.check_output(['git', 'push', 'upstream_with_token', 'master'], cwd=feedstock_dir,\n stderr=subprocess.STDOUT)\n\n # Add team members as maintainers.\n if conda_forge:\n meta = MetaData(recipe_dir)\n maintainers = set(meta.meta.get('extra', {}).get('recipe-maintainers', []))\n all_maintainers.update(maintainers)\n team_name = name.lower()\n repo_name = 'conda-forge/{}'.format(os.path.basename(feedstock_dir))\n\n # Try to get team or create it if it doesn't exist.\n team = teams.get(team_name)\n if not team:\n team = create_team(\n conda_forge,\n team_name,\n 'The {} {} contributors!'.format(choice(superlative), team_name),\n repo_names=[repo_name]\n )\n teams[team_name] = team\n current_maintainers = []\n else:\n current_maintainers = team.get_members()\n\n # Add only the new maintainers to the team.\n current_maintainers_handles = set([each_maintainers.login.lower() for each_maintainers in current_maintainers])\n for new_maintainer in maintainers - current_maintainers_handles:\n headers, data = team._requester.requestJsonAndCheck(\n \"PUT\",\n team.url + \"/memberships/\" + new_maintainer\n )\n # Mention any maintainers that need to be removed (unlikely here).\n for old_maintainer in current_maintainers_handles - maintainers:\n print(\"AN OLD MEMBER ({}) NEEDS TO BE REMOVED FROM {}\".format(old_maintainer, repo_name))\n\n # Remove this recipe from the repo.\n if is_merged_pr:\n subprocess.check_call(['git', 'rm', '-r', recipe_dir])\n\n # Add new conda-forge members to all-members team. Welcome! :)\n if conda_forge:\n team_name = 'all-members'\n team = teams.get(team_name)\n if not team:\n team = create_team(\n conda_forge,\n team_name,\n 'All of the awesome conda-forge contributors!',\n []\n )\n teams[team_name] = team\n current_members = []\n else:\n current_members = team.get_members()\n\n # Add only the new members to the team.\n current_members_handles = set([each_member.login.lower() for each_member in current_members])\n for new_member in all_maintainers - current_members_handles:\n print(\"Adding a new member ({}) to conda-forge. Welcome! 
:)\".format(new_member))\n headers, data = team._requester.requestJsonAndCheck(\n \"PUT\",\n team.url + \"/memberships/\" + new_member\n )\n\n # Update status based on the remote.\n subprocess.check_call(['git', 'stash', '--keep-index', '--include-untracked'])\n subprocess.check_call(['git', 'fetch'])\n subprocess.check_call(['git', 'rebase', '--autostash'])\n subprocess.check_call(['git', 'add', '.'])\n try:\n subprocess.check_call(['git', 'stash', 'pop'])\n except subprocess.CalledProcessError:\n # In case there was nothing to stash.\n # Finish quietly.\n pass\n\n # Generate a fresh listing of recipes removed.\n # This gets pretty ugly as we parse `git status --porcelain`.\n #\n # * Each line we get back is a change to a file in the recipe directory.\n # * We narrow the list down to recipes that are staged for deletion (ignores examples).\n # * Then we clean up the list so that it only has the recipe names.\n removed_recipes = subprocess.check_output(['git', 'status', '--porcelain', recipe_directory_name],\n universal_newlines=True)\n removed_recipes = removed_recipes.splitlines()\n removed_recipes = filter(lambda _: _.startswith(\"D \"), removed_recipes)\n removed_recipes = list(map(lambda _ : _.replace(\"D\", \"\", 1).lstrip(), removed_recipes))\n removed_recipes = list(set(map(lambda _ : os.path.basename(os.path.dirname(_)), removed_recipes)))\n\n # Commit any removed packages.\n subprocess.check_call(['git', 'status'])\n if removed_recipes:\n msg = ('Removed recipe{s} ({}) after converting into feedstock{s}. '\n '[ci skip]'.format(', '.join(removed_recipes),\n s=('s' if len(removed_recipes) > 1 else '')))\n if is_merged_pr:\n # Capture the output, as it may contain the GH_TOKEN.\n out = subprocess.check_output(['git', 'remote', 'add', 'upstream_with_token',\n 'https://conda-forge-manager:{}@github.com/conda-forge/staged-recipes'.format(os.environ['GH_TOKEN'])],\n stderr=subprocess.STDOUT)\n subprocess.check_call(['git', 'commit', '-m', msg])\n # Capture the output, as it may contain the GH_TOKEN.\n out = subprocess.check_output(['git', 'push', 'upstream_with_token', os.environ.get('TRAVIS_BRANCH')],\n stderr=subprocess.STDOUT)\n else:\n print('Would git commit, with the following message: \\n {}'.format(msg))\n\n if gh:\n # Get our final rate limit info.\n print_rate_limiting_info(gh)\n", "path": ".CI/create_feedstocks.py"}]}
| 3,586 | 857 |
gh_patches_debug_5077
|
rasdani/github-patches
|
git_diff
|
stephenmcd__mezzanine-358
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Dynamic page permissions aren't called on the content model
The documentation mentions a `can_add` method to define on a custom page model. When this is checked in `page_tags.set_page_permissions`, it is only called on the `Page` instance, never using `Page.get_content_model()`.
I will submit a pull request to fix this, but I'm not sure where the check should go -- is it better in the Page class, having the base `can_add` method delegate to `get_content_model().can_add`, or should it go in the template tag?
Dynamic page permissions aren't called on the content model
The documentation mentions a `can_add` method to define on a custom page model. When this is checked in `page_tags.set_page_permissions`, it is only called on the `Page` instance, never using `Page.get_content_model()`.
I will submit a pull request to fix this, but I'm not sure where the check should go -- is it better in the Page class, having the base `can_add` method delegate to `get_content_model().can_add`, or should it go in the template tag?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mezzanine/pages/templatetags/pages_tags.py`
Content:
```
1
2 from collections import defaultdict
3
4 from django.core.exceptions import ImproperlyConfigured
5 from django.core.urlresolvers import reverse, NoReverseMatch
6 from django.template import TemplateSyntaxError, Variable
7 from django.template.loader import get_template
8 from django.utils.translation import ugettext_lazy as _
9
10 from mezzanine.conf import settings
11 from mezzanine.pages.models import Page
12 from mezzanine.utils.urls import admin_url
13 from mezzanine import template
14
15
16 register = template.Library()
17
18
19 @register.render_tag
20 def page_menu(context, token):
21 """
22 Return a list of child pages for the given parent, storing all
23 pages in a dict in the context when first called using parents as keys
24 for retrieval on subsequent recursive calls from the menu template.
25 """
26 # First arg could be the menu template file name, or the parent page.
27 # Also allow for both to be used.
28 template_name = None
29 parent_page = None
30 parts = token.split_contents()[1:]
31 for part in parts:
32 part = Variable(part).resolve(context)
33 if isinstance(part, unicode):
34 template_name = part
35 elif isinstance(part, Page):
36 parent_page = part
37 if template_name is None:
38 try:
39 template_name = context["menu_template_name"]
40 except KeyError:
41 error = "No template found for page_menu in: %s" % parts
42 raise TemplateSyntaxError(error)
43 context["menu_template_name"] = template_name
44 if "menu_pages" not in context:
45 try:
46 user = context["request"].user
47 slug = context["request"].path
48 except KeyError:
49 user = None
50 slug = ""
51 num_children = lambda id: lambda: len(context["menu_pages"][id])
52 has_children = lambda id: lambda: num_children(id)() > 0
53 published = Page.objects.published(for_user=user)
54 if slug == admin_url(Page, "changelist"):
55 related = [m.__name__.lower() for m in Page.get_content_models()]
56 published = published.select_related(*related)
57 else:
58 published = published.select_related(depth=2)
59 # Store the current page being viewed in the context. Used
60 # for comparisons in page.set_menu_helpers.
61 if "page" not in context:
62 try:
63 context["_current_page"] = published.get(slug=slug)
64 except Page.DoesNotExist:
65 context["_current_page"] = None
66 elif slug:
67 context["_current_page"] = context["page"]
68 # Maintain a dict of page IDs -> parent IDs for fast
69 # lookup in setting page.is_current_or_ascendant in
70 # page.set_menu_helpers.
71 context["_parent_page_ids"] = {}
72 pages = defaultdict(list)
73 for page in published.order_by("_order"):
74 page.set_helpers(context)
75 context["_parent_page_ids"][page.id] = page.parent_id
76 setattr(page, "num_children", num_children(page.id))
77 setattr(page, "has_children", has_children(page.id))
78 pages[page.parent_id].append(page)
79 context["menu_pages"] = pages
80 context["on_home"] = slug == reverse("home")
81 # ``branch_level`` must be stored against each page so that the
82 # calculation of it is correctly applied. This looks weird but if we do
83 # the ``branch_level`` as a separate arg to the template tag with the
84 # addition performed on it, the addition occurs each time the template
85 # tag is called rather than once per level.
86 context["branch_level"] = 0
87 parent_page_id = None
88 if parent_page is not None:
89 context["branch_level"] = getattr(parent_page, "branch_level", 0) + 1
90 parent_page_id = parent_page.id
91
92 context["page_branch"] = context["menu_pages"].get(parent_page_id, [])
93 context["page_branch_in_menu"] = False
94 for page in context["page_branch"]:
95 # footer/nav for backward compatibility.
96 page.in_footer = page.in_navigation = page.in_menu = True
97 for i, l, t in settings.PAGE_MENU_TEMPLATES:
98 if not unicode(i) in page.in_menus and t == template_name:
99 page.in_navigation = page.in_menu = False
100 if "footer" in template_name:
101 page.in_footer = False
102 break
103 if page.in_menu:
104 context["page_branch_in_menu"] = True
105 # Backwards compatibility
106 context['page_branch_in_navigation'] = context["page_branch_in_menu"]
107 context['page_branch_in_footer'] = (context["page_branch_in_menu"] and
108 template_name == "pages/menu/footer.html")
109
110 for i, page in enumerate(context["page_branch"]):
111 context["page_branch"][i].branch_level = context["branch_level"]
112 context["page_branch"][i].parent = parent_page
113 t = get_template(template_name)
114 return t.render(context)
115
116
117 @register.as_tag
118 def models_for_pages(*args):
119 """
120 Create a select list containing each of the models that subclass the
121 ``Page`` model.
122 """
123 page_models = []
124 for model in Page.get_content_models():
125 try:
126 admin_url(model, "add")
127 except NoReverseMatch:
128 continue
129 else:
130 setattr(model, "name", model._meta.verbose_name)
131 setattr(model, "add_url", admin_url(model, "add"))
132 page_models.append(model)
133 return page_models
134
135
136 @register.render_tag
137 def set_model_permissions(context, token):
138 """
139 Assigns a permissions dict to the given model, much like Django
140 does with its dashboard app list.
141
142 Used within the change list for pages, to implement permission
143 checks for the navigation tree.
144 """
145 model = context[token.split_contents()[1]]
146 opts = model._meta
147 perm_name = opts.app_label + ".%s_" + opts.object_name.lower()
148 request = context["request"]
149 setattr(model, "perms", {})
150 for perm_type in ("add", "change", "delete"):
151 model.perms[perm_type] = request.user.has_perm(perm_name % perm_type)
152 return ""
153
154
155 @register.render_tag
156 def set_page_permissions(context, token):
157 """
158 Assigns a permissions dict to the given page instance, combining
159 Django's permission for the page's model and a permission check
160 against the instance itself calling the page's ``can_add``,
161 ``can_change`` and ``can_delete`` custom methods.
162
163 Used within the change list for pages, to implement permission
164 checks for the navigation tree.
165 """
166 page = context[token.split_contents()[1]]
167 model = page.get_content_model()
168 try:
169 opts = model._meta
170 except AttributeError:
171 # A missing inner Meta class usually means the Page model
172 # hasn't been directly subclassed.
173 error = _("An error occured with the following class. Does "
174 "it subclass Page directly?")
175 raise ImproperlyConfigured(error + " '%s'" % model.__class__.__name__)
176 perm_name = opts.app_label + ".%s_" + opts.object_name.lower()
177 request = context["request"]
178 setattr(page, "perms", {})
179 for perm_type in ("add", "change", "delete"):
180 perm = request.user.has_perm(perm_name % perm_type)
181 perm = perm and getattr(page, "can_%s" % perm_type)(request)
182 page.perms[perm_type] = perm
183 return ""
184
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mezzanine/pages/templatetags/pages_tags.py b/mezzanine/pages/templatetags/pages_tags.py
--- a/mezzanine/pages/templatetags/pages_tags.py
+++ b/mezzanine/pages/templatetags/pages_tags.py
@@ -178,6 +178,6 @@
setattr(page, "perms", {})
for perm_type in ("add", "change", "delete"):
perm = request.user.has_perm(perm_name % perm_type)
- perm = perm and getattr(page, "can_%s" % perm_type)(request)
+ perm = perm and getattr(model, "can_%s" % perm_type)(request)
page.perms[perm_type] = perm
return ""
|
{"golden_diff": "diff --git a/mezzanine/pages/templatetags/pages_tags.py b/mezzanine/pages/templatetags/pages_tags.py\n--- a/mezzanine/pages/templatetags/pages_tags.py\n+++ b/mezzanine/pages/templatetags/pages_tags.py\n@@ -178,6 +178,6 @@\n setattr(page, \"perms\", {})\n for perm_type in (\"add\", \"change\", \"delete\"):\n perm = request.user.has_perm(perm_name % perm_type)\n- perm = perm and getattr(page, \"can_%s\" % perm_type)(request)\n+ perm = perm and getattr(model, \"can_%s\" % perm_type)(request)\n page.perms[perm_type] = perm\n return \"\"\n", "issue": "Dynamic page permissions aren't called on the content model\nThe documentation mentions a `can_add` method to define on a a custom page model. When this is called in `page_tags.set_page_permissions`, it only calls in on the `Page` instance, never using `Page.get_content_model()`.\n\nI will submit a pull request to fix this, but I'm not sure where the check should go -- is it better in the Page class, having the base `can_add` method delegate to `get_content_model().can_add`, or should it go in the template tag?\n\nDynamic page permissions aren't called on the content model\nThe documentation mentions a `can_add` method to define on a a custom page model. When this is called in `page_tags.set_page_permissions`, it only calls in on the `Page` instance, never using `Page.get_content_model()`.\n\nI will submit a pull request to fix this, but I'm not sure where the check should go -- is it better in the Page class, having the base `can_add` method delegate to `get_content_model().can_add`, or should it go in the template tag?\n\n", "before_files": [{"content": "\nfrom collections import defaultdict\n\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.core.urlresolvers import reverse, NoReverseMatch\nfrom django.template import TemplateSyntaxError, Variable\nfrom django.template.loader import get_template\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom mezzanine.conf import settings\nfrom mezzanine.pages.models import Page\nfrom mezzanine.utils.urls import admin_url\nfrom mezzanine import template\n\n\nregister = template.Library()\n\n\[email protected]_tag\ndef page_menu(context, token):\n \"\"\"\n Return a list of child pages for the given parent, storing all\n pages in a dict in the context when first called using parents as keys\n for retrieval on subsequent recursive calls from the menu template.\n \"\"\"\n # First arg could be the menu template file name, or the parent page.\n # Also allow for both to be used.\n template_name = None\n parent_page = None\n parts = token.split_contents()[1:]\n for part in parts:\n part = Variable(part).resolve(context)\n if isinstance(part, unicode):\n template_name = part\n elif isinstance(part, Page):\n parent_page = part\n if template_name is None:\n try:\n template_name = context[\"menu_template_name\"]\n except KeyError:\n error = \"No template found for page_menu in: %s\" % parts\n raise TemplateSyntaxError(error)\n context[\"menu_template_name\"] = template_name\n if \"menu_pages\" not in context:\n try:\n user = context[\"request\"].user\n slug = context[\"request\"].path\n except KeyError:\n user = None\n slug = \"\"\n num_children = lambda id: lambda: len(context[\"menu_pages\"][id])\n has_children = lambda id: lambda: num_children(id)() > 0\n published = Page.objects.published(for_user=user)\n if slug == admin_url(Page, \"changelist\"):\n related = [m.__name__.lower() for m in Page.get_content_models()]\n published = published.select_related(*related)\n 
else:\n published = published.select_related(depth=2)\n # Store the current page being viewed in the context. Used\n # for comparisons in page.set_menu_helpers.\n if \"page\" not in context:\n try:\n context[\"_current_page\"] = published.get(slug=slug)\n except Page.DoesNotExist:\n context[\"_current_page\"] = None\n elif slug:\n context[\"_current_page\"] = context[\"page\"]\n # Maintain a dict of page IDs -> parent IDs for fast\n # lookup in setting page.is_current_or_ascendant in\n # page.set_menu_helpers.\n context[\"_parent_page_ids\"] = {}\n pages = defaultdict(list)\n for page in published.order_by(\"_order\"):\n page.set_helpers(context)\n context[\"_parent_page_ids\"][page.id] = page.parent_id\n setattr(page, \"num_children\", num_children(page.id))\n setattr(page, \"has_children\", has_children(page.id))\n pages[page.parent_id].append(page)\n context[\"menu_pages\"] = pages\n context[\"on_home\"] = slug == reverse(\"home\")\n # ``branch_level`` must be stored against each page so that the\n # calculation of it is correctly applied. This looks weird but if we do\n # the ``branch_level`` as a separate arg to the template tag with the\n # addition performed on it, the addition occurs each time the template\n # tag is called rather than once per level.\n context[\"branch_level\"] = 0\n parent_page_id = None\n if parent_page is not None:\n context[\"branch_level\"] = getattr(parent_page, \"branch_level\", 0) + 1\n parent_page_id = parent_page.id\n\n context[\"page_branch\"] = context[\"menu_pages\"].get(parent_page_id, [])\n context[\"page_branch_in_menu\"] = False\n for page in context[\"page_branch\"]:\n # footer/nav for backward compatibility.\n page.in_footer = page.in_navigation = page.in_menu = True\n for i, l, t in settings.PAGE_MENU_TEMPLATES:\n if not unicode(i) in page.in_menus and t == template_name:\n page.in_navigation = page.in_menu = False\n if \"footer\" in template_name:\n page.in_footer = False\n break\n if page.in_menu:\n context[\"page_branch_in_menu\"] = True\n # Backwards compatibility\n context['page_branch_in_navigation'] = context[\"page_branch_in_menu\"]\n context['page_branch_in_footer'] = (context[\"page_branch_in_menu\"] and\n template_name == \"pages/menu/footer.html\")\n\n for i, page in enumerate(context[\"page_branch\"]):\n context[\"page_branch\"][i].branch_level = context[\"branch_level\"]\n context[\"page_branch\"][i].parent = parent_page\n t = get_template(template_name)\n return t.render(context)\n\n\[email protected]_tag\ndef models_for_pages(*args):\n \"\"\"\n Create a select list containing each of the models that subclass the\n ``Page`` model.\n \"\"\"\n page_models = []\n for model in Page.get_content_models():\n try:\n admin_url(model, \"add\")\n except NoReverseMatch:\n continue\n else:\n setattr(model, \"name\", model._meta.verbose_name)\n setattr(model, \"add_url\", admin_url(model, \"add\"))\n page_models.append(model)\n return page_models\n\n\[email protected]_tag\ndef set_model_permissions(context, token):\n \"\"\"\n Assigns a permissions dict to the given model, much like Django\n does with its dashboard app list.\n\n Used within the change list for pages, to implement permission\n checks for the navigation tree.\n \"\"\"\n model = context[token.split_contents()[1]]\n opts = model._meta\n perm_name = opts.app_label + \".%s_\" + opts.object_name.lower()\n request = context[\"request\"]\n setattr(model, \"perms\", {})\n for perm_type in (\"add\", \"change\", \"delete\"):\n model.perms[perm_type] = request.user.has_perm(perm_name % 
perm_type)\n return \"\"\n\n\[email protected]_tag\ndef set_page_permissions(context, token):\n \"\"\"\n Assigns a permissions dict to the given page instance, combining\n Django's permission for the page's model and a permission check\n against the instance itself calling the page's ``can_add``,\n ``can_change`` and ``can_delete`` custom methods.\n\n Used within the change list for pages, to implement permission\n checks for the navigation tree.\n \"\"\"\n page = context[token.split_contents()[1]]\n model = page.get_content_model()\n try:\n opts = model._meta\n except AttributeError:\n # A missing inner Meta class usually means the Page model\n # hasn't been directly subclassed.\n error = _(\"An error occured with the following class. Does \"\n \"it subclass Page directly?\")\n raise ImproperlyConfigured(error + \" '%s'\" % model.__class__.__name__)\n perm_name = opts.app_label + \".%s_\" + opts.object_name.lower()\n request = context[\"request\"]\n setattr(page, \"perms\", {})\n for perm_type in (\"add\", \"change\", \"delete\"):\n perm = request.user.has_perm(perm_name % perm_type)\n perm = perm and getattr(page, \"can_%s\" % perm_type)(request)\n page.perms[perm_type] = perm\n return \"\"\n", "path": "mezzanine/pages/templatetags/pages_tags.py"}], "after_files": [{"content": "\nfrom collections import defaultdict\n\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.core.urlresolvers import reverse, NoReverseMatch\nfrom django.template import TemplateSyntaxError, Variable\nfrom django.template.loader import get_template\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom mezzanine.conf import settings\nfrom mezzanine.pages.models import Page\nfrom mezzanine.utils.urls import admin_url\nfrom mezzanine import template\n\n\nregister = template.Library()\n\n\[email protected]_tag\ndef page_menu(context, token):\n \"\"\"\n Return a list of child pages for the given parent, storing all\n pages in a dict in the context when first called using parents as keys\n for retrieval on subsequent recursive calls from the menu template.\n \"\"\"\n # First arg could be the menu template file name, or the parent page.\n # Also allow for both to be used.\n template_name = None\n parent_page = None\n parts = token.split_contents()[1:]\n for part in parts:\n part = Variable(part).resolve(context)\n if isinstance(part, unicode):\n template_name = part\n elif isinstance(part, Page):\n parent_page = part\n if template_name is None:\n try:\n template_name = context[\"menu_template_name\"]\n except KeyError:\n error = \"No template found for page_menu in: %s\" % parts\n raise TemplateSyntaxError(error)\n context[\"menu_template_name\"] = template_name\n if \"menu_pages\" not in context:\n try:\n user = context[\"request\"].user\n slug = context[\"request\"].path\n except KeyError:\n user = None\n slug = \"\"\n num_children = lambda id: lambda: len(context[\"menu_pages\"][id])\n has_children = lambda id: lambda: num_children(id)() > 0\n published = Page.objects.published(for_user=user)\n if slug == admin_url(Page, \"changelist\"):\n related = [m.__name__.lower() for m in Page.get_content_models()]\n published = published.select_related(*related)\n else:\n published = published.select_related(depth=2)\n # Store the current page being viewed in the context. 
Used\n # for comparisons in page.set_menu_helpers.\n if \"page\" not in context:\n try:\n context[\"_current_page\"] = published.get(slug=slug)\n except Page.DoesNotExist:\n context[\"_current_page\"] = None\n elif slug:\n context[\"_current_page\"] = context[\"page\"]\n # Maintain a dict of page IDs -> parent IDs for fast\n # lookup in setting page.is_current_or_ascendant in\n # page.set_menu_helpers.\n context[\"_parent_page_ids\"] = {}\n pages = defaultdict(list)\n for page in published.order_by(\"_order\"):\n page.set_helpers(context)\n context[\"_parent_page_ids\"][page.id] = page.parent_id\n setattr(page, \"num_children\", num_children(page.id))\n setattr(page, \"has_children\", has_children(page.id))\n pages[page.parent_id].append(page)\n context[\"menu_pages\"] = pages\n context[\"on_home\"] = slug == reverse(\"home\")\n # ``branch_level`` must be stored against each page so that the\n # calculation of it is correctly applied. This looks weird but if we do\n # the ``branch_level`` as a separate arg to the template tag with the\n # addition performed on it, the addition occurs each time the template\n # tag is called rather than once per level.\n context[\"branch_level\"] = 0\n parent_page_id = None\n if parent_page is not None:\n context[\"branch_level\"] = getattr(parent_page, \"branch_level\", 0) + 1\n parent_page_id = parent_page.id\n\n context[\"page_branch\"] = context[\"menu_pages\"].get(parent_page_id, [])\n context[\"page_branch_in_menu\"] = False\n for page in context[\"page_branch\"]:\n # footer/nav for backward compatibility.\n page.in_footer = page.in_navigation = page.in_menu = True\n for i, l, t in settings.PAGE_MENU_TEMPLATES:\n if not unicode(i) in page.in_menus and t == template_name:\n page.in_navigation = page.in_menu = False\n if \"footer\" in template_name:\n page.in_footer = False\n break\n if page.in_menu:\n context[\"page_branch_in_menu\"] = True\n # Backwards compatibility\n context['page_branch_in_navigation'] = context[\"page_branch_in_menu\"]\n context['page_branch_in_footer'] = (context[\"page_branch_in_menu\"] and\n template_name == \"pages/menu/footer.html\")\n\n for i, page in enumerate(context[\"page_branch\"]):\n context[\"page_branch\"][i].branch_level = context[\"branch_level\"]\n context[\"page_branch\"][i].parent = parent_page\n t = get_template(template_name)\n return t.render(context)\n\n\[email protected]_tag\ndef models_for_pages(*args):\n \"\"\"\n Create a select list containing each of the models that subclass the\n ``Page`` model.\n \"\"\"\n page_models = []\n for model in Page.get_content_models():\n try:\n admin_url(model, \"add\")\n except NoReverseMatch:\n continue\n else:\n setattr(model, \"name\", model._meta.verbose_name)\n setattr(model, \"add_url\", admin_url(model, \"add\"))\n page_models.append(model)\n return page_models\n\n\[email protected]_tag\ndef set_model_permissions(context, token):\n \"\"\"\n Assigns a permissions dict to the given model, much like Django\n does with its dashboard app list.\n\n Used within the change list for pages, to implement permission\n checks for the navigation tree.\n \"\"\"\n model = context[token.split_contents()[1]]\n opts = model._meta\n perm_name = opts.app_label + \".%s_\" + opts.object_name.lower()\n request = context[\"request\"]\n setattr(model, \"perms\", {})\n for perm_type in (\"add\", \"change\", \"delete\"):\n model.perms[perm_type] = request.user.has_perm(perm_name % perm_type)\n return \"\"\n\n\[email protected]_tag\ndef set_page_permissions(context, token):\n \"\"\"\n Assigns a 
permissions dict to the given page instance, combining\n Django's permission for the page's model and a permission check\n against the instance itself calling the page's ``can_add``,\n ``can_change`` and ``can_delete`` custom methods.\n\n Used within the change list for pages, to implement permission\n checks for the navigation tree.\n \"\"\"\n page = context[token.split_contents()[1]]\n model = page.get_content_model()\n try:\n opts = model._meta\n except AttributeError:\n # A missing inner Meta class usually means the Page model\n # hasn't been directly subclassed.\n error = _(\"An error occured with the following class. Does \"\n \"it subclass Page directly?\")\n raise ImproperlyConfigured(error + \" '%s'\" % model.__class__.__name__)\n perm_name = opts.app_label + \".%s_\" + opts.object_name.lower()\n request = context[\"request\"]\n setattr(page, \"perms\", {})\n for perm_type in (\"add\", \"change\", \"delete\"):\n perm = request.user.has_perm(perm_name % perm_type)\n perm = perm and getattr(model, \"can_%s\" % perm_type)(request)\n page.perms[perm_type] = perm\n return \"\"\n", "path": "mezzanine/pages/templatetags/pages_tags.py"}]}
| 2,543 | 164 |
gh_patches_debug_15192
|
rasdani/github-patches
|
git_diff
|
SeldonIO__MLServer-339
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
mlserver --version fails (0.5.0)
```
mlserver --version
Traceback (most recent call last):
File "/home/clive/anaconda3/envs/mlserver/bin/mlserver", line 8, in <module>
sys.exit(main())
File "/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/mlserver/cli/main.py", line 45, in main
root()
File "/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/click/core.py", line 781, in main
with self.make_context(prog_name, args, **extra) as ctx:
File "/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/click/core.py", line 700, in make_context
self.parse_args(ctx, args)
File "/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/click/core.py", line 1212, in parse_args
rest = Command.parse_args(self, ctx, args)
File "/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/click/core.py", line 1048, in parse_args
value, args = param.handle_parse_result(ctx, opts, args)
File "/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/click/core.py", line 1630, in handle_parse_result
value = invoke_param_callback(self.callback, ctx, self, value)
File "/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/click/core.py", line 123, in invoke_param_callback
return callback(ctx, param, value)
File "/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/click/decorators.py", line 295, in callback
raise RuntimeError("Could not determine version")
RuntimeError: Could not determine version
(mlserver) /home/clive $ pip freeze | grep mlserver
mlserver==0.5.0
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import os
2
3 from typing import Dict
4 from setuptools import setup, find_packages
5
6 ROOT_PATH = os.path.dirname(__file__)
7 PKG_NAME = "mlserver"
8 PKG_PATH = os.path.join(ROOT_PATH, PKG_NAME)
9
10
11 def _load_version() -> str:
12 version = ""
13 version_path = os.path.join(PKG_PATH, "version.py")
14 with open(version_path) as fp:
15 version_module: Dict[str, str] = {}
16 exec(fp.read(), version_module)
17 version = version_module["__version__"]
18
19 return version
20
21
22 def _load_description() -> str:
23 readme_path = os.path.join(ROOT_PATH, "README.md")
24 with open(readme_path) as fp:
25 return fp.read()
26
27
28 setup(
29 name=PKG_NAME,
30 version=_load_version(),
31 url="https://github.com/SeldonIO/MLServer.git",
32 author="Seldon Technologies Ltd.",
33 author_email="[email protected]",
34 description="ML server",
35 packages=find_packages(exclude=["tests", "tests.*"]),
36 install_requires=[
37 "grpcio",
38 "protobuf",
39 # We pin version of fastapi
40 # check https://github.com/SeldonIO/MLServer/issues/340
41 "fastapi==0.68.2",
42 "uvicorn",
43 "click",
44 "numpy",
45 "pandas",
46 ],
47 extras_require={"all": ["orjson"]},
48 entry_points={"console_scripts": ["mlserver=mlserver.cli:main"]},
49 long_description=_load_description(),
50 long_description_content_type="text/markdown",
51 license="Apache 2.0",
52 )
53
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -34,15 +34,16 @@
description="ML server",
packages=find_packages(exclude=["tests", "tests.*"]),
install_requires=[
- "grpcio",
- "protobuf",
+ "click",
# We pin version of fastapi
# check https://github.com/SeldonIO/MLServer/issues/340
"fastapi==0.68.2",
- "uvicorn",
- "click",
+ "grpcio",
+ "importlib-metadata;python_version<'3.8'",
"numpy",
"pandas",
+ "protobuf",
+ "uvicorn",
],
extras_require={"all": ["orjson"]},
entry_points={"console_scripts": ["mlserver=mlserver.cli:main"]},
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -34,15 +34,16 @@\n description=\"ML server\",\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n install_requires=[\n- \"grpcio\",\n- \"protobuf\",\n+ \"click\",\n # We pin version of fastapi\n # check https://github.com/SeldonIO/MLServer/issues/340\n \"fastapi==0.68.2\",\n- \"uvicorn\",\n- \"click\",\n+ \"grpcio\",\n+ \"importlib-metadata;python_version<'3.8'\",\n \"numpy\",\n \"pandas\",\n+ \"protobuf\",\n+ \"uvicorn\",\n ],\n extras_require={\"all\": [\"orjson\"]},\n entry_points={\"console_scripts\": [\"mlserver=mlserver.cli:main\"]},\n", "issue": "mlserver --version fails (0.5.0)\n```\r\nmlserver --version\r\nTraceback (most recent call last):\r\n File \"/home/clive/anaconda3/envs/mlserver/bin/mlserver\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/mlserver/cli/main.py\", line 45, in main\r\n root()\r\n File \"/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/click/core.py\", line 829, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/click/core.py\", line 781, in main\r\n with self.make_context(prog_name, args, **extra) as ctx:\r\n File \"/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/click/core.py\", line 700, in make_context\r\n self.parse_args(ctx, args)\r\n File \"/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/click/core.py\", line 1212, in parse_args\r\n rest = Command.parse_args(self, ctx, args)\r\n File \"/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/click/core.py\", line 1048, in parse_args\r\n value, args = param.handle_parse_result(ctx, opts, args)\r\n File \"/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/click/core.py\", line 1630, in handle_parse_result\r\n value = invoke_param_callback(self.callback, ctx, self, value)\r\n File \"/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/click/core.py\", line 123, in invoke_param_callback\r\n return callback(ctx, param, value)\r\n File \"/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/click/decorators.py\", line 295, in callback\r\n raise RuntimeError(\"Could not determine version\")\r\nRuntimeError: Could not determine version\r\n(mlserver) /home/clive $ pip freeze | grep mlserver\r\nmlserver==0.5.0\r\n```\n", "before_files": [{"content": "import os\n\nfrom typing import Dict\nfrom setuptools import setup, find_packages\n\nROOT_PATH = os.path.dirname(__file__)\nPKG_NAME = \"mlserver\"\nPKG_PATH = os.path.join(ROOT_PATH, PKG_NAME)\n\n\ndef _load_version() -> str:\n version = \"\"\n version_path = os.path.join(PKG_PATH, \"version.py\")\n with open(version_path) as fp:\n version_module: Dict[str, str] = {}\n exec(fp.read(), version_module)\n version = version_module[\"__version__\"]\n\n return version\n\n\ndef _load_description() -> str:\n readme_path = os.path.join(ROOT_PATH, \"README.md\")\n with open(readme_path) as fp:\n return fp.read()\n\n\nsetup(\n name=PKG_NAME,\n version=_load_version(),\n url=\"https://github.com/SeldonIO/MLServer.git\",\n author=\"Seldon Technologies Ltd.\",\n author_email=\"[email protected]\",\n description=\"ML server\",\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n install_requires=[\n \"grpcio\",\n \"protobuf\",\n # We pin version of fastapi\n # check https://github.com/SeldonIO/MLServer/issues/340\n \"fastapi==0.68.2\",\n 
\"uvicorn\",\n \"click\",\n \"numpy\",\n \"pandas\",\n ],\n extras_require={\"all\": [\"orjson\"]},\n entry_points={\"console_scripts\": [\"mlserver=mlserver.cli:main\"]},\n long_description=_load_description(),\n long_description_content_type=\"text/markdown\",\n license=\"Apache 2.0\",\n)\n", "path": "setup.py"}], "after_files": [{"content": "import os\n\nfrom typing import Dict\nfrom setuptools import setup, find_packages\n\nROOT_PATH = os.path.dirname(__file__)\nPKG_NAME = \"mlserver\"\nPKG_PATH = os.path.join(ROOT_PATH, PKG_NAME)\n\n\ndef _load_version() -> str:\n version = \"\"\n version_path = os.path.join(PKG_PATH, \"version.py\")\n with open(version_path) as fp:\n version_module: Dict[str, str] = {}\n exec(fp.read(), version_module)\n version = version_module[\"__version__\"]\n\n return version\n\n\ndef _load_description() -> str:\n readme_path = os.path.join(ROOT_PATH, \"README.md\")\n with open(readme_path) as fp:\n return fp.read()\n\n\nsetup(\n name=PKG_NAME,\n version=_load_version(),\n url=\"https://github.com/SeldonIO/MLServer.git\",\n author=\"Seldon Technologies Ltd.\",\n author_email=\"[email protected]\",\n description=\"ML server\",\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n install_requires=[\n \"click\",\n # We pin version of fastapi\n # check https://github.com/SeldonIO/MLServer/issues/340\n \"fastapi==0.68.2\",\n \"grpcio\",\n \"importlib-metadata;python_version<'3.8'\",\n \"numpy\",\n \"pandas\",\n \"protobuf\",\n \"uvicorn\",\n ],\n extras_require={\"all\": [\"orjson\"]},\n entry_points={\"console_scripts\": [\"mlserver=mlserver.cli:main\"]},\n long_description=_load_description(),\n long_description_content_type=\"text/markdown\",\n license=\"Apache 2.0\",\n)\n", "path": "setup.py"}]}
| 1,226 | 195 |
gh_patches_debug_30223
|
rasdani/github-patches
|
git_diff
|
SeldonIO__MLServer-1346
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support for NaN in np.ndarray codec?
Sending/receiving a `numpy` array containing NaN values to/from an mlserver with the `NumpyCodec` results in a `ValueError: Out of range float values are not JSON compliant` error. Is this a known limitation, and are there any good workarounds that would still allow me to use a pre-packaged server? I understand that it would probably be bad to assume that `NaN` can be mapped to `null` in the JSON, but maybe that could be an option?
--- END ISSUE ---
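
For orientation, a minimal sketch of how such a payload is built (this assumes an installed `mlserver` and the `NumpyCodec` API shown in the files below; the actual failure happens later, when the request or response is serialised to strict JSON):

```python
# Sketch only: encode a NumPy array that contains NaN into a V2 request input.
import numpy as np
from mlserver.codecs import NumpyCodec

payload = np.array([[1.0, float("nan"), 3.0]])
request_input = NumpyCodec.encode_input("input-0", payload)

# The flattened data still carries a bare NaN, which strict JSON encoders
# (allow_nan=False) reject with
# "ValueError: Out of range float values are not JSON compliant".
print(request_input.data)
```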
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlserver/codecs/numpy.py`
Content:
```
1 import numpy as np
2
3 from typing import Any
4
5 from ..types import RequestInput, ResponseOutput, Parameters
6
7 from .base import InputCodec, register_input_codec, register_request_codec
8 from .utils import SingleInputRequestCodec, InputOrOutput, inject_batch_dimension
9 from .lists import is_list_of
10 from .string import encode_str
11
12 _DatatypeToNumpy = {
13 "BOOL": "bool",
14 "UINT8": "uint8",
15 "UINT16": "uint16",
16 "UINT32": "uint32",
17 "UINT64": "uint64",
18 "INT8": "int8",
19 "INT16": "int16",
20 "INT32": "int32",
21 "INT64": "int64",
22 "FP16": "float16",
23 "FP32": "float32",
24 "FP64": "float64",
25 "BYTES": "bytes",
26 }
27
28 _NumpyToDatatype = {value: key for key, value in _DatatypeToNumpy.items()}
29
30 # NOTE: numpy has more types than v2 protocol
31 _NumpyToDatatype["object"] = "BYTES"
32 _NumpyToDatatype["S"] = "BYTES"
33 _NumpyToDatatype["U"] = "BYTES"
34
35
36 def to_dtype(input_or_output: InputOrOutput) -> "np.dtype":
37 dtype = _DatatypeToNumpy[input_or_output.datatype]
38
39 if input_or_output.datatype == "BYTES":
40 data = getattr(input_or_output.data, "__root__", input_or_output.data)
41 if is_list_of(data, str):
42 # Handle special case of strings being treated as Numpy arrays
43 return np.dtype(str)
44
45 # bytes have variable size, so need to specify as part of type
46 # TODO: Make elem size variable (and not just the last dimension)
47 elem_size = input_or_output.shape[-1]
48 return np.dtype((dtype, elem_size))
49
50 return np.dtype(dtype)
51
52
53 def to_datatype(dtype: np.dtype) -> str:
54 as_str = str(dtype)
55
56 if as_str not in _NumpyToDatatype:
57 # If not present, try with kind
58 as_str = getattr(dtype, "kind")
59
60 datatype = _NumpyToDatatype[as_str]
61
62 return datatype
63
64
65 def _to_ndarray(input_or_output: InputOrOutput) -> np.ndarray:
66 data = getattr(input_or_output.data, "__root__", input_or_output.data)
67 dtype = to_dtype(input_or_output)
68
69 if input_or_output.datatype == "BYTES":
70 if is_list_of(data, bytes):
71 # If the inputs is of type `BYTES`, there could be multiple "lists"
72 # serialised into multiple buffers.
73 # We will deserialise all of them and concatenate them together.
74 decoded = [np.frombuffer(buffer, dtype) for buffer in data]
75 return np.concatenate(decoded)
76
77 return np.array(data, dtype)
78
79
80 def _encode_data(data: np.ndarray, datatype: str) -> list:
81 if datatype == "BYTES":
82 if np.issubdtype(data.dtype, str):
83 # Handle special case of a string Numpy array, where the diff elems
84 # need to be encoded as well
85 as_list = data.flatten().tolist()
86 return list(map(encode_str, as_list))
87
88 if np.issubdtype(data.dtype, bytes):
89 # `tobytes` is way faster than tolist, although it's harder to serialise
90 # and only makes sense for actual bytes inputs (#253).
91 # Note that `.tobytes()` will return a single `bytes` payload, thus we
92 # need to encapsulate it into a list so that it's compatible.
93 return [data.tobytes()]
94
95 return data.flatten().tolist()
96
97
98 @register_input_codec
99 class NumpyCodec(InputCodec):
100 """
101 Decodes an request input (response output) as a NumPy array.
102 """
103
104 ContentType = "np"
105 TypeHint = np.ndarray
106
107 @classmethod
108 def can_encode(csl, payload: Any) -> bool:
109 return isinstance(payload, np.ndarray)
110
111 @classmethod
112 def encode_output(cls, name: str, payload: np.ndarray, **kwargs) -> ResponseOutput:
113 datatype = to_datatype(payload.dtype)
114
115 shape = inject_batch_dimension(list(payload.shape))
116
117 return ResponseOutput(
118 name=name,
119 datatype=datatype,
120 shape=shape,
121 data=_encode_data(payload, datatype),
122 parameters=Parameters(content_type=cls.ContentType),
123 )
124
125 @classmethod
126 def decode_output(cls, response_output: ResponseOutput) -> np.ndarray:
127 return cls.decode_input(response_output) # type: ignore
128
129 @classmethod
130 def encode_input(cls, name: str, payload: np.ndarray, **kwargs) -> RequestInput:
131 output = cls.encode_output(name=name, payload=payload)
132
133 return RequestInput(
134 name=output.name,
135 datatype=output.datatype,
136 shape=output.shape,
137 data=output.data,
138 parameters=Parameters(content_type=cls.ContentType),
139 )
140
141 @classmethod
142 def decode_input(cls, request_input: RequestInput) -> np.ndarray:
143 model_data = _to_ndarray(request_input)
144
145 # TODO: Check if reshape not valid
146 return model_data.reshape(request_input.shape)
147
148
149 @register_request_codec
150 class NumpyRequestCodec(SingleInputRequestCodec):
151 """
152 Decodes the first input (output) of request (response) as a NumPy array.
153 This codec can be useful for cases where the whole payload is a single
154 NumPy tensor.
155 """
156
157 InputCodec = NumpyCodec
158 ContentType = NumpyCodec.ContentType
159
```
Path: `mlserver/codecs/pandas.py`
Content:
```
1 import pandas as pd
2 import numpy as np
3
4 from typing import Optional, Any, List, Tuple
5
6 from .base import RequestCodec, register_request_codec
7 from .numpy import to_datatype, to_dtype
8 from .string import encode_str, StringCodec
9 from .utils import get_decoded_or_raw, InputOrOutput, inject_batch_dimension
10 from .lists import ListElement
11 from ..types import (
12 InferenceRequest,
13 InferenceResponse,
14 RequestInput,
15 ResponseOutput,
16 Parameters,
17 )
18
19
20 def _to_series(input_or_output: InputOrOutput) -> pd.Series:
21 payload = get_decoded_or_raw(input_or_output)
22
23 if input_or_output.datatype == "BYTES":
24 # Don't convert the dtype of BYTES
25 return pd.Series(payload)
26
27 if isinstance(payload, np.ndarray):
28 # Necessary so that it's compatible with pd.Series
29 payload = list(payload)
30
31 dtype = to_dtype(input_or_output)
32 return pd.Series(payload, dtype=dtype)
33
34
35 def _to_response_output(series: pd.Series, use_bytes: bool = True) -> ResponseOutput:
36 datatype = to_datatype(series.dtype)
37 data = series.tolist()
38 content_type = None
39
40 if datatype == "BYTES":
41 data, content_type = _process_bytes(data, use_bytes)
42
43 shape = inject_batch_dimension(list(series.shape))
44 parameters = None
45 if content_type:
46 parameters = Parameters(content_type=content_type)
47
48 return ResponseOutput(
49 name=series.name,
50 shape=shape,
51 data=data,
52 datatype=datatype,
53 parameters=parameters,
54 )
55
56
57 def _process_bytes(
58 data: List[ListElement], use_bytes: bool = True
59 ) -> Tuple[List[ListElement], Optional[str]]:
60 # To ensure that "string" columns can be encoded in gRPC, we need to
61 # encode them as bytes.
62 # We'll also keep track of whether the list should be treated in the
63 # future as a list of strings.
64 processed = []
65 content_type: Optional[str] = StringCodec.ContentType
66 for elem in data:
67 converted = elem
68 if not isinstance(elem, str):
69 # There was a non-string element, so we can't determine a content
70 # type
71 content_type = None
72 elif use_bytes:
73 converted = encode_str(elem)
74
75 processed.append(converted)
76
77 return processed, content_type
78
79
80 @register_request_codec
81 class PandasCodec(RequestCodec):
82 """
83 Decodes a request (response) into a Pandas DataFrame, assuming each input
84 (output) head corresponds to a column of the DataFrame.
85 """
86
87 ContentType = "pd"
88 TypeHint = pd.DataFrame
89
90 @classmethod
91 def can_encode(cls, payload: Any) -> bool:
92 return isinstance(payload, pd.DataFrame)
93
94 @classmethod
95 def encode_response(
96 cls,
97 model_name: str,
98 payload: pd.DataFrame,
99 model_version: Optional[str] = None,
100 use_bytes: bool = True,
101 **kwargs
102 ) -> InferenceResponse:
103 outputs = cls.encode_outputs(payload, use_bytes=use_bytes)
104
105 return InferenceResponse(
106 model_name=model_name,
107 model_version=model_version,
108 parameters=Parameters(content_type=cls.ContentType),
109 outputs=outputs,
110 )
111
112 @classmethod
113 def decode_response(cls, response: InferenceResponse) -> pd.DataFrame:
114 data = {
115 response_output.name: _to_series(response_output)
116 for response_output in response.outputs
117 }
118
119 return pd.DataFrame(data)
120
121 @classmethod
122 def encode_outputs(
123 cls, payload: pd.DataFrame, use_bytes: bool = True
124 ) -> List[ResponseOutput]:
125 return [
126 _to_response_output(payload[col], use_bytes=use_bytes) for col in payload
127 ]
128
129 @classmethod
130 def encode_request(
131 cls, payload: pd.DataFrame, use_bytes: bool = True, **kwargs
132 ) -> InferenceRequest:
133 outputs = cls.encode_outputs(payload, use_bytes=use_bytes)
134
135 return InferenceRequest(
136 parameters=Parameters(content_type=cls.ContentType),
137 inputs=[
138 RequestInput(
139 name=output.name,
140 datatype=output.datatype,
141 shape=output.shape,
142 data=output.data,
143 parameters=output.parameters,
144 )
145 for output in outputs
146 ],
147 )
148
149 @classmethod
150 def decode_request(cls, request: InferenceRequest) -> pd.DataFrame:
151 data = {
152 request_input.name: _to_series(request_input)
153 for request_input in request.inputs
154 }
155
156 return pd.DataFrame(data)
157
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mlserver/codecs/numpy.py b/mlserver/codecs/numpy.py
--- a/mlserver/codecs/numpy.py
+++ b/mlserver/codecs/numpy.py
@@ -92,7 +92,24 @@
# need to encapsulate it into a list so that it's compatible.
return [data.tobytes()]
- return data.flatten().tolist()
+ flattened_list = data.flatten().tolist()
+
+ # Replace NaN with null
+ if datatype != "BYTES":
+ # The `isnan` method doesn't work on Numpy arrays with non-numeric
+ # types
+ has_nan = np.isnan(data).any()
+ if has_nan:
+ flattened_list = list(map(convert_nan, flattened_list))
+
+ return flattened_list
+
+
+def convert_nan(val):
+ if np.isnan(val):
+ return None
+
+ return val
@register_input_codec
diff --git a/mlserver/codecs/pandas.py b/mlserver/codecs/pandas.py
--- a/mlserver/codecs/pandas.py
+++ b/mlserver/codecs/pandas.py
@@ -4,7 +4,7 @@
from typing import Optional, Any, List, Tuple
from .base import RequestCodec, register_request_codec
-from .numpy import to_datatype, to_dtype
+from .numpy import to_datatype, to_dtype, convert_nan
from .string import encode_str, StringCodec
from .utils import get_decoded_or_raw, InputOrOutput, inject_batch_dimension
from .lists import ListElement
@@ -35,8 +35,13 @@
def _to_response_output(series: pd.Series, use_bytes: bool = True) -> ResponseOutput:
datatype = to_datatype(series.dtype)
data = series.tolist()
- content_type = None
+ # Replace NaN with null
+ has_nan = series.isnull().any()
+ if has_nan:
+ data = list(map(convert_nan, data))
+
+ content_type = None
if datatype == "BYTES":
data, content_type = _process_bytes(data, use_bytes)
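
Illustration, not part of the patch: once `NaN` is replaced with `None` (as `convert_nan` above does), the flattened payload serialises cleanly even under strict JSON rules, because Python's `json` module only rejects `NaN`/`inf` when `allow_nan=False`:

```python
# Sketch only: effect of the NaN -> None substitution on strict JSON encoding.
import json
import numpy as np

data = np.array([1.0, np.nan, 3.0])
raw = data.flatten().tolist()
cleaned = [None if np.isnan(v) else v for v in raw]

# json.dumps(raw, allow_nan=False) would raise ValueError here.
print(json.dumps(cleaned, allow_nan=False))  # -> [1.0, null, 3.0]
```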
|
{"golden_diff": "diff --git a/mlserver/codecs/numpy.py b/mlserver/codecs/numpy.py\n--- a/mlserver/codecs/numpy.py\n+++ b/mlserver/codecs/numpy.py\n@@ -92,7 +92,24 @@\n # need to encapsulate it into a list so that it's compatible.\n return [data.tobytes()]\n \n- return data.flatten().tolist()\n+ flattened_list = data.flatten().tolist()\n+\n+ # Replace NaN with null\n+ if datatype != \"BYTES\":\n+ # The `isnan` method doesn't work on Numpy arrays with non-numeric\n+ # types\n+ has_nan = np.isnan(data).any()\n+ if has_nan:\n+ flattened_list = list(map(convert_nan, flattened_list))\n+\n+ return flattened_list\n+\n+\n+def convert_nan(val):\n+ if np.isnan(val):\n+ return None\n+\n+ return val\n \n \n @register_input_codec\ndiff --git a/mlserver/codecs/pandas.py b/mlserver/codecs/pandas.py\n--- a/mlserver/codecs/pandas.py\n+++ b/mlserver/codecs/pandas.py\n@@ -4,7 +4,7 @@\n from typing import Optional, Any, List, Tuple\n \n from .base import RequestCodec, register_request_codec\n-from .numpy import to_datatype, to_dtype\n+from .numpy import to_datatype, to_dtype, convert_nan\n from .string import encode_str, StringCodec\n from .utils import get_decoded_or_raw, InputOrOutput, inject_batch_dimension\n from .lists import ListElement\n@@ -35,8 +35,13 @@\n def _to_response_output(series: pd.Series, use_bytes: bool = True) -> ResponseOutput:\n datatype = to_datatype(series.dtype)\n data = series.tolist()\n- content_type = None\n \n+ # Replace NaN with null\n+ has_nan = series.isnull().any()\n+ if has_nan:\n+ data = list(map(convert_nan, data))\n+\n+ content_type = None\n if datatype == \"BYTES\":\n data, content_type = _process_bytes(data, use_bytes)\n", "issue": "Support for NaN in np.ndarray codec?\nSending/receiving a `numpy` array containing NaN values to/from an mlserver with the `NumpyCodec` results in a `ValueError: Out of range float values are not JSON compliant` error. Is this a known limitation, and are there any good workarounds that would still allow me to use a pre-packaged server? 
I understand that it would probably be bad to assume than `NaN` can be mapped to `null` in the JSON, but maybe that could be an option?\n", "before_files": [{"content": "import numpy as np\n\nfrom typing import Any\n\nfrom ..types import RequestInput, ResponseOutput, Parameters\n\nfrom .base import InputCodec, register_input_codec, register_request_codec\nfrom .utils import SingleInputRequestCodec, InputOrOutput, inject_batch_dimension\nfrom .lists import is_list_of\nfrom .string import encode_str\n\n_DatatypeToNumpy = {\n \"BOOL\": \"bool\",\n \"UINT8\": \"uint8\",\n \"UINT16\": \"uint16\",\n \"UINT32\": \"uint32\",\n \"UINT64\": \"uint64\",\n \"INT8\": \"int8\",\n \"INT16\": \"int16\",\n \"INT32\": \"int32\",\n \"INT64\": \"int64\",\n \"FP16\": \"float16\",\n \"FP32\": \"float32\",\n \"FP64\": \"float64\",\n \"BYTES\": \"bytes\",\n}\n\n_NumpyToDatatype = {value: key for key, value in _DatatypeToNumpy.items()}\n\n# NOTE: numpy has more types than v2 protocol\n_NumpyToDatatype[\"object\"] = \"BYTES\"\n_NumpyToDatatype[\"S\"] = \"BYTES\"\n_NumpyToDatatype[\"U\"] = \"BYTES\"\n\n\ndef to_dtype(input_or_output: InputOrOutput) -> \"np.dtype\":\n dtype = _DatatypeToNumpy[input_or_output.datatype]\n\n if input_or_output.datatype == \"BYTES\":\n data = getattr(input_or_output.data, \"__root__\", input_or_output.data)\n if is_list_of(data, str):\n # Handle special case of strings being treated as Numpy arrays\n return np.dtype(str)\n\n # bytes have variable size, so need to specify as part of type\n # TODO: Make elem size variable (and not just the last dimension)\n elem_size = input_or_output.shape[-1]\n return np.dtype((dtype, elem_size))\n\n return np.dtype(dtype)\n\n\ndef to_datatype(dtype: np.dtype) -> str:\n as_str = str(dtype)\n\n if as_str not in _NumpyToDatatype:\n # If not present, try with kind\n as_str = getattr(dtype, \"kind\")\n\n datatype = _NumpyToDatatype[as_str]\n\n return datatype\n\n\ndef _to_ndarray(input_or_output: InputOrOutput) -> np.ndarray:\n data = getattr(input_or_output.data, \"__root__\", input_or_output.data)\n dtype = to_dtype(input_or_output)\n\n if input_or_output.datatype == \"BYTES\":\n if is_list_of(data, bytes):\n # If the inputs is of type `BYTES`, there could be multiple \"lists\"\n # serialised into multiple buffers.\n # We will deserialise all of them and concatenate them together.\n decoded = [np.frombuffer(buffer, dtype) for buffer in data]\n return np.concatenate(decoded)\n\n return np.array(data, dtype)\n\n\ndef _encode_data(data: np.ndarray, datatype: str) -> list:\n if datatype == \"BYTES\":\n if np.issubdtype(data.dtype, str):\n # Handle special case of a string Numpy array, where the diff elems\n # need to be encoded as well\n as_list = data.flatten().tolist()\n return list(map(encode_str, as_list))\n\n if np.issubdtype(data.dtype, bytes):\n # `tobytes` is way faster than tolist, although it's harder to serialise\n # and only makes sense for actual bytes inputs (#253).\n # Note that `.tobytes()` will return a single `bytes` payload, thus we\n # need to encapsulate it into a list so that it's compatible.\n return [data.tobytes()]\n\n return data.flatten().tolist()\n\n\n@register_input_codec\nclass NumpyCodec(InputCodec):\n \"\"\"\n Decodes an request input (response output) as a NumPy array.\n \"\"\"\n\n ContentType = \"np\"\n TypeHint = np.ndarray\n\n @classmethod\n def can_encode(csl, payload: Any) -> bool:\n return isinstance(payload, np.ndarray)\n\n @classmethod\n def encode_output(cls, name: str, payload: np.ndarray, **kwargs) -> 
ResponseOutput:\n datatype = to_datatype(payload.dtype)\n\n shape = inject_batch_dimension(list(payload.shape))\n\n return ResponseOutput(\n name=name,\n datatype=datatype,\n shape=shape,\n data=_encode_data(payload, datatype),\n parameters=Parameters(content_type=cls.ContentType),\n )\n\n @classmethod\n def decode_output(cls, response_output: ResponseOutput) -> np.ndarray:\n return cls.decode_input(response_output) # type: ignore\n\n @classmethod\n def encode_input(cls, name: str, payload: np.ndarray, **kwargs) -> RequestInput:\n output = cls.encode_output(name=name, payload=payload)\n\n return RequestInput(\n name=output.name,\n datatype=output.datatype,\n shape=output.shape,\n data=output.data,\n parameters=Parameters(content_type=cls.ContentType),\n )\n\n @classmethod\n def decode_input(cls, request_input: RequestInput) -> np.ndarray:\n model_data = _to_ndarray(request_input)\n\n # TODO: Check if reshape not valid\n return model_data.reshape(request_input.shape)\n\n\n@register_request_codec\nclass NumpyRequestCodec(SingleInputRequestCodec):\n \"\"\"\n Decodes the first input (output) of request (response) as a NumPy array.\n This codec can be useful for cases where the whole payload is a single\n NumPy tensor.\n \"\"\"\n\n InputCodec = NumpyCodec\n ContentType = NumpyCodec.ContentType\n", "path": "mlserver/codecs/numpy.py"}, {"content": "import pandas as pd\nimport numpy as np\n\nfrom typing import Optional, Any, List, Tuple\n\nfrom .base import RequestCodec, register_request_codec\nfrom .numpy import to_datatype, to_dtype\nfrom .string import encode_str, StringCodec\nfrom .utils import get_decoded_or_raw, InputOrOutput, inject_batch_dimension\nfrom .lists import ListElement\nfrom ..types import (\n InferenceRequest,\n InferenceResponse,\n RequestInput,\n ResponseOutput,\n Parameters,\n)\n\n\ndef _to_series(input_or_output: InputOrOutput) -> pd.Series:\n payload = get_decoded_or_raw(input_or_output)\n\n if input_or_output.datatype == \"BYTES\":\n # Don't convert the dtype of BYTES\n return pd.Series(payload)\n\n if isinstance(payload, np.ndarray):\n # Necessary so that it's compatible with pd.Series\n payload = list(payload)\n\n dtype = to_dtype(input_or_output)\n return pd.Series(payload, dtype=dtype)\n\n\ndef _to_response_output(series: pd.Series, use_bytes: bool = True) -> ResponseOutput:\n datatype = to_datatype(series.dtype)\n data = series.tolist()\n content_type = None\n\n if datatype == \"BYTES\":\n data, content_type = _process_bytes(data, use_bytes)\n\n shape = inject_batch_dimension(list(series.shape))\n parameters = None\n if content_type:\n parameters = Parameters(content_type=content_type)\n\n return ResponseOutput(\n name=series.name,\n shape=shape,\n data=data,\n datatype=datatype,\n parameters=parameters,\n )\n\n\ndef _process_bytes(\n data: List[ListElement], use_bytes: bool = True\n) -> Tuple[List[ListElement], Optional[str]]:\n # To ensure that \"string\" columns can be encoded in gRPC, we need to\n # encode them as bytes.\n # We'll also keep track of whether the list should be treated in the\n # future as a list of strings.\n processed = []\n content_type: Optional[str] = StringCodec.ContentType\n for elem in data:\n converted = elem\n if not isinstance(elem, str):\n # There was a non-string element, so we can't determine a content\n # type\n content_type = None\n elif use_bytes:\n converted = encode_str(elem)\n\n processed.append(converted)\n\n return processed, content_type\n\n\n@register_request_codec\nclass PandasCodec(RequestCodec):\n \"\"\"\n Decodes a 
request (response) into a Pandas DataFrame, assuming each input\n (output) head corresponds to a column of the DataFrame.\n \"\"\"\n\n ContentType = \"pd\"\n TypeHint = pd.DataFrame\n\n @classmethod\n def can_encode(cls, payload: Any) -> bool:\n return isinstance(payload, pd.DataFrame)\n\n @classmethod\n def encode_response(\n cls,\n model_name: str,\n payload: pd.DataFrame,\n model_version: Optional[str] = None,\n use_bytes: bool = True,\n **kwargs\n ) -> InferenceResponse:\n outputs = cls.encode_outputs(payload, use_bytes=use_bytes)\n\n return InferenceResponse(\n model_name=model_name,\n model_version=model_version,\n parameters=Parameters(content_type=cls.ContentType),\n outputs=outputs,\n )\n\n @classmethod\n def decode_response(cls, response: InferenceResponse) -> pd.DataFrame:\n data = {\n response_output.name: _to_series(response_output)\n for response_output in response.outputs\n }\n\n return pd.DataFrame(data)\n\n @classmethod\n def encode_outputs(\n cls, payload: pd.DataFrame, use_bytes: bool = True\n ) -> List[ResponseOutput]:\n return [\n _to_response_output(payload[col], use_bytes=use_bytes) for col in payload\n ]\n\n @classmethod\n def encode_request(\n cls, payload: pd.DataFrame, use_bytes: bool = True, **kwargs\n ) -> InferenceRequest:\n outputs = cls.encode_outputs(payload, use_bytes=use_bytes)\n\n return InferenceRequest(\n parameters=Parameters(content_type=cls.ContentType),\n inputs=[\n RequestInput(\n name=output.name,\n datatype=output.datatype,\n shape=output.shape,\n data=output.data,\n parameters=output.parameters,\n )\n for output in outputs\n ],\n )\n\n @classmethod\n def decode_request(cls, request: InferenceRequest) -> pd.DataFrame:\n data = {\n request_input.name: _to_series(request_input)\n for request_input in request.inputs\n }\n\n return pd.DataFrame(data)\n", "path": "mlserver/codecs/pandas.py"}], "after_files": [{"content": "import numpy as np\n\nfrom typing import Any\n\nfrom ..types import RequestInput, ResponseOutput, Parameters\n\nfrom .base import InputCodec, register_input_codec, register_request_codec\nfrom .utils import SingleInputRequestCodec, InputOrOutput, inject_batch_dimension\nfrom .lists import is_list_of\nfrom .string import encode_str\n\n_DatatypeToNumpy = {\n \"BOOL\": \"bool\",\n \"UINT8\": \"uint8\",\n \"UINT16\": \"uint16\",\n \"UINT32\": \"uint32\",\n \"UINT64\": \"uint64\",\n \"INT8\": \"int8\",\n \"INT16\": \"int16\",\n \"INT32\": \"int32\",\n \"INT64\": \"int64\",\n \"FP16\": \"float16\",\n \"FP32\": \"float32\",\n \"FP64\": \"float64\",\n \"BYTES\": \"bytes\",\n}\n\n_NumpyToDatatype = {value: key for key, value in _DatatypeToNumpy.items()}\n\n# NOTE: numpy has more types than v2 protocol\n_NumpyToDatatype[\"object\"] = \"BYTES\"\n_NumpyToDatatype[\"S\"] = \"BYTES\"\n_NumpyToDatatype[\"U\"] = \"BYTES\"\n\n\ndef to_dtype(input_or_output: InputOrOutput) -> \"np.dtype\":\n dtype = _DatatypeToNumpy[input_or_output.datatype]\n\n if input_or_output.datatype == \"BYTES\":\n data = getattr(input_or_output.data, \"__root__\", input_or_output.data)\n if is_list_of(data, str):\n # Handle special case of strings being treated as Numpy arrays\n return np.dtype(str)\n\n # bytes have variable size, so need to specify as part of type\n # TODO: Make elem size variable (and not just the last dimension)\n elem_size = input_or_output.shape[-1]\n return np.dtype((dtype, elem_size))\n\n return np.dtype(dtype)\n\n\ndef to_datatype(dtype: np.dtype) -> str:\n as_str = str(dtype)\n\n if as_str not in _NumpyToDatatype:\n # If not present, try with kind\n 
as_str = getattr(dtype, \"kind\")\n\n datatype = _NumpyToDatatype[as_str]\n\n return datatype\n\n\ndef _to_ndarray(input_or_output: InputOrOutput) -> np.ndarray:\n data = getattr(input_or_output.data, \"__root__\", input_or_output.data)\n dtype = to_dtype(input_or_output)\n\n if input_or_output.datatype == \"BYTES\":\n if is_list_of(data, bytes):\n # If the inputs is of type `BYTES`, there could be multiple \"lists\"\n # serialised into multiple buffers.\n # We will deserialise all of them and concatenate them together.\n decoded = [np.frombuffer(buffer, dtype) for buffer in data]\n return np.concatenate(decoded)\n\n return np.array(data, dtype)\n\n\ndef _encode_data(data: np.ndarray, datatype: str) -> list:\n if datatype == \"BYTES\":\n if np.issubdtype(data.dtype, str):\n # Handle special case of a string Numpy array, where the diff elems\n # need to be encoded as well\n as_list = data.flatten().tolist()\n return list(map(encode_str, as_list))\n\n if np.issubdtype(data.dtype, bytes):\n # `tobytes` is way faster than tolist, although it's harder to serialise\n # and only makes sense for actual bytes inputs (#253).\n # Note that `.tobytes()` will return a single `bytes` payload, thus we\n # need to encapsulate it into a list so that it's compatible.\n return [data.tobytes()]\n\n flattened_list = data.flatten().tolist()\n\n # Replace NaN with null\n if datatype != \"BYTES\":\n # The `isnan` method doesn't work on Numpy arrays with non-numeric\n # types\n has_nan = np.isnan(data).any()\n if has_nan:\n flattened_list = list(map(convert_nan, flattened_list))\n\n return flattened_list\n\n\ndef convert_nan(val):\n if np.isnan(val):\n return None\n\n return val\n\n\n@register_input_codec\nclass NumpyCodec(InputCodec):\n \"\"\"\n Decodes an request input (response output) as a NumPy array.\n \"\"\"\n\n ContentType = \"np\"\n TypeHint = np.ndarray\n\n @classmethod\n def can_encode(csl, payload: Any) -> bool:\n return isinstance(payload, np.ndarray)\n\n @classmethod\n def encode_output(cls, name: str, payload: np.ndarray, **kwargs) -> ResponseOutput:\n datatype = to_datatype(payload.dtype)\n\n shape = inject_batch_dimension(list(payload.shape))\n\n return ResponseOutput(\n name=name,\n datatype=datatype,\n shape=shape,\n data=_encode_data(payload, datatype),\n parameters=Parameters(content_type=cls.ContentType),\n )\n\n @classmethod\n def decode_output(cls, response_output: ResponseOutput) -> np.ndarray:\n return cls.decode_input(response_output) # type: ignore\n\n @classmethod\n def encode_input(cls, name: str, payload: np.ndarray, **kwargs) -> RequestInput:\n output = cls.encode_output(name=name, payload=payload)\n\n return RequestInput(\n name=output.name,\n datatype=output.datatype,\n shape=output.shape,\n data=output.data,\n parameters=Parameters(content_type=cls.ContentType),\n )\n\n @classmethod\n def decode_input(cls, request_input: RequestInput) -> np.ndarray:\n model_data = _to_ndarray(request_input)\n\n # TODO: Check if reshape not valid\n return model_data.reshape(request_input.shape)\n\n\n@register_request_codec\nclass NumpyRequestCodec(SingleInputRequestCodec):\n \"\"\"\n Decodes the first input (output) of request (response) as a NumPy array.\n This codec can be useful for cases where the whole payload is a single\n NumPy tensor.\n \"\"\"\n\n InputCodec = NumpyCodec\n ContentType = NumpyCodec.ContentType\n", "path": "mlserver/codecs/numpy.py"}, {"content": "import pandas as pd\nimport numpy as np\n\nfrom typing import Optional, Any, List, Tuple\n\nfrom .base import RequestCodec, 
register_request_codec\nfrom .numpy import to_datatype, to_dtype, convert_nan\nfrom .string import encode_str, StringCodec\nfrom .utils import get_decoded_or_raw, InputOrOutput, inject_batch_dimension\nfrom .lists import ListElement\nfrom ..types import (\n InferenceRequest,\n InferenceResponse,\n RequestInput,\n ResponseOutput,\n Parameters,\n)\n\n\ndef _to_series(input_or_output: InputOrOutput) -> pd.Series:\n payload = get_decoded_or_raw(input_or_output)\n\n if input_or_output.datatype == \"BYTES\":\n # Don't convert the dtype of BYTES\n return pd.Series(payload)\n\n if isinstance(payload, np.ndarray):\n # Necessary so that it's compatible with pd.Series\n payload = list(payload)\n\n dtype = to_dtype(input_or_output)\n return pd.Series(payload, dtype=dtype)\n\n\ndef _to_response_output(series: pd.Series, use_bytes: bool = True) -> ResponseOutput:\n datatype = to_datatype(series.dtype)\n data = series.tolist()\n\n # Replace NaN with null\n has_nan = series.isnull().any()\n if has_nan:\n data = list(map(convert_nan, data))\n\n content_type = None\n if datatype == \"BYTES\":\n data, content_type = _process_bytes(data, use_bytes)\n\n shape = inject_batch_dimension(list(series.shape))\n parameters = None\n if content_type:\n parameters = Parameters(content_type=content_type)\n\n return ResponseOutput(\n name=series.name,\n shape=shape,\n data=data,\n datatype=datatype,\n parameters=parameters,\n )\n\n\ndef _process_bytes(\n data: List[ListElement], use_bytes: bool = True\n) -> Tuple[List[ListElement], Optional[str]]:\n # To ensure that \"string\" columns can be encoded in gRPC, we need to\n # encode them as bytes.\n # We'll also keep track of whether the list should be treated in the\n # future as a list of strings.\n processed = []\n content_type: Optional[str] = StringCodec.ContentType\n for elem in data:\n converted = elem\n if not isinstance(elem, str):\n # There was a non-string element, so we can't determine a content\n # type\n content_type = None\n elif use_bytes:\n converted = encode_str(elem)\n\n processed.append(converted)\n\n return processed, content_type\n\n\n@register_request_codec\nclass PandasCodec(RequestCodec):\n \"\"\"\n Decodes a request (response) into a Pandas DataFrame, assuming each input\n (output) head corresponds to a column of the DataFrame.\n \"\"\"\n\n ContentType = \"pd\"\n TypeHint = pd.DataFrame\n\n @classmethod\n def can_encode(cls, payload: Any) -> bool:\n return isinstance(payload, pd.DataFrame)\n\n @classmethod\n def encode_response(\n cls,\n model_name: str,\n payload: pd.DataFrame,\n model_version: Optional[str] = None,\n use_bytes: bool = True,\n **kwargs\n ) -> InferenceResponse:\n outputs = cls.encode_outputs(payload, use_bytes=use_bytes)\n\n return InferenceResponse(\n model_name=model_name,\n model_version=model_version,\n parameters=Parameters(content_type=cls.ContentType),\n outputs=outputs,\n )\n\n @classmethod\n def decode_response(cls, response: InferenceResponse) -> pd.DataFrame:\n data = {\n response_output.name: _to_series(response_output)\n for response_output in response.outputs\n }\n\n return pd.DataFrame(data)\n\n @classmethod\n def encode_outputs(\n cls, payload: pd.DataFrame, use_bytes: bool = True\n ) -> List[ResponseOutput]:\n return [\n _to_response_output(payload[col], use_bytes=use_bytes) for col in payload\n ]\n\n @classmethod\n def encode_request(\n cls, payload: pd.DataFrame, use_bytes: bool = True, **kwargs\n ) -> InferenceRequest:\n outputs = cls.encode_outputs(payload, use_bytes=use_bytes)\n\n return InferenceRequest(\n 
parameters=Parameters(content_type=cls.ContentType),\n inputs=[\n RequestInput(\n name=output.name,\n datatype=output.datatype,\n shape=output.shape,\n data=output.data,\n parameters=output.parameters,\n )\n for output in outputs\n ],\n )\n\n @classmethod\n def decode_request(cls, request: InferenceRequest) -> pd.DataFrame:\n data = {\n request_input.name: _to_series(request_input)\n for request_input in request.inputs\n }\n\n return pd.DataFrame(data)\n", "path": "mlserver/codecs/pandas.py"}]}
| 3,355 | 461 |
gh_patches_debug_28083
|
rasdani/github-patches
|
git_diff
|
MycroftAI__mycroft-core-1546
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Initial load of skills fails if auto_update is true.
On a new clone of the dev tree on an Ubuntu 16.04 machine **that's never run mycroft before.**
I ran _msm/msm default_ and it downloaded 27 skills, but mycroft wouldn't load any skills except _pairing_. I looked at the skills loading code and saw it wasn't getting past this:
```
if (exists(SKILLS_DIR) and
(self.next_download or not update)):
```
After I changed my config to _auto_update: false_, they loaded.
--- END ISSUE ---
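
To make the reported guard concrete, an illustrative sketch only, with the values assumed from the report: on a machine that has never run mycroft, `next_download` is still unset, so with `auto_update: true` the whole condition is false even though the skills directory exists:

```python
# Sketch only: the boolean logic quoted in the issue, with first-run values.
skills_dir_exists = True   # "msm default" already populated the skills dir
next_download = None       # no previous download has ever been scheduled
update = True              # auto_update: true

if skills_dir_exists and (next_download or not update):
    print("skills would be loaded")
else:
    print("skill loading is skipped")  # what the reporter observed
```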
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mycroft/util/__init__.py`
Content:
```
1 # Copyright 2017 Mycroft AI Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15 from __future__ import absolute_import
16 import socket
17 import subprocess
18 from threading import Thread
19 from time import sleep
20
21 import json
22 import os.path
23 import psutil
24 from stat import S_ISREG, ST_MTIME, ST_MODE, ST_SIZE
25
26 import signal as sig
27
28 import mycroft.audio
29 import mycroft.configuration
30 from mycroft.util.format import nice_number
31 # Officially exported methods from this file:
32 # play_wav, play_mp3, get_cache_directory,
33 # resolve_resource_file, wait_while_speaking
34 from mycroft.util.log import LOG
35 from mycroft.util.parse import extract_datetime, extractnumber, normalize
36 from mycroft.util.signal import *
37
38
39 def resolve_resource_file(res_name):
40 """Convert a resource into an absolute filename.
41
42 Resource names are in the form: 'filename.ext'
43 or 'path/filename.ext'
44
45 The system wil look for ~/.mycroft/res_name first, and
46 if not found will look at /opt/mycroft/res_name,
47 then finally it will look for res_name in the 'mycroft/res'
48 folder of the source code package.
49
50 Example:
51 With mycroft running as the user 'bob', if you called
52 resolve_resource_file('snd/beep.wav')
53 it would return either '/home/bob/.mycroft/snd/beep.wav' or
54 '/opt/mycroft/snd/beep.wav' or '.../mycroft/res/snd/beep.wav',
55 where the '...' is replaced by the path where the package has
56 been installed.
57
58 Args:
59 res_name (str): a resource path/name
60 """
61
62 # First look for fully qualified file (e.g. a user setting)
63 if os.path.isfile(res_name):
64 return res_name
65
66 # Now look for ~/.mycroft/res_name (in user folder)
67 filename = os.path.expanduser("~/.mycroft/" + res_name)
68 if os.path.isfile(filename):
69 return filename
70
71 # Next look for /opt/mycroft/res/res_name
72 filename = os.path.expanduser("/opt/mycroft/" + res_name)
73 if os.path.isfile(filename):
74 return filename
75
76 # Finally look for it in the source package
77 filename = os.path.join(os.path.dirname(__file__), '..', 'res', res_name)
78 filename = os.path.abspath(os.path.normpath(filename))
79 if os.path.isfile(filename):
80 return filename
81
82 return None # Resource cannot be resolved
83
84
85 def play_wav(uri):
86 config = mycroft.configuration.Configuration.get()
87 play_cmd = config.get("play_wav_cmdline")
88 play_wav_cmd = str(play_cmd).split(" ")
89 for index, cmd in enumerate(play_wav_cmd):
90 if cmd == "%1":
91 play_wav_cmd[index] = (get_http(uri))
92 return subprocess.Popen(play_wav_cmd)
93
94
95 def play_mp3(uri):
96 config = mycroft.configuration.Configuration.get()
97 play_cmd = config.get("play_mp3_cmdline")
98 play_mp3_cmd = str(play_cmd).split(" ")
99 for index, cmd in enumerate(play_mp3_cmd):
100 if cmd == "%1":
101 play_mp3_cmd[index] = (get_http(uri))
102 return subprocess.Popen(play_mp3_cmd)
103
104
105 def record(file_path, duration, rate, channels):
106 if duration > 0:
107 return subprocess.Popen(
108 ["arecord", "-r", str(rate), "-c", str(channels), "-d",
109 str(duration), file_path])
110 else:
111 return subprocess.Popen(
112 ["arecord", "-r", str(rate), "-c", str(channels), file_path])
113
114
115 def get_http(uri):
116 return uri.replace("https://", "http://")
117
118
119 def remove_last_slash(url):
120 if url and url.endswith('/'):
121 url = url[:-1]
122 return url
123
124
125 def read_stripped_lines(filename):
126 with open(filename, 'r') as f:
127 return [line.strip() for line in f]
128
129
130 def read_dict(filename, div='='):
131 d = {}
132 with open(filename, 'r') as f:
133 for line in f:
134 (key, val) = line.split(div)
135 d[key.strip()] = val.strip()
136 return d
137
138
139 def connected(host="8.8.8.8", port=53, timeout=3):
140 """
141 Thanks to 7h3rAm on
142 Host: 8.8.8.8 (google-public-dns-a.google.com)
143 OpenPort: 53/tcp
144 Service: domain (DNS/TCP)
145
146 NOTE:
147 This is no longer in use by this version
148 New method checks for a connection using ConnectionError only when
149 a question is asked
150 """
151 try:
152 socket.setdefaulttimeout(timeout)
153 socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))
154 return True
155 except IOError:
156 try:
157 socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect(
158 ("8.8.4.4", port))
159 return True
160 except IOError:
161 return False
162
163
164 def curate_cache(directory, min_free_percent=5.0, min_free_disk=50):
165 """Clear out the directory if needed
166
167 This assumes all the files in the directory can be deleted as freely
168
169 Args:
170 directory (str): directory path that holds cached files
171 min_free_percent (float): percentage (0.0-100.0) of drive to keep free,
172 default is 5% if not specified.
173 min_free_disk (float): minimum allowed disk space in MB, default
174 value is 50 MB if not specified.
175 """
176
177 # Simpleminded implementation -- keep a certain percentage of the
178 # disk available.
179 # TODO: Would be easy to add more options, like whitelisted files, etc.
180 space = psutil.disk_usage(directory)
181
182 # convert from MB to bytes
183 min_free_disk *= 1024 * 1024
184 # space.percent = space.used/space.total*100.0
185 percent_free = 100.0 - space.percent
186 if percent_free < min_free_percent and space.free < min_free_disk:
187 LOG.info('Low diskspace detected, cleaning cache')
188 # calculate how many bytes we need to delete
189 bytes_needed = (min_free_percent - percent_free) / 100.0 * space.total
190 bytes_needed = int(bytes_needed + 1.0)
191
192 # get all entries in the directory w/ stats
193 entries = (os.path.join(directory, fn) for fn in os.listdir(directory))
194 entries = ((os.stat(path), path) for path in entries)
195
196 # leave only regular files, insert modification date
197 entries = ((stat[ST_MTIME], stat[ST_SIZE], path)
198 for stat, path in entries if S_ISREG(stat[ST_MODE]))
199
200 # delete files with oldest modification date until space is freed
201 space_freed = 0
202 for moddate, fsize, path in sorted(entries):
203 try:
204 os.remove(path)
205 space_freed += fsize
206 except:
207 pass
208
209 if space_freed > bytes_needed:
210 return # deleted enough!
211
212
213 def get_cache_directory(domain=None):
214 """Get a directory for caching data
215
216 This directory can be used to hold temporary caches of data to
217 speed up performance. This directory will likely be part of a
218 small RAM disk and may be cleared at any time. So code that
219 uses these cached files must be able to fallback and regenerate
220 the file.
221
222 Args:
223 domain (str): The cache domain. Basically just a subdirectory.
224
225 Return:
226 str: a path to the directory where you can cache data
227 """
228 config = mycroft.configuration.Configuration.get()
229 dir = config.get("cache_path")
230 if not dir:
231 # If not defined, use /tmp/mycroft/cache
232 dir = os.path.join(tempfile.gettempdir(), "mycroft", "cache")
233 return ensure_directory_exists(dir, domain)
234
235
236 def validate_param(value, name):
237 if not value:
238 raise ValueError("Missing or empty %s in mycroft.conf " % name)
239
240
241 def is_speaking():
242 """Determine if Text to Speech is occurring
243
244 Returns:
245 bool: True while still speaking
246 """
247 LOG.info("mycroft.utils.is_speaking() is depreciated, use "
248 "mycroft.audio.is_speaking() instead.")
249 return mycroft.audio.is_speaking()
250
251
252 def wait_while_speaking():
253 """Pause as long as Text to Speech is still happening
254
255 Pause while Text to Speech is still happening. This always pauses
256 briefly to ensure that any preceeding request to speak has time to
257 begin.
258 """
259 LOG.info("mycroft.utils.wait_while_speaking() is depreciated, use "
260 "mycroft.audio.wait_while_speaking() instead.")
261 return mycroft.audio.wait_while_speaking()
262
263
264 def stop_speaking():
265 # TODO: Less hacky approach to this once Audio Manager is implemented
266 # Skills should only be able to stop speech they've initiated
267 LOG.info("mycroft.utils.stop_speaking() is depreciated, use "
268 "mycroft.audio.stop_speaking() instead.")
269 mycroft.audio.stop_speaking()
270
271
272 def get_arch():
273 """ Get architecture string of system. """
274 return os.uname()[4]
275
276
277 def reset_sigint_handler():
278 """
279 Reset the sigint handler to the default. This fixes KeyboardInterrupt
280 not getting raised when started via start-mycroft.sh
281 """
282 sig.signal(sig.SIGINT, sig.default_int_handler)
283
284
285 def create_daemon(target, args=(), kwargs=None):
286 """Helper to quickly create and start a thread with daemon = True"""
287 t = Thread(target=target, args=args, kwargs=kwargs)
288 t.daemon = True
289 t.start()
290 return t
291
292
293 def wait_for_exit_signal():
294 """Blocks until KeyboardInterrupt is received"""
295 try:
296 while True:
297 sleep(100)
298 except KeyboardInterrupt:
299 pass
300
301
302 def create_echo_function(name, whitelist=None):
303 from mycroft.configuration import Configuration
304 blacklist = Configuration.get().get("ignore_logs")
305
306 def echo(message):
307 """Listen for messages and echo them for logging"""
308 try:
309 js_msg = json.loads(message)
310
311 if whitelist and js_msg.get("type") not in whitelist:
312 return
313
314 if blacklist and js_msg.get("type") in blacklist:
315 return
316
317 if js_msg.get("type") == "registration":
318 # do not log tokens from registration messages
319 js_msg["data"]["token"] = None
320 message = json.dumps(js_msg)
321 except Exception:
322 pass
323 LOG(name).debug(message)
324 return echo
325
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mycroft/util/__init__.py b/mycroft/util/__init__.py
--- a/mycroft/util/__init__.py
+++ b/mycroft/util/__init__.py
@@ -22,6 +22,7 @@
import os.path
import psutil
from stat import S_ISREG, ST_MTIME, ST_MODE, ST_SIZE
+import requests
import signal as sig
@@ -136,18 +137,41 @@
return d
-def connected(host="8.8.8.8", port=53, timeout=3):
+def connected():
+ """ Check connection by connecting to 8.8.8.8, if this is
+ blocked/fails, Microsoft NCSI is used as a backup
+
+ Returns:
+ True if internet connection can be detected
+ """
+ return connected_dns() or connected_ncsi()
+
+
+def connected_ncsi():
+ """ Check internet connection by retrieving the Microsoft NCSI endpoint.
+
+ Returns:
+ True if internet connection can be detected
"""
- Thanks to 7h3rAm on
- Host: 8.8.8.8 (google-public-dns-a.google.com)
- OpenPort: 53/tcp
- Service: domain (DNS/TCP)
-
- NOTE:
- This is no longer in use by this version
- New method checks for a connection using ConnectionError only when
- a question is asked
+ try:
+ r = requests.get('http://www.msftncsi.com/ncsi.txt')
+ if r.text == u'Microsoft NCSI':
+ return True
+ except Exception:
+ pass
+ return False
+
+
+def connected_dns(host="8.8.8.8", port=53, timeout=3):
+ """ Check internet connection by connecting to DNS servers
+
+ Returns:
+ True if internet connection can be detected
"""
+ # Thanks to 7h3rAm on
+ # Host: 8.8.8.8 (google-public-dns-a.google.com)
+ # OpenPort: 53/tcp
+ # Service: domain (DNS/TCP)
try:
socket.setdefaulttimeout(timeout)
socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))
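
Usage sketch, not part of the patch, assuming the patched module is importable: the reworked `connected()` tries the DNS probe first and falls back to the Microsoft NCSI endpoint, so callers can gate network work such as the skill auto_update on a single call:

```python
# Sketch only: how a caller might use the patched connectivity helper.
from mycroft.util import connected

if connected():
    print("network reachable: safe to run the skill auto_update")
else:
    print("offline: load already-installed skills and retry later")
```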
|
{"golden_diff": "diff --git a/mycroft/util/__init__.py b/mycroft/util/__init__.py\n--- a/mycroft/util/__init__.py\n+++ b/mycroft/util/__init__.py\n@@ -22,6 +22,7 @@\n import os.path\n import psutil\n from stat import S_ISREG, ST_MTIME, ST_MODE, ST_SIZE\n+import requests\n \n import signal as sig\n \n@@ -136,18 +137,41 @@\n return d\n \n \n-def connected(host=\"8.8.8.8\", port=53, timeout=3):\n+def connected():\n+ \"\"\" Check connection by connecting to 8.8.8.8, if this is\n+ blocked/fails, Microsoft NCSI is used as a backup\n+\n+ Returns:\n+ True if internet connection can be detected\n+ \"\"\"\n+ return connected_dns() or connected_ncsi()\n+\n+\n+def connected_ncsi():\n+ \"\"\" Check internet connection by retrieving the Microsoft NCSI endpoint.\n+\n+ Returns:\n+ True if internet connection can be detected\n \"\"\"\n- Thanks to 7h3rAm on\n- Host: 8.8.8.8 (google-public-dns-a.google.com)\n- OpenPort: 53/tcp\n- Service: domain (DNS/TCP)\n-\n- NOTE:\n- This is no longer in use by this version\n- New method checks for a connection using ConnectionError only when\n- a question is asked\n+ try:\n+ r = requests.get('http://www.msftncsi.com/ncsi.txt')\n+ if r.text == u'Microsoft NCSI':\n+ return True\n+ except Exception:\n+ pass\n+ return False\n+\n+\n+def connected_dns(host=\"8.8.8.8\", port=53, timeout=3):\n+ \"\"\" Check internet connection by connecting to DNS servers\n+\n+ Returns:\n+ True if internet connection can be detected\n \"\"\"\n+ # Thanks to 7h3rAm on\n+ # Host: 8.8.8.8 (google-public-dns-a.google.com)\n+ # OpenPort: 53/tcp\n+ # Service: domain (DNS/TCP)\n try:\n socket.setdefaulttimeout(timeout)\n socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))\n", "issue": "Initial load of skills fails if auto_update is true.\nOn a new clone of the dev tree on an Ubuntu 16.04 machine **that's never run mycroft before.**\r\n\r\nI ran _msm/msm default_ and it downloaded 27 skills, but mycroft wouldn't load any skills except _pairing_. 
I looked at the skills loading code and saw it wasn't getting past this:\r\n\r\n```\r\n if (exists(SKILLS_DIR) and\r\n (self.next_download or not update)):\r\n```\r\n\r\nAfter I changed my config to _auto_update: false_, they loaded.\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2017 Mycroft AI Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom __future__ import absolute_import\nimport socket\nimport subprocess\nfrom threading import Thread\nfrom time import sleep\n\nimport json\nimport os.path\nimport psutil\nfrom stat import S_ISREG, ST_MTIME, ST_MODE, ST_SIZE\n\nimport signal as sig\n\nimport mycroft.audio\nimport mycroft.configuration\nfrom mycroft.util.format import nice_number\n# Officially exported methods from this file:\n# play_wav, play_mp3, get_cache_directory,\n# resolve_resource_file, wait_while_speaking\nfrom mycroft.util.log import LOG\nfrom mycroft.util.parse import extract_datetime, extractnumber, normalize\nfrom mycroft.util.signal import *\n\n\ndef resolve_resource_file(res_name):\n \"\"\"Convert a resource into an absolute filename.\n\n Resource names are in the form: 'filename.ext'\n or 'path/filename.ext'\n\n The system wil look for ~/.mycroft/res_name first, and\n if not found will look at /opt/mycroft/res_name,\n then finally it will look for res_name in the 'mycroft/res'\n folder of the source code package.\n\n Example:\n With mycroft running as the user 'bob', if you called\n resolve_resource_file('snd/beep.wav')\n it would return either '/home/bob/.mycroft/snd/beep.wav' or\n '/opt/mycroft/snd/beep.wav' or '.../mycroft/res/snd/beep.wav',\n where the '...' is replaced by the path where the package has\n been installed.\n\n Args:\n res_name (str): a resource path/name\n \"\"\"\n\n # First look for fully qualified file (e.g. 
a user setting)\n if os.path.isfile(res_name):\n return res_name\n\n # Now look for ~/.mycroft/res_name (in user folder)\n filename = os.path.expanduser(\"~/.mycroft/\" + res_name)\n if os.path.isfile(filename):\n return filename\n\n # Next look for /opt/mycroft/res/res_name\n filename = os.path.expanduser(\"/opt/mycroft/\" + res_name)\n if os.path.isfile(filename):\n return filename\n\n # Finally look for it in the source package\n filename = os.path.join(os.path.dirname(__file__), '..', 'res', res_name)\n filename = os.path.abspath(os.path.normpath(filename))\n if os.path.isfile(filename):\n return filename\n\n return None # Resource cannot be resolved\n\n\ndef play_wav(uri):\n config = mycroft.configuration.Configuration.get()\n play_cmd = config.get(\"play_wav_cmdline\")\n play_wav_cmd = str(play_cmd).split(\" \")\n for index, cmd in enumerate(play_wav_cmd):\n if cmd == \"%1\":\n play_wav_cmd[index] = (get_http(uri))\n return subprocess.Popen(play_wav_cmd)\n\n\ndef play_mp3(uri):\n config = mycroft.configuration.Configuration.get()\n play_cmd = config.get(\"play_mp3_cmdline\")\n play_mp3_cmd = str(play_cmd).split(\" \")\n for index, cmd in enumerate(play_mp3_cmd):\n if cmd == \"%1\":\n play_mp3_cmd[index] = (get_http(uri))\n return subprocess.Popen(play_mp3_cmd)\n\n\ndef record(file_path, duration, rate, channels):\n if duration > 0:\n return subprocess.Popen(\n [\"arecord\", \"-r\", str(rate), \"-c\", str(channels), \"-d\",\n str(duration), file_path])\n else:\n return subprocess.Popen(\n [\"arecord\", \"-r\", str(rate), \"-c\", str(channels), file_path])\n\n\ndef get_http(uri):\n return uri.replace(\"https://\", \"http://\")\n\n\ndef remove_last_slash(url):\n if url and url.endswith('/'):\n url = url[:-1]\n return url\n\n\ndef read_stripped_lines(filename):\n with open(filename, 'r') as f:\n return [line.strip() for line in f]\n\n\ndef read_dict(filename, div='='):\n d = {}\n with open(filename, 'r') as f:\n for line in f:\n (key, val) = line.split(div)\n d[key.strip()] = val.strip()\n return d\n\n\ndef connected(host=\"8.8.8.8\", port=53, timeout=3):\n \"\"\"\n Thanks to 7h3rAm on\n Host: 8.8.8.8 (google-public-dns-a.google.com)\n OpenPort: 53/tcp\n Service: domain (DNS/TCP)\n\n NOTE:\n This is no longer in use by this version\n New method checks for a connection using ConnectionError only when\n a question is asked\n \"\"\"\n try:\n socket.setdefaulttimeout(timeout)\n socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))\n return True\n except IOError:\n try:\n socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect(\n (\"8.8.4.4\", port))\n return True\n except IOError:\n return False\n\n\ndef curate_cache(directory, min_free_percent=5.0, min_free_disk=50):\n \"\"\"Clear out the directory if needed\n\n This assumes all the files in the directory can be deleted as freely\n\n Args:\n directory (str): directory path that holds cached files\n min_free_percent (float): percentage (0.0-100.0) of drive to keep free,\n default is 5% if not specified.\n min_free_disk (float): minimum allowed disk space in MB, default\n value is 50 MB if not specified.\n \"\"\"\n\n # Simpleminded implementation -- keep a certain percentage of the\n # disk available.\n # TODO: Would be easy to add more options, like whitelisted files, etc.\n space = psutil.disk_usage(directory)\n\n # convert from MB to bytes\n min_free_disk *= 1024 * 1024\n # space.percent = space.used/space.total*100.0\n percent_free = 100.0 - space.percent\n if percent_free < min_free_percent and space.free < 
min_free_disk:\n LOG.info('Low diskspace detected, cleaning cache')\n # calculate how many bytes we need to delete\n bytes_needed = (min_free_percent - percent_free) / 100.0 * space.total\n bytes_needed = int(bytes_needed + 1.0)\n\n # get all entries in the directory w/ stats\n entries = (os.path.join(directory, fn) for fn in os.listdir(directory))\n entries = ((os.stat(path), path) for path in entries)\n\n # leave only regular files, insert modification date\n entries = ((stat[ST_MTIME], stat[ST_SIZE], path)\n for stat, path in entries if S_ISREG(stat[ST_MODE]))\n\n # delete files with oldest modification date until space is freed\n space_freed = 0\n for moddate, fsize, path in sorted(entries):\n try:\n os.remove(path)\n space_freed += fsize\n except:\n pass\n\n if space_freed > bytes_needed:\n return # deleted enough!\n\n\ndef get_cache_directory(domain=None):\n \"\"\"Get a directory for caching data\n\n This directory can be used to hold temporary caches of data to\n speed up performance. This directory will likely be part of a\n small RAM disk and may be cleared at any time. So code that\n uses these cached files must be able to fallback and regenerate\n the file.\n\n Args:\n domain (str): The cache domain. Basically just a subdirectory.\n\n Return:\n str: a path to the directory where you can cache data\n \"\"\"\n config = mycroft.configuration.Configuration.get()\n dir = config.get(\"cache_path\")\n if not dir:\n # If not defined, use /tmp/mycroft/cache\n dir = os.path.join(tempfile.gettempdir(), \"mycroft\", \"cache\")\n return ensure_directory_exists(dir, domain)\n\n\ndef validate_param(value, name):\n if not value:\n raise ValueError(\"Missing or empty %s in mycroft.conf \" % name)\n\n\ndef is_speaking():\n \"\"\"Determine if Text to Speech is occurring\n\n Returns:\n bool: True while still speaking\n \"\"\"\n LOG.info(\"mycroft.utils.is_speaking() is depreciated, use \"\n \"mycroft.audio.is_speaking() instead.\")\n return mycroft.audio.is_speaking()\n\n\ndef wait_while_speaking():\n \"\"\"Pause as long as Text to Speech is still happening\n\n Pause while Text to Speech is still happening. This always pauses\n briefly to ensure that any preceeding request to speak has time to\n begin.\n \"\"\"\n LOG.info(\"mycroft.utils.wait_while_speaking() is depreciated, use \"\n \"mycroft.audio.wait_while_speaking() instead.\")\n return mycroft.audio.wait_while_speaking()\n\n\ndef stop_speaking():\n # TODO: Less hacky approach to this once Audio Manager is implemented\n # Skills should only be able to stop speech they've initiated\n LOG.info(\"mycroft.utils.stop_speaking() is depreciated, use \"\n \"mycroft.audio.stop_speaking() instead.\")\n mycroft.audio.stop_speaking()\n\n\ndef get_arch():\n \"\"\" Get architecture string of system. \"\"\"\n return os.uname()[4]\n\n\ndef reset_sigint_handler():\n \"\"\"\n Reset the sigint handler to the default. 
This fixes KeyboardInterrupt\n not getting raised when started via start-mycroft.sh\n \"\"\"\n sig.signal(sig.SIGINT, sig.default_int_handler)\n\n\ndef create_daemon(target, args=(), kwargs=None):\n \"\"\"Helper to quickly create and start a thread with daemon = True\"\"\"\n t = Thread(target=target, args=args, kwargs=kwargs)\n t.daemon = True\n t.start()\n return t\n\n\ndef wait_for_exit_signal():\n \"\"\"Blocks until KeyboardInterrupt is received\"\"\"\n try:\n while True:\n sleep(100)\n except KeyboardInterrupt:\n pass\n\n\ndef create_echo_function(name, whitelist=None):\n from mycroft.configuration import Configuration\n blacklist = Configuration.get().get(\"ignore_logs\")\n\n def echo(message):\n \"\"\"Listen for messages and echo them for logging\"\"\"\n try:\n js_msg = json.loads(message)\n\n if whitelist and js_msg.get(\"type\") not in whitelist:\n return\n\n if blacklist and js_msg.get(\"type\") in blacklist:\n return\n\n if js_msg.get(\"type\") == \"registration\":\n # do not log tokens from registration messages\n js_msg[\"data\"][\"token\"] = None\n message = json.dumps(js_msg)\n except Exception:\n pass\n LOG(name).debug(message)\n return echo\n", "path": "mycroft/util/__init__.py"}], "after_files": [{"content": "# Copyright 2017 Mycroft AI Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom __future__ import absolute_import\nimport socket\nimport subprocess\nfrom threading import Thread\nfrom time import sleep\n\nimport json\nimport os.path\nimport psutil\nfrom stat import S_ISREG, ST_MTIME, ST_MODE, ST_SIZE\nimport requests\n\nimport signal as sig\n\nimport mycroft.audio\nimport mycroft.configuration\nfrom mycroft.util.format import nice_number\n# Officially exported methods from this file:\n# play_wav, play_mp3, get_cache_directory,\n# resolve_resource_file, wait_while_speaking\nfrom mycroft.util.log import LOG\nfrom mycroft.util.parse import extract_datetime, extractnumber, normalize\nfrom mycroft.util.signal import *\n\n\ndef resolve_resource_file(res_name):\n \"\"\"Convert a resource into an absolute filename.\n\n Resource names are in the form: 'filename.ext'\n or 'path/filename.ext'\n\n The system wil look for ~/.mycroft/res_name first, and\n if not found will look at /opt/mycroft/res_name,\n then finally it will look for res_name in the 'mycroft/res'\n folder of the source code package.\n\n Example:\n With mycroft running as the user 'bob', if you called\n resolve_resource_file('snd/beep.wav')\n it would return either '/home/bob/.mycroft/snd/beep.wav' or\n '/opt/mycroft/snd/beep.wav' or '.../mycroft/res/snd/beep.wav',\n where the '...' is replaced by the path where the package has\n been installed.\n\n Args:\n res_name (str): a resource path/name\n \"\"\"\n\n # First look for fully qualified file (e.g. 
a user setting)\n if os.path.isfile(res_name):\n return res_name\n\n # Now look for ~/.mycroft/res_name (in user folder)\n filename = os.path.expanduser(\"~/.mycroft/\" + res_name)\n if os.path.isfile(filename):\n return filename\n\n # Next look for /opt/mycroft/res/res_name\n filename = os.path.expanduser(\"/opt/mycroft/\" + res_name)\n if os.path.isfile(filename):\n return filename\n\n # Finally look for it in the source package\n filename = os.path.join(os.path.dirname(__file__), '..', 'res', res_name)\n filename = os.path.abspath(os.path.normpath(filename))\n if os.path.isfile(filename):\n return filename\n\n return None # Resource cannot be resolved\n\n\ndef play_wav(uri):\n config = mycroft.configuration.Configuration.get()\n play_cmd = config.get(\"play_wav_cmdline\")\n play_wav_cmd = str(play_cmd).split(\" \")\n for index, cmd in enumerate(play_wav_cmd):\n if cmd == \"%1\":\n play_wav_cmd[index] = (get_http(uri))\n return subprocess.Popen(play_wav_cmd)\n\n\ndef play_mp3(uri):\n config = mycroft.configuration.Configuration.get()\n play_cmd = config.get(\"play_mp3_cmdline\")\n play_mp3_cmd = str(play_cmd).split(\" \")\n for index, cmd in enumerate(play_mp3_cmd):\n if cmd == \"%1\":\n play_mp3_cmd[index] = (get_http(uri))\n return subprocess.Popen(play_mp3_cmd)\n\n\ndef record(file_path, duration, rate, channels):\n if duration > 0:\n return subprocess.Popen(\n [\"arecord\", \"-r\", str(rate), \"-c\", str(channels), \"-d\",\n str(duration), file_path])\n else:\n return subprocess.Popen(\n [\"arecord\", \"-r\", str(rate), \"-c\", str(channels), file_path])\n\n\ndef get_http(uri):\n return uri.replace(\"https://\", \"http://\")\n\n\ndef remove_last_slash(url):\n if url and url.endswith('/'):\n url = url[:-1]\n return url\n\n\ndef read_stripped_lines(filename):\n with open(filename, 'r') as f:\n return [line.strip() for line in f]\n\n\ndef read_dict(filename, div='='):\n d = {}\n with open(filename, 'r') as f:\n for line in f:\n (key, val) = line.split(div)\n d[key.strip()] = val.strip()\n return d\n\n\ndef connected():\n \"\"\" Check connection by connecting to 8.8.8.8, if this is\n blocked/fails, Microsoft NCSI is used as a backup\n\n Returns:\n True if internet connection can be detected\n \"\"\"\n return connected_dns() or connected_ncsi()\n\n\ndef connected_ncsi():\n \"\"\" Check internet connection by retrieving the Microsoft NCSI endpoint.\n\n Returns:\n True if internet connection can be detected\n \"\"\"\n try:\n r = requests.get('http://www.msftncsi.com/ncsi.txt')\n if r.text == u'Microsoft NCSI':\n return True\n except Exception:\n pass\n return False\n\n\ndef connected_dns(host=\"8.8.8.8\", port=53, timeout=3):\n \"\"\" Check internet connection by connecting to DNS servers\n\n Returns:\n True if internet connection can be detected\n \"\"\"\n # Thanks to 7h3rAm on\n # Host: 8.8.8.8 (google-public-dns-a.google.com)\n # OpenPort: 53/tcp\n # Service: domain (DNS/TCP)\n try:\n socket.setdefaulttimeout(timeout)\n socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))\n return True\n except IOError:\n try:\n socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect(\n (\"8.8.4.4\", port))\n return True\n except IOError:\n return False\n\n\ndef curate_cache(directory, min_free_percent=5.0, min_free_disk=50):\n \"\"\"Clear out the directory if needed\n\n This assumes all the files in the directory can be deleted as freely\n\n Args:\n directory (str): directory path that holds cached files\n min_free_percent (float): percentage (0.0-100.0) of drive to keep free,\n 
default is 5% if not specified.\n min_free_disk (float): minimum allowed disk space in MB, default\n value is 50 MB if not specified.\n \"\"\"\n\n # Simpleminded implementation -- keep a certain percentage of the\n # disk available.\n # TODO: Would be easy to add more options, like whitelisted files, etc.\n space = psutil.disk_usage(directory)\n\n # convert from MB to bytes\n min_free_disk *= 1024 * 1024\n # space.percent = space.used/space.total*100.0\n percent_free = 100.0 - space.percent\n if percent_free < min_free_percent and space.free < min_free_disk:\n LOG.info('Low diskspace detected, cleaning cache')\n # calculate how many bytes we need to delete\n bytes_needed = (min_free_percent - percent_free) / 100.0 * space.total\n bytes_needed = int(bytes_needed + 1.0)\n\n # get all entries in the directory w/ stats\n entries = (os.path.join(directory, fn) for fn in os.listdir(directory))\n entries = ((os.stat(path), path) for path in entries)\n\n # leave only regular files, insert modification date\n entries = ((stat[ST_MTIME], stat[ST_SIZE], path)\n for stat, path in entries if S_ISREG(stat[ST_MODE]))\n\n # delete files with oldest modification date until space is freed\n space_freed = 0\n for moddate, fsize, path in sorted(entries):\n try:\n os.remove(path)\n space_freed += fsize\n except:\n pass\n\n if space_freed > bytes_needed:\n return # deleted enough!\n\n\ndef get_cache_directory(domain=None):\n \"\"\"Get a directory for caching data\n\n This directory can be used to hold temporary caches of data to\n speed up performance. This directory will likely be part of a\n small RAM disk and may be cleared at any time. So code that\n uses these cached files must be able to fallback and regenerate\n the file.\n\n Args:\n domain (str): The cache domain. Basically just a subdirectory.\n\n Return:\n str: a path to the directory where you can cache data\n \"\"\"\n config = mycroft.configuration.Configuration.get()\n dir = config.get(\"cache_path\")\n if not dir:\n # If not defined, use /tmp/mycroft/cache\n dir = os.path.join(tempfile.gettempdir(), \"mycroft\", \"cache\")\n return ensure_directory_exists(dir, domain)\n\n\ndef validate_param(value, name):\n if not value:\n raise ValueError(\"Missing or empty %s in mycroft.conf \" % name)\n\n\ndef is_speaking():\n \"\"\"Determine if Text to Speech is occurring\n\n Returns:\n bool: True while still speaking\n \"\"\"\n LOG.info(\"mycroft.utils.is_speaking() is depreciated, use \"\n \"mycroft.audio.is_speaking() instead.\")\n return mycroft.audio.is_speaking()\n\n\ndef wait_while_speaking():\n \"\"\"Pause as long as Text to Speech is still happening\n\n Pause while Text to Speech is still happening. This always pauses\n briefly to ensure that any preceeding request to speak has time to\n begin.\n \"\"\"\n LOG.info(\"mycroft.utils.wait_while_speaking() is depreciated, use \"\n \"mycroft.audio.wait_while_speaking() instead.\")\n return mycroft.audio.wait_while_speaking()\n\n\ndef stop_speaking():\n # TODO: Less hacky approach to this once Audio Manager is implemented\n # Skills should only be able to stop speech they've initiated\n LOG.info(\"mycroft.utils.stop_speaking() is depreciated, use \"\n \"mycroft.audio.stop_speaking() instead.\")\n mycroft.audio.stop_speaking()\n\n\ndef get_arch():\n \"\"\" Get architecture string of system. \"\"\"\n return os.uname()[4]\n\n\ndef reset_sigint_handler():\n \"\"\"\n Reset the sigint handler to the default. 
This fixes KeyboardInterrupt\n not getting raised when started via start-mycroft.sh\n \"\"\"\n sig.signal(sig.SIGINT, sig.default_int_handler)\n\n\ndef create_daemon(target, args=(), kwargs=None):\n \"\"\"Helper to quickly create and start a thread with daemon = True\"\"\"\n t = Thread(target=target, args=args, kwargs=kwargs)\n t.daemon = True\n t.start()\n return t\n\n\ndef wait_for_exit_signal():\n \"\"\"Blocks until KeyboardInterrupt is received\"\"\"\n try:\n while True:\n sleep(100)\n except KeyboardInterrupt:\n pass\n\n\ndef create_echo_function(name, whitelist=None):\n from mycroft.configuration import Configuration\n blacklist = Configuration.get().get(\"ignore_logs\")\n\n def echo(message):\n \"\"\"Listen for messages and echo them for logging\"\"\"\n try:\n js_msg = json.loads(message)\n\n if whitelist and js_msg.get(\"type\") not in whitelist:\n return\n\n if blacklist and js_msg.get(\"type\") in blacklist:\n return\n\n if js_msg.get(\"type\") == \"registration\":\n # do not log tokens from registration messages\n js_msg[\"data\"][\"token\"] = None\n message = json.dumps(js_msg)\n except Exception:\n pass\n LOG(name).debug(message)\n return echo\n", "path": "mycroft/util/__init__.py"}]}
| 3,746 | 519 |
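A minimal standalone sketch of the connectivity-check pattern introduced by the Mycroft golden diff embedded above (a cheap DNS socket probe first, the Microsoft NCSI HTTP probe as a fallback); it assumes only the standard library plus `requests`, the same third-party dependency the patch adds:

```python
# Sketch of the patched pattern: try a DNS socket probe, then fall back to an
# HTTP probe against Microsoft's NCSI endpoint if port 53 is blocked.
import socket

import requests


def connected_dns(host="8.8.8.8", port=53, timeout=3):
    """True if a TCP connection to a public DNS server succeeds."""
    try:
        socket.setdefaulttimeout(timeout)
        socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))
        return True
    except IOError:
        return False


def connected_ncsi(timeout=3):
    """True if the NCSI endpoint returns its expected plain-text body."""
    try:
        r = requests.get("http://www.msftncsi.com/ncsi.txt", timeout=timeout)
        return r.text == "Microsoft NCSI"
    except requests.RequestException:
        return False


def connected():
    # DNS first (cheap); HTTP probe only when the DNS path is blocked.
    return connected_dns() or connected_ncsi()


if __name__ == "__main__":
    print("online" if connected() else "offline")
```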
gh_patches_debug_27651 | rasdani/github-patches | git_diff | pypa__setuptools-1921 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Handling of find_links in setup.cfg in 42.0.0
Hi,
I have a couple of `setup_requires` dependencies in my setup.py and my setup.cfg looks like this:
```
[easy_install]
find_links = https://example.com
```
Looking at: https://github.com/pypa/setuptools/blob/e84f616a6507ec9115fad68b221cbf5333d9d2d9/setuptools/installer.py#L119
The `cmd` ends up being:
```python
['python', '-m', 'pip', '--disable-pip-version-check', 'wheel', '--no-deps', '-w', '/tmp/tmpWyNzjZ', '--quiet', '--find-links', 'h', '--find-links', 't', '--find-links', 't', '--find-links', 'p', '--find-links', 's', '--find-links', ':', '--find-links', '/', '--find-links', '/', '--find-links', 'e', '--find-links', 'x', '--find-links', 'a', '--find-links', 'm', '--find-links', 'p', '--find-links', 'l', '--find-links', 'e', '--find-links', '.', '--find-links', 'c', '--find-links', 'o', '--find-links', 'm', 'babel; extra == "i18n"']
```
It seems the assumption was that `find_links` is a list but it ends up being a string.
Additionally, since pip is unable to build/fetch a wheel, the following line throws an IndexError:
https://github.com/pypa/setuptools/blob/e84f616a6507ec9115fad68b221cbf5333d9d2d9/setuptools/installer.py#L122
Perhaps this could be caught and a more user-friendly error displayed instead.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setuptools/installer.py`
Content:
```
1 import glob
2 import os
3 import subprocess
4 import sys
5 from distutils import log
6 from distutils.errors import DistutilsError
7
8 import pkg_resources
9 from setuptools.command.easy_install import easy_install
10 from setuptools.wheel import Wheel
11
12 from .py31compat import TemporaryDirectory
13
14
15 def _legacy_fetch_build_egg(dist, req):
16 """Fetch an egg needed for building.
17
18 Legacy path using EasyInstall.
19 """
20 tmp_dist = dist.__class__({'script_args': ['easy_install']})
21 opts = tmp_dist.get_option_dict('easy_install')
22 opts.clear()
23 opts.update(
24 (k, v)
25 for k, v in dist.get_option_dict('easy_install').items()
26 if k in (
27 # don't use any other settings
28 'find_links', 'site_dirs', 'index_url',
29 'optimize', 'site_dirs', 'allow_hosts',
30 ))
31 if dist.dependency_links:
32 links = dist.dependency_links[:]
33 if 'find_links' in opts:
34 links = opts['find_links'][1] + links
35 opts['find_links'] = ('setup', links)
36 install_dir = dist.get_egg_cache_dir()
37 cmd = easy_install(
38 tmp_dist, args=["x"], install_dir=install_dir,
39 exclude_scripts=True,
40 always_copy=False, build_directory=None, editable=False,
41 upgrade=False, multi_version=True, no_report=True, user=False
42 )
43 cmd.ensure_finalized()
44 return cmd.easy_install(req)
45
46
47 def fetch_build_egg(dist, req):
48 """Fetch an egg needed for building.
49
50 Use pip/wheel to fetch/build a wheel."""
51 # Check pip is available.
52 try:
53 pkg_resources.get_distribution('pip')
54 except pkg_resources.DistributionNotFound:
55 dist.announce(
56 'WARNING: The pip package is not available, falling back '
57 'to EasyInstall for handling setup_requires/test_requires; '
58 'this is deprecated and will be removed in a future version.'
59 , log.WARN
60 )
61 return _legacy_fetch_build_egg(dist, req)
62 # Warn if wheel is not.
63 try:
64 pkg_resources.get_distribution('wheel')
65 except pkg_resources.DistributionNotFound:
66 dist.announce('WARNING: The wheel package is not available.', log.WARN)
67 # Ignore environment markers; if supplied, it is required.
68 req = strip_marker(req)
69 # Take easy_install options into account, but do not override relevant
70 # pip environment variables (like PIP_INDEX_URL or PIP_QUIET); they'll
71 # take precedence.
72 opts = dist.get_option_dict('easy_install')
73 if 'allow_hosts' in opts:
74 raise DistutilsError('the `allow-hosts` option is not supported '
75 'when using pip to install requirements.')
76 if 'PIP_QUIET' in os.environ or 'PIP_VERBOSE' in os.environ:
77 quiet = False
78 else:
79 quiet = True
80 if 'PIP_INDEX_URL' in os.environ:
81 index_url = None
82 elif 'index_url' in opts:
83 index_url = opts['index_url'][1]
84 else:
85 index_url = None
86 if 'find_links' in opts:
87 find_links = opts['find_links'][1][:]
88 else:
89 find_links = []
90 if dist.dependency_links:
91 find_links.extend(dist.dependency_links)
92 eggs_dir = os.path.realpath(dist.get_egg_cache_dir())
93 environment = pkg_resources.Environment()
94 for egg_dist in pkg_resources.find_distributions(eggs_dir):
95 if egg_dist in req and environment.can_add(egg_dist):
96 return egg_dist
97 with TemporaryDirectory() as tmpdir:
98 cmd = [
99 sys.executable, '-m', 'pip',
100 '--disable-pip-version-check',
101 'wheel', '--no-deps',
102 '-w', tmpdir,
103 ]
104 if quiet:
105 cmd.append('--quiet')
106 if index_url is not None:
107 cmd.extend(('--index-url', index_url))
108 if find_links is not None:
109 for link in find_links:
110 cmd.extend(('--find-links', link))
111 # If requirement is a PEP 508 direct URL, directly pass
112 # the URL to pip, as `req @ url` does not work on the
113 # command line.
114 if req.url:
115 cmd.append(req.url)
116 else:
117 cmd.append(str(req))
118 try:
119 subprocess.check_call(cmd)
120 except subprocess.CalledProcessError as e:
121 raise DistutilsError(str(e))
122 wheel = Wheel(glob.glob(os.path.join(tmpdir, '*.whl'))[0])
123 dist_location = os.path.join(eggs_dir, wheel.egg_name())
124 wheel.install_as_egg(dist_location)
125 dist_metadata = pkg_resources.PathMetadata(
126 dist_location, os.path.join(dist_location, 'EGG-INFO'))
127 dist = pkg_resources.Distribution.from_filename(
128 dist_location, metadata=dist_metadata)
129 return dist
130
131
132 def strip_marker(req):
133 """
134 Return a new requirement without the environment marker to avoid
135 calling pip with something like `babel; extra == "i18n"`, which
136 would always be ignored.
137 """
138 # create a copy to avoid mutating the input
139 req = pkg_resources.Requirement.parse(str(req))
140 req.marker = None
141 return req
142
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setuptools/installer.py b/setuptools/installer.py
--- a/setuptools/installer.py
+++ b/setuptools/installer.py
@@ -7,11 +7,20 @@
import pkg_resources
from setuptools.command.easy_install import easy_install
+from setuptools.extern import six
from setuptools.wheel import Wheel
from .py31compat import TemporaryDirectory
+def _fixup_find_links(find_links):
+ """Ensure find-links option end-up being a list of strings."""
+ if isinstance(find_links, six.string_types):
+ return find_links.split()
+ assert isinstance(find_links, (tuple, list))
+ return find_links
+
+
def _legacy_fetch_build_egg(dist, req):
"""Fetch an egg needed for building.
@@ -31,7 +40,7 @@
if dist.dependency_links:
links = dist.dependency_links[:]
if 'find_links' in opts:
- links = opts['find_links'][1] + links
+ links = _fixup_find_links(opts['find_links'][1]) + links
opts['find_links'] = ('setup', links)
install_dir = dist.get_egg_cache_dir()
cmd = easy_install(
@@ -84,7 +93,7 @@
else:
index_url = None
if 'find_links' in opts:
- find_links = opts['find_links'][1][:]
+ find_links = _fixup_find_links(opts['find_links'][1])[:]
else:
find_links = []
if dist.dependency_links:
|
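The core of the fix above is normalizing `find_links` before iterating it. A short sketch of the failure mode and of the normalization helper, using plain `str` in place of `six.string_types`:

```python
# Sketch: why a string value for find_links explodes into one-character
# --find-links arguments, and how the patch's helper normalizes it first.
def fixup_find_links(find_links):
    """Mirror of the patched helper: accept either a string or a sequence."""
    if isinstance(find_links, str):
        return find_links.split()
    assert isinstance(find_links, (tuple, list))
    return find_links


def build_cmd(find_links):
    cmd = ["pip", "wheel", "--no-deps"]
    for link in find_links:  # iterating a str yields single characters
        cmd.extend(("--find-links", link))
    return cmd


print(build_cmd("https://example.com")[:7])
# ['pip', 'wheel', '--no-deps', '--find-links', 'h', '--find-links', 't']
print(build_cmd(fixup_find_links("https://example.com")))
# ['pip', 'wheel', '--no-deps', '--find-links', 'https://example.com']
```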
{"golden_diff": "diff --git a/setuptools/installer.py b/setuptools/installer.py\n--- a/setuptools/installer.py\n+++ b/setuptools/installer.py\n@@ -7,11 +7,20 @@\n \n import pkg_resources\n from setuptools.command.easy_install import easy_install\n+from setuptools.extern import six\n from setuptools.wheel import Wheel\n \n from .py31compat import TemporaryDirectory\n \n \n+def _fixup_find_links(find_links):\n+ \"\"\"Ensure find-links option end-up being a list of strings.\"\"\"\n+ if isinstance(find_links, six.string_types):\n+ return find_links.split()\n+ assert isinstance(find_links, (tuple, list))\n+ return find_links\n+\n+\n def _legacy_fetch_build_egg(dist, req):\n \"\"\"Fetch an egg needed for building.\n \n@@ -31,7 +40,7 @@\n if dist.dependency_links:\n links = dist.dependency_links[:]\n if 'find_links' in opts:\n- links = opts['find_links'][1] + links\n+ links = _fixup_find_links(opts['find_links'][1]) + links\n opts['find_links'] = ('setup', links)\n install_dir = dist.get_egg_cache_dir()\n cmd = easy_install(\n@@ -84,7 +93,7 @@\n else:\n index_url = None\n if 'find_links' in opts:\n- find_links = opts['find_links'][1][:]\n+ find_links = _fixup_find_links(opts['find_links'][1])[:]\n else:\n find_links = []\n if dist.dependency_links:\n", "issue": "Handling of find_links in setup.cfg in 42.0.0\nHi,\r\n\r\nI have a couple of `setup_requires` dependencies in my setup.py and my setup.cfg looks like this:\r\n\r\n```\r\n[easy_install]\r\nfind_links = https://example.com\r\n```\r\n\r\nLooking at: https://github.com/pypa/setuptools/blob/e84f616a6507ec9115fad68b221cbf5333d9d2d9/setuptools/installer.py#L119\r\n\r\nThe `cmd` ends up being:\r\n\r\n```python\r\n['python', '-m', 'pip', '--disable-pip-version-check', 'wheel', '--no-deps', '-w', '/tmp/tmpWyNzjZ', '--quiet', '--find-links', 'h', '--find-links', 't', '--find-links', 't', '--find-links', 'p', '--find-links', 's', '--find-links', ':', '--find-links', '/', '--find-links', '/', '--find-links', 'e', '--find-links', 'x', '--find-links', 'a', '--find-links', 'm', '--find-links', 'p', '--find-links', 'l', '--find-links', 'e', '--find-links', '.', '--find-links', 'c', '--find-links', 'o', '--find-links', 'm', 'babel; extra == \"i18n\"']\r\n```\r\nIt seems the assumption was that `find_links` is a list but it ends up being a string.\r\n\r\nAdditionally, since pip is unable to build/fetch a wheel, the following line throws an IndexError:\r\nhttps://github.com/pypa/setuptools/blob/e84f616a6507ec9115fad68b221cbf5333d9d2d9/setuptools/installer.py#L122\r\n\r\nPerhaps this could be caught and a more user-friendly error displayed instead.\r\n\n", "before_files": [{"content": "import glob\nimport os\nimport subprocess\nimport sys\nfrom distutils import log\nfrom distutils.errors import DistutilsError\n\nimport pkg_resources\nfrom setuptools.command.easy_install import easy_install\nfrom setuptools.wheel import Wheel\n\nfrom .py31compat import TemporaryDirectory\n\n\ndef _legacy_fetch_build_egg(dist, req):\n \"\"\"Fetch an egg needed for building.\n\n Legacy path using EasyInstall.\n \"\"\"\n tmp_dist = dist.__class__({'script_args': ['easy_install']})\n opts = tmp_dist.get_option_dict('easy_install')\n opts.clear()\n opts.update(\n (k, v)\n for k, v in dist.get_option_dict('easy_install').items()\n if k in (\n # don't use any other settings\n 'find_links', 'site_dirs', 'index_url',\n 'optimize', 'site_dirs', 'allow_hosts',\n ))\n if dist.dependency_links:\n links = dist.dependency_links[:]\n if 'find_links' in opts:\n links = opts['find_links'][1] + 
links\n opts['find_links'] = ('setup', links)\n install_dir = dist.get_egg_cache_dir()\n cmd = easy_install(\n tmp_dist, args=[\"x\"], install_dir=install_dir,\n exclude_scripts=True,\n always_copy=False, build_directory=None, editable=False,\n upgrade=False, multi_version=True, no_report=True, user=False\n )\n cmd.ensure_finalized()\n return cmd.easy_install(req)\n\n\ndef fetch_build_egg(dist, req):\n \"\"\"Fetch an egg needed for building.\n\n Use pip/wheel to fetch/build a wheel.\"\"\"\n # Check pip is available.\n try:\n pkg_resources.get_distribution('pip')\n except pkg_resources.DistributionNotFound:\n dist.announce(\n 'WARNING: The pip package is not available, falling back '\n 'to EasyInstall for handling setup_requires/test_requires; '\n 'this is deprecated and will be removed in a future version.'\n , log.WARN\n )\n return _legacy_fetch_build_egg(dist, req)\n # Warn if wheel is not.\n try:\n pkg_resources.get_distribution('wheel')\n except pkg_resources.DistributionNotFound:\n dist.announce('WARNING: The wheel package is not available.', log.WARN)\n # Ignore environment markers; if supplied, it is required.\n req = strip_marker(req)\n # Take easy_install options into account, but do not override relevant\n # pip environment variables (like PIP_INDEX_URL or PIP_QUIET); they'll\n # take precedence.\n opts = dist.get_option_dict('easy_install')\n if 'allow_hosts' in opts:\n raise DistutilsError('the `allow-hosts` option is not supported '\n 'when using pip to install requirements.')\n if 'PIP_QUIET' in os.environ or 'PIP_VERBOSE' in os.environ:\n quiet = False\n else:\n quiet = True\n if 'PIP_INDEX_URL' in os.environ:\n index_url = None\n elif 'index_url' in opts:\n index_url = opts['index_url'][1]\n else:\n index_url = None\n if 'find_links' in opts:\n find_links = opts['find_links'][1][:]\n else:\n find_links = []\n if dist.dependency_links:\n find_links.extend(dist.dependency_links)\n eggs_dir = os.path.realpath(dist.get_egg_cache_dir())\n environment = pkg_resources.Environment()\n for egg_dist in pkg_resources.find_distributions(eggs_dir):\n if egg_dist in req and environment.can_add(egg_dist):\n return egg_dist\n with TemporaryDirectory() as tmpdir:\n cmd = [\n sys.executable, '-m', 'pip',\n '--disable-pip-version-check',\n 'wheel', '--no-deps',\n '-w', tmpdir,\n ]\n if quiet:\n cmd.append('--quiet')\n if index_url is not None:\n cmd.extend(('--index-url', index_url))\n if find_links is not None:\n for link in find_links:\n cmd.extend(('--find-links', link))\n # If requirement is a PEP 508 direct URL, directly pass\n # the URL to pip, as `req @ url` does not work on the\n # command line.\n if req.url:\n cmd.append(req.url)\n else:\n cmd.append(str(req))\n try:\n subprocess.check_call(cmd)\n except subprocess.CalledProcessError as e:\n raise DistutilsError(str(e))\n wheel = Wheel(glob.glob(os.path.join(tmpdir, '*.whl'))[0])\n dist_location = os.path.join(eggs_dir, wheel.egg_name())\n wheel.install_as_egg(dist_location)\n dist_metadata = pkg_resources.PathMetadata(\n dist_location, os.path.join(dist_location, 'EGG-INFO'))\n dist = pkg_resources.Distribution.from_filename(\n dist_location, metadata=dist_metadata)\n return dist\n\n\ndef strip_marker(req):\n \"\"\"\n Return a new requirement without the environment marker to avoid\n calling pip with something like `babel; extra == \"i18n\"`, which\n would always be ignored.\n \"\"\"\n # create a copy to avoid mutating the input\n req = pkg_resources.Requirement.parse(str(req))\n req.marker = None\n return req\n", "path": 
"setuptools/installer.py"}], "after_files": [{"content": "import glob\nimport os\nimport subprocess\nimport sys\nfrom distutils import log\nfrom distutils.errors import DistutilsError\n\nimport pkg_resources\nfrom setuptools.command.easy_install import easy_install\nfrom setuptools.extern import six\nfrom setuptools.wheel import Wheel\n\nfrom .py31compat import TemporaryDirectory\n\n\ndef _fixup_find_links(find_links):\n \"\"\"Ensure find-links option end-up being a list of strings.\"\"\"\n if isinstance(find_links, six.string_types):\n return find_links.split()\n assert isinstance(find_links, (tuple, list))\n return find_links\n\n\ndef _legacy_fetch_build_egg(dist, req):\n \"\"\"Fetch an egg needed for building.\n\n Legacy path using EasyInstall.\n \"\"\"\n tmp_dist = dist.__class__({'script_args': ['easy_install']})\n opts = tmp_dist.get_option_dict('easy_install')\n opts.clear()\n opts.update(\n (k, v)\n for k, v in dist.get_option_dict('easy_install').items()\n if k in (\n # don't use any other settings\n 'find_links', 'site_dirs', 'index_url',\n 'optimize', 'site_dirs', 'allow_hosts',\n ))\n if dist.dependency_links:\n links = dist.dependency_links[:]\n if 'find_links' in opts:\n links = _fixup_find_links(opts['find_links'][1]) + links\n opts['find_links'] = ('setup', links)\n install_dir = dist.get_egg_cache_dir()\n cmd = easy_install(\n tmp_dist, args=[\"x\"], install_dir=install_dir,\n exclude_scripts=True,\n always_copy=False, build_directory=None, editable=False,\n upgrade=False, multi_version=True, no_report=True, user=False\n )\n cmd.ensure_finalized()\n return cmd.easy_install(req)\n\n\ndef fetch_build_egg(dist, req):\n \"\"\"Fetch an egg needed for building.\n\n Use pip/wheel to fetch/build a wheel.\"\"\"\n # Check pip is available.\n try:\n pkg_resources.get_distribution('pip')\n except pkg_resources.DistributionNotFound:\n dist.announce(\n 'WARNING: The pip package is not available, falling back '\n 'to EasyInstall for handling setup_requires/test_requires; '\n 'this is deprecated and will be removed in a future version.'\n , log.WARN\n )\n return _legacy_fetch_build_egg(dist, req)\n # Warn if wheel is not.\n try:\n pkg_resources.get_distribution('wheel')\n except pkg_resources.DistributionNotFound:\n dist.announce('WARNING: The wheel package is not available.', log.WARN)\n # Ignore environment markers; if supplied, it is required.\n req = strip_marker(req)\n # Take easy_install options into account, but do not override relevant\n # pip environment variables (like PIP_INDEX_URL or PIP_QUIET); they'll\n # take precedence.\n opts = dist.get_option_dict('easy_install')\n if 'allow_hosts' in opts:\n raise DistutilsError('the `allow-hosts` option is not supported '\n 'when using pip to install requirements.')\n if 'PIP_QUIET' in os.environ or 'PIP_VERBOSE' in os.environ:\n quiet = False\n else:\n quiet = True\n if 'PIP_INDEX_URL' in os.environ:\n index_url = None\n elif 'index_url' in opts:\n index_url = opts['index_url'][1]\n else:\n index_url = None\n if 'find_links' in opts:\n find_links = _fixup_find_links(opts['find_links'][1])[:]\n else:\n find_links = []\n if dist.dependency_links:\n find_links.extend(dist.dependency_links)\n eggs_dir = os.path.realpath(dist.get_egg_cache_dir())\n environment = pkg_resources.Environment()\n for egg_dist in pkg_resources.find_distributions(eggs_dir):\n if egg_dist in req and environment.can_add(egg_dist):\n return egg_dist\n with TemporaryDirectory() as tmpdir:\n cmd = [\n sys.executable, '-m', 'pip',\n '--disable-pip-version-check',\n 
'wheel', '--no-deps',\n '-w', tmpdir,\n ]\n if quiet:\n cmd.append('--quiet')\n if index_url is not None:\n cmd.extend(('--index-url', index_url))\n if find_links is not None:\n for link in find_links:\n cmd.extend(('--find-links', link))\n # If requirement is a PEP 508 direct URL, directly pass\n # the URL to pip, as `req @ url` does not work on the\n # command line.\n if req.url:\n cmd.append(req.url)\n else:\n cmd.append(str(req))\n try:\n subprocess.check_call(cmd)\n except subprocess.CalledProcessError as e:\n raise DistutilsError(str(e))\n wheel = Wheel(glob.glob(os.path.join(tmpdir, '*.whl'))[0])\n dist_location = os.path.join(eggs_dir, wheel.egg_name())\n wheel.install_as_egg(dist_location)\n dist_metadata = pkg_resources.PathMetadata(\n dist_location, os.path.join(dist_location, 'EGG-INFO'))\n dist = pkg_resources.Distribution.from_filename(\n dist_location, metadata=dist_metadata)\n return dist\n\n\ndef strip_marker(req):\n \"\"\"\n Return a new requirement without the environment marker to avoid\n calling pip with something like `babel; extra == \"i18n\"`, which\n would always be ignored.\n \"\"\"\n # create a copy to avoid mutating the input\n req = pkg_resources.Requirement.parse(str(req))\n req.marker = None\n return req\n", "path": "setuptools/installer.py"}]}
| 2,138 | 341 |
gh_patches_debug_43501 | rasdani/github-patches | git_diff | goauthentik__authentik-7264 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: Authenticator SMS Challenge response doesn't have _errors attribute
**Describe the bug**
It seems that something has changed with regard to the Authenticator SMS Challenge Response.
**To Reproduce**
Steps to reproduce the behavior:
1. Have a perfectly working SMS Authenticator stage prior to upgrading to 2023.8.3
2. Upgrade
3. Don't see any immediate errors.
4. See error when user tries to register using said flow.
**Expected behavior**
I would have expected it to not error out.
**Logs**
<details>
<summary>Stacktrace from authentik</summary>
```
Traceback (most recent call last):
File "/authentik/flows/views/executor.py", line 298, in get
stage_response = self.current_stage_view.get(request, *args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/authentik/stages/authenticator_sms/stage.py", line 117, in get
response._errors.setdefault("phone_number", [])
^^^^^^^^^^^^^^^^
builtins.AttributeError: 'AuthenticatorSMSChallengeResponse' object has no attribute '_errors'
```
</details>
**Version and Deployment (please complete the following information):**
- authentik version: 2023.8.3
- Deployment: Ansible with Docker
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `authentik/stages/authenticator_sms/stage.py`
Content:
```
1 """SMS Setup stage"""
2 from typing import Optional
3
4 from django.db.models import Q
5 from django.http import HttpRequest, HttpResponse
6 from django.http.request import QueryDict
7 from django.utils.translation import gettext_lazy as _
8 from rest_framework.exceptions import ValidationError
9 from rest_framework.fields import BooleanField, CharField, IntegerField
10
11 from authentik.flows.challenge import (
12 Challenge,
13 ChallengeResponse,
14 ChallengeTypes,
15 ErrorDetailSerializer,
16 WithUserInfoChallenge,
17 )
18 from authentik.flows.stage import ChallengeStageView
19 from authentik.stages.authenticator_sms.models import (
20 AuthenticatorSMSStage,
21 SMSDevice,
22 hash_phone_number,
23 )
24 from authentik.stages.prompt.stage import PLAN_CONTEXT_PROMPT
25
26 SESSION_KEY_SMS_DEVICE = "authentik/stages/authenticator_sms/sms_device"
27
28
29 class AuthenticatorSMSChallenge(WithUserInfoChallenge):
30 """SMS Setup challenge"""
31
32 # Set to true if no previous prompt stage set the phone number
33 # this stage will also check prompt_data.phone
34 phone_number_required = BooleanField(default=True)
35 component = CharField(default="ak-stage-authenticator-sms")
36
37
38 class AuthenticatorSMSChallengeResponse(ChallengeResponse):
39 """SMS Challenge response, device is set by get_response_instance"""
40
41 device: SMSDevice
42
43 code = IntegerField(required=False)
44 phone_number = CharField(required=False)
45
46 component = CharField(default="ak-stage-authenticator-sms")
47
48 def validate(self, attrs: dict) -> dict:
49 """Check"""
50 if "code" not in attrs:
51 self.device.phone_number = attrs["phone_number"]
52 self.stage.validate_and_send(attrs["phone_number"])
53 return super().validate(attrs)
54 if not self.device.verify_token(str(attrs["code"])):
55 raise ValidationError(_("Code does not match"))
56 self.device.confirmed = True
57 return super().validate(attrs)
58
59
60 class AuthenticatorSMSStageView(ChallengeStageView):
61 """OTP sms Setup stage"""
62
63 response_class = AuthenticatorSMSChallengeResponse
64
65 def validate_and_send(self, phone_number: str):
66 """Validate phone number and send message"""
67 stage: AuthenticatorSMSStage = self.executor.current_stage
68 hashed_number = hash_phone_number(phone_number)
69 query = Q(phone_number=hashed_number) | Q(phone_number=phone_number)
70 if SMSDevice.objects.filter(query, stage=stage.pk).exists():
71 raise ValidationError(_("Invalid phone number"))
72 # No code yet, but we have a phone number, so send a verification message
73 device: SMSDevice = self.request.session[SESSION_KEY_SMS_DEVICE]
74 stage.send(device.token, device)
75
76 def _has_phone_number(self) -> Optional[str]:
77 context = self.executor.plan.context
78 if "phone" in context.get(PLAN_CONTEXT_PROMPT, {}):
79 self.logger.debug("got phone number from plan context")
80 return context.get(PLAN_CONTEXT_PROMPT, {}).get("phone")
81 if SESSION_KEY_SMS_DEVICE in self.request.session:
82 self.logger.debug("got phone number from device in session")
83 device: SMSDevice = self.request.session[SESSION_KEY_SMS_DEVICE]
84 if device.phone_number == "":
85 return None
86 return device.phone_number
87 return None
88
89 def get_challenge(self, *args, **kwargs) -> Challenge:
90 return AuthenticatorSMSChallenge(
91 data={
92 "type": ChallengeTypes.NATIVE.value,
93 "phone_number_required": self._has_phone_number() is None,
94 }
95 )
96
97 def get_response_instance(self, data: QueryDict) -> ChallengeResponse:
98 response = super().get_response_instance(data)
99 response.device = self.request.session[SESSION_KEY_SMS_DEVICE]
100 return response
101
102 def get(self, request: HttpRequest, *args, **kwargs) -> HttpResponse:
103 user = self.get_pending_user()
104
105 stage: AuthenticatorSMSStage = self.executor.current_stage
106
107 if SESSION_KEY_SMS_DEVICE not in self.request.session:
108 device = SMSDevice(user=user, confirmed=False, stage=stage, name="SMS Device")
109 device.generate_token(commit=False)
110 self.request.session[SESSION_KEY_SMS_DEVICE] = device
111 if phone_number := self._has_phone_number():
112 device.phone_number = phone_number
113 try:
114 self.validate_and_send(phone_number)
115 except ValidationError as exc:
116 response = AuthenticatorSMSChallengeResponse()
117 response._errors.setdefault("phone_number", [])
118 response._errors["phone_number"].append(ErrorDetailSerializer(exc.detail))
119 return self.challenge_invalid(response)
120 return super().get(request, *args, **kwargs)
121
122 def challenge_valid(self, response: ChallengeResponse) -> HttpResponse:
123 """SMS Token is validated by challenge"""
124 device: SMSDevice = self.request.session[SESSION_KEY_SMS_DEVICE]
125 if not device.confirmed:
126 return self.challenge_invalid(response)
127 stage: AuthenticatorSMSStage = self.executor.current_stage
128 if stage.verify_only:
129 self.logger.debug("Hashing number on device")
130 device.set_hashed_number()
131 device.save()
132 del self.request.session[SESSION_KEY_SMS_DEVICE]
133 return self.executor.stage_ok()
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/authentik/stages/authenticator_sms/stage.py b/authentik/stages/authenticator_sms/stage.py
--- a/authentik/stages/authenticator_sms/stage.py
+++ b/authentik/stages/authenticator_sms/stage.py
@@ -12,7 +12,6 @@
Challenge,
ChallengeResponse,
ChallengeTypes,
- ErrorDetailSerializer,
WithUserInfoChallenge,
)
from authentik.flows.stage import ChallengeStageView
@@ -24,6 +23,7 @@
from authentik.stages.prompt.stage import PLAN_CONTEXT_PROMPT
SESSION_KEY_SMS_DEVICE = "authentik/stages/authenticator_sms/sms_device"
+PLAN_CONTEXT_PHONE = "phone"
class AuthenticatorSMSChallenge(WithUserInfoChallenge):
@@ -48,6 +48,8 @@
def validate(self, attrs: dict) -> dict:
"""Check"""
if "code" not in attrs:
+ if "phone_number" not in attrs:
+ raise ValidationError("phone_number required")
self.device.phone_number = attrs["phone_number"]
self.stage.validate_and_send(attrs["phone_number"])
return super().validate(attrs)
@@ -67,6 +69,7 @@
stage: AuthenticatorSMSStage = self.executor.current_stage
hashed_number = hash_phone_number(phone_number)
query = Q(phone_number=hashed_number) | Q(phone_number=phone_number)
+ print(SMSDevice.objects.filter(query, stage=stage.pk))
if SMSDevice.objects.filter(query, stage=stage.pk).exists():
raise ValidationError(_("Invalid phone number"))
# No code yet, but we have a phone number, so send a verification message
@@ -75,9 +78,9 @@
def _has_phone_number(self) -> Optional[str]:
context = self.executor.plan.context
- if "phone" in context.get(PLAN_CONTEXT_PROMPT, {}):
+ if PLAN_CONTEXT_PHONE in context.get(PLAN_CONTEXT_PROMPT, {}):
self.logger.debug("got phone number from plan context")
- return context.get(PLAN_CONTEXT_PROMPT, {}).get("phone")
+ return context.get(PLAN_CONTEXT_PROMPT, {}).get(PLAN_CONTEXT_PHONE)
if SESSION_KEY_SMS_DEVICE in self.request.session:
self.logger.debug("got phone number from device in session")
device: SMSDevice = self.request.session[SESSION_KEY_SMS_DEVICE]
@@ -113,10 +116,17 @@
try:
self.validate_and_send(phone_number)
except ValidationError as exc:
- response = AuthenticatorSMSChallengeResponse()
- response._errors.setdefault("phone_number", [])
- response._errors["phone_number"].append(ErrorDetailSerializer(exc.detail))
- return self.challenge_invalid(response)
+ # We had a phone number given already (at this point only possible from flow
+ # context), but an error occurred while sending a number (most likely)
+ # due to a duplicate device, so delete the number we got given, reset the state
+ # (ish) and retry
+ device.phone_number = ""
+ self.executor.plan.context.get(PLAN_CONTEXT_PROMPT, {}).pop(
+ PLAN_CONTEXT_PHONE, None
+ )
+ self.request.session.pop(SESSION_KEY_SMS_DEVICE, None)
+ self.logger.warning("failed to send SMS message to pre-set number", exc=exc)
+ return self.get(request, *args, **kwargs)
return super().get(request, *args, **kwargs)
def challenge_valid(self, response: ChallengeResponse) -> HttpResponse:
|
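The traceback in this record boils down to touching `_errors` on a serializer before `is_valid()` has run; in Django REST Framework that attribute is only assigned inside `is_valid()`, which is why the patch above stops building an error response by hand. A plain-Python analogue of the failure (the class below is hypothetical, standing in for the DRF challenge response):

```python
# Analogue of the bug: _errors is created inside is_valid(), so accessing it
# on a freshly constructed response raises AttributeError, as in the traceback.
class FakeChallengeResponse:
    def is_valid(self):
        # DRF's Serializer.is_valid() is where _errors is first assigned.
        self._errors = {}
        return not self._errors


resp = FakeChallengeResponse()
try:
    resp._errors.setdefault("phone_number", [])  # the old stage code did this
except AttributeError as exc:
    print(exc)  # 'FakeChallengeResponse' object has no attribute '_errors'

resp.is_valid()
resp._errors.setdefault("phone_number", [])  # safe only after is_valid()
print(resp._errors)  # {'phone_number': []}
```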
{"golden_diff": "diff --git a/authentik/stages/authenticator_sms/stage.py b/authentik/stages/authenticator_sms/stage.py\n--- a/authentik/stages/authenticator_sms/stage.py\n+++ b/authentik/stages/authenticator_sms/stage.py\n@@ -12,7 +12,6 @@\n Challenge,\n ChallengeResponse,\n ChallengeTypes,\n- ErrorDetailSerializer,\n WithUserInfoChallenge,\n )\n from authentik.flows.stage import ChallengeStageView\n@@ -24,6 +23,7 @@\n from authentik.stages.prompt.stage import PLAN_CONTEXT_PROMPT\n \n SESSION_KEY_SMS_DEVICE = \"authentik/stages/authenticator_sms/sms_device\"\n+PLAN_CONTEXT_PHONE = \"phone\"\n \n \n class AuthenticatorSMSChallenge(WithUserInfoChallenge):\n@@ -48,6 +48,8 @@\n def validate(self, attrs: dict) -> dict:\n \"\"\"Check\"\"\"\n if \"code\" not in attrs:\n+ if \"phone_number\" not in attrs:\n+ raise ValidationError(\"phone_number required\")\n self.device.phone_number = attrs[\"phone_number\"]\n self.stage.validate_and_send(attrs[\"phone_number\"])\n return super().validate(attrs)\n@@ -67,6 +69,7 @@\n stage: AuthenticatorSMSStage = self.executor.current_stage\n hashed_number = hash_phone_number(phone_number)\n query = Q(phone_number=hashed_number) | Q(phone_number=phone_number)\n+ print(SMSDevice.objects.filter(query, stage=stage.pk))\n if SMSDevice.objects.filter(query, stage=stage.pk).exists():\n raise ValidationError(_(\"Invalid phone number\"))\n # No code yet, but we have a phone number, so send a verification message\n@@ -75,9 +78,9 @@\n \n def _has_phone_number(self) -> Optional[str]:\n context = self.executor.plan.context\n- if \"phone\" in context.get(PLAN_CONTEXT_PROMPT, {}):\n+ if PLAN_CONTEXT_PHONE in context.get(PLAN_CONTEXT_PROMPT, {}):\n self.logger.debug(\"got phone number from plan context\")\n- return context.get(PLAN_CONTEXT_PROMPT, {}).get(\"phone\")\n+ return context.get(PLAN_CONTEXT_PROMPT, {}).get(PLAN_CONTEXT_PHONE)\n if SESSION_KEY_SMS_DEVICE in self.request.session:\n self.logger.debug(\"got phone number from device in session\")\n device: SMSDevice = self.request.session[SESSION_KEY_SMS_DEVICE]\n@@ -113,10 +116,17 @@\n try:\n self.validate_and_send(phone_number)\n except ValidationError as exc:\n- response = AuthenticatorSMSChallengeResponse()\n- response._errors.setdefault(\"phone_number\", [])\n- response._errors[\"phone_number\"].append(ErrorDetailSerializer(exc.detail))\n- return self.challenge_invalid(response)\n+ # We had a phone number given already (at this point only possible from flow\n+ # context), but an error occurred while sending a number (most likely)\n+ # due to a duplicate device, so delete the number we got given, reset the state\n+ # (ish) and retry\n+ device.phone_number = \"\"\n+ self.executor.plan.context.get(PLAN_CONTEXT_PROMPT, {}).pop(\n+ PLAN_CONTEXT_PHONE, None\n+ )\n+ self.request.session.pop(SESSION_KEY_SMS_DEVICE, None)\n+ self.logger.warning(\"failed to send SMS message to pre-set number\", exc=exc)\n+ return self.get(request, *args, **kwargs)\n return super().get(request, *args, **kwargs)\n \n def challenge_valid(self, response: ChallengeResponse) -> HttpResponse:\n", "issue": "Bug: Authenticator SMS Challenge response doesn't have _errors attribute\n**Describe the bug**\r\nIt seems that something has changed in the regards to Authenticator SMS Challenge Response.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Have an perfectly working SMS Authenicator stage prior to upgrading to 2023.8.3\r\n2. Upgrade\r\n3. Don't see any immediate errors.\r\n4. 
See error when user tries to register using said flow.\r\n\r\n**Expected behavior**\r\nI would have expected it to not error out.\r\n\r\n**Logs**\r\n<details>\r\n <summary>Stacktrace from authentik</summary>\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/authentik/flows/views/executor.py\", line 298, in get\r\n stage_response = self.current_stage_view.get(request, *args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/authentik/stages/authenticator_sms/stage.py\", line 117, in get\r\n response._errors.setdefault(\"phone_number\", [])\r\n ^^^^^^^^^^^^^^^^\r\nbuiltins.AttributeError: 'AuthenticatorSMSChallengeResponse' object has no attribute '_errors'\r\n```\r\n</details>\r\n\r\n\r\n**Version and Deployment (please complete the following information):**\r\n- authentik version: 2023.8.3\r\n- Deployment: Ansible with Docker\r\n \n", "before_files": [{"content": "\"\"\"SMS Setup stage\"\"\"\nfrom typing import Optional\n\nfrom django.db.models import Q\nfrom django.http import HttpRequest, HttpResponse\nfrom django.http.request import QueryDict\nfrom django.utils.translation import gettext_lazy as _\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.fields import BooleanField, CharField, IntegerField\n\nfrom authentik.flows.challenge import (\n Challenge,\n ChallengeResponse,\n ChallengeTypes,\n ErrorDetailSerializer,\n WithUserInfoChallenge,\n)\nfrom authentik.flows.stage import ChallengeStageView\nfrom authentik.stages.authenticator_sms.models import (\n AuthenticatorSMSStage,\n SMSDevice,\n hash_phone_number,\n)\nfrom authentik.stages.prompt.stage import PLAN_CONTEXT_PROMPT\n\nSESSION_KEY_SMS_DEVICE = \"authentik/stages/authenticator_sms/sms_device\"\n\n\nclass AuthenticatorSMSChallenge(WithUserInfoChallenge):\n \"\"\"SMS Setup challenge\"\"\"\n\n # Set to true if no previous prompt stage set the phone number\n # this stage will also check prompt_data.phone\n phone_number_required = BooleanField(default=True)\n component = CharField(default=\"ak-stage-authenticator-sms\")\n\n\nclass AuthenticatorSMSChallengeResponse(ChallengeResponse):\n \"\"\"SMS Challenge response, device is set by get_response_instance\"\"\"\n\n device: SMSDevice\n\n code = IntegerField(required=False)\n phone_number = CharField(required=False)\n\n component = CharField(default=\"ak-stage-authenticator-sms\")\n\n def validate(self, attrs: dict) -> dict:\n \"\"\"Check\"\"\"\n if \"code\" not in attrs:\n self.device.phone_number = attrs[\"phone_number\"]\n self.stage.validate_and_send(attrs[\"phone_number\"])\n return super().validate(attrs)\n if not self.device.verify_token(str(attrs[\"code\"])):\n raise ValidationError(_(\"Code does not match\"))\n self.device.confirmed = True\n return super().validate(attrs)\n\n\nclass AuthenticatorSMSStageView(ChallengeStageView):\n \"\"\"OTP sms Setup stage\"\"\"\n\n response_class = AuthenticatorSMSChallengeResponse\n\n def validate_and_send(self, phone_number: str):\n \"\"\"Validate phone number and send message\"\"\"\n stage: AuthenticatorSMSStage = self.executor.current_stage\n hashed_number = hash_phone_number(phone_number)\n query = Q(phone_number=hashed_number) | Q(phone_number=phone_number)\n if SMSDevice.objects.filter(query, stage=stage.pk).exists():\n raise ValidationError(_(\"Invalid phone number\"))\n # No code yet, but we have a phone number, so send a verification message\n device: SMSDevice = self.request.session[SESSION_KEY_SMS_DEVICE]\n stage.send(device.token, device)\n\n def 
_has_phone_number(self) -> Optional[str]:\n context = self.executor.plan.context\n if \"phone\" in context.get(PLAN_CONTEXT_PROMPT, {}):\n self.logger.debug(\"got phone number from plan context\")\n return context.get(PLAN_CONTEXT_PROMPT, {}).get(\"phone\")\n if SESSION_KEY_SMS_DEVICE in self.request.session:\n self.logger.debug(\"got phone number from device in session\")\n device: SMSDevice = self.request.session[SESSION_KEY_SMS_DEVICE]\n if device.phone_number == \"\":\n return None\n return device.phone_number\n return None\n\n def get_challenge(self, *args, **kwargs) -> Challenge:\n return AuthenticatorSMSChallenge(\n data={\n \"type\": ChallengeTypes.NATIVE.value,\n \"phone_number_required\": self._has_phone_number() is None,\n }\n )\n\n def get_response_instance(self, data: QueryDict) -> ChallengeResponse:\n response = super().get_response_instance(data)\n response.device = self.request.session[SESSION_KEY_SMS_DEVICE]\n return response\n\n def get(self, request: HttpRequest, *args, **kwargs) -> HttpResponse:\n user = self.get_pending_user()\n\n stage: AuthenticatorSMSStage = self.executor.current_stage\n\n if SESSION_KEY_SMS_DEVICE not in self.request.session:\n device = SMSDevice(user=user, confirmed=False, stage=stage, name=\"SMS Device\")\n device.generate_token(commit=False)\n self.request.session[SESSION_KEY_SMS_DEVICE] = device\n if phone_number := self._has_phone_number():\n device.phone_number = phone_number\n try:\n self.validate_and_send(phone_number)\n except ValidationError as exc:\n response = AuthenticatorSMSChallengeResponse()\n response._errors.setdefault(\"phone_number\", [])\n response._errors[\"phone_number\"].append(ErrorDetailSerializer(exc.detail))\n return self.challenge_invalid(response)\n return super().get(request, *args, **kwargs)\n\n def challenge_valid(self, response: ChallengeResponse) -> HttpResponse:\n \"\"\"SMS Token is validated by challenge\"\"\"\n device: SMSDevice = self.request.session[SESSION_KEY_SMS_DEVICE]\n if not device.confirmed:\n return self.challenge_invalid(response)\n stage: AuthenticatorSMSStage = self.executor.current_stage\n if stage.verify_only:\n self.logger.debug(\"Hashing number on device\")\n device.set_hashed_number()\n device.save()\n del self.request.session[SESSION_KEY_SMS_DEVICE]\n return self.executor.stage_ok()\n", "path": "authentik/stages/authenticator_sms/stage.py"}], "after_files": [{"content": "\"\"\"SMS Setup stage\"\"\"\nfrom typing import Optional\n\nfrom django.db.models import Q\nfrom django.http import HttpRequest, HttpResponse\nfrom django.http.request import QueryDict\nfrom django.utils.translation import gettext_lazy as _\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.fields import BooleanField, CharField, IntegerField\n\nfrom authentik.flows.challenge import (\n Challenge,\n ChallengeResponse,\n ChallengeTypes,\n WithUserInfoChallenge,\n)\nfrom authentik.flows.stage import ChallengeStageView\nfrom authentik.stages.authenticator_sms.models import (\n AuthenticatorSMSStage,\n SMSDevice,\n hash_phone_number,\n)\nfrom authentik.stages.prompt.stage import PLAN_CONTEXT_PROMPT\n\nSESSION_KEY_SMS_DEVICE = \"authentik/stages/authenticator_sms/sms_device\"\nPLAN_CONTEXT_PHONE = \"phone\"\n\n\nclass AuthenticatorSMSChallenge(WithUserInfoChallenge):\n \"\"\"SMS Setup challenge\"\"\"\n\n # Set to true if no previous prompt stage set the phone number\n # this stage will also check prompt_data.phone\n phone_number_required = BooleanField(default=True)\n component = 
CharField(default=\"ak-stage-authenticator-sms\")\n\n\nclass AuthenticatorSMSChallengeResponse(ChallengeResponse):\n \"\"\"SMS Challenge response, device is set by get_response_instance\"\"\"\n\n device: SMSDevice\n\n code = IntegerField(required=False)\n phone_number = CharField(required=False)\n\n component = CharField(default=\"ak-stage-authenticator-sms\")\n\n def validate(self, attrs: dict) -> dict:\n \"\"\"Check\"\"\"\n if \"code\" not in attrs:\n if \"phone_number\" not in attrs:\n raise ValidationError(\"phone_number required\")\n self.device.phone_number = attrs[\"phone_number\"]\n self.stage.validate_and_send(attrs[\"phone_number\"])\n return super().validate(attrs)\n if not self.device.verify_token(str(attrs[\"code\"])):\n raise ValidationError(_(\"Code does not match\"))\n self.device.confirmed = True\n return super().validate(attrs)\n\n\nclass AuthenticatorSMSStageView(ChallengeStageView):\n \"\"\"OTP sms Setup stage\"\"\"\n\n response_class = AuthenticatorSMSChallengeResponse\n\n def validate_and_send(self, phone_number: str):\n \"\"\"Validate phone number and send message\"\"\"\n stage: AuthenticatorSMSStage = self.executor.current_stage\n hashed_number = hash_phone_number(phone_number)\n query = Q(phone_number=hashed_number) | Q(phone_number=phone_number)\n print(SMSDevice.objects.filter(query, stage=stage.pk))\n if SMSDevice.objects.filter(query, stage=stage.pk).exists():\n raise ValidationError(_(\"Invalid phone number\"))\n # No code yet, but we have a phone number, so send a verification message\n device: SMSDevice = self.request.session[SESSION_KEY_SMS_DEVICE]\n stage.send(device.token, device)\n\n def _has_phone_number(self) -> Optional[str]:\n context = self.executor.plan.context\n if PLAN_CONTEXT_PHONE in context.get(PLAN_CONTEXT_PROMPT, {}):\n self.logger.debug(\"got phone number from plan context\")\n return context.get(PLAN_CONTEXT_PROMPT, {}).get(PLAN_CONTEXT_PHONE)\n if SESSION_KEY_SMS_DEVICE in self.request.session:\n self.logger.debug(\"got phone number from device in session\")\n device: SMSDevice = self.request.session[SESSION_KEY_SMS_DEVICE]\n if device.phone_number == \"\":\n return None\n return device.phone_number\n return None\n\n def get_challenge(self, *args, **kwargs) -> Challenge:\n return AuthenticatorSMSChallenge(\n data={\n \"type\": ChallengeTypes.NATIVE.value,\n \"phone_number_required\": self._has_phone_number() is None,\n }\n )\n\n def get_response_instance(self, data: QueryDict) -> ChallengeResponse:\n response = super().get_response_instance(data)\n response.device = self.request.session[SESSION_KEY_SMS_DEVICE]\n return response\n\n def get(self, request: HttpRequest, *args, **kwargs) -> HttpResponse:\n user = self.get_pending_user()\n\n stage: AuthenticatorSMSStage = self.executor.current_stage\n\n if SESSION_KEY_SMS_DEVICE not in self.request.session:\n device = SMSDevice(user=user, confirmed=False, stage=stage, name=\"SMS Device\")\n device.generate_token(commit=False)\n self.request.session[SESSION_KEY_SMS_DEVICE] = device\n if phone_number := self._has_phone_number():\n device.phone_number = phone_number\n try:\n self.validate_and_send(phone_number)\n except ValidationError as exc:\n # We had a phone number given already (at this point only possible from flow\n # context), but an error occurred while sending a number (most likely)\n # due to a duplicate device, so delete the number we got given, reset the state\n # (ish) and retry\n device.phone_number = \"\"\n self.executor.plan.context.get(PLAN_CONTEXT_PROMPT, {}).pop(\n 
PLAN_CONTEXT_PHONE, None\n )\n self.request.session.pop(SESSION_KEY_SMS_DEVICE, None)\n self.logger.warning(\"failed to send SMS message to pre-set number\", exc=exc)\n return self.get(request, *args, **kwargs)\n return super().get(request, *args, **kwargs)\n\n def challenge_valid(self, response: ChallengeResponse) -> HttpResponse:\n \"\"\"SMS Token is validated by challenge\"\"\"\n device: SMSDevice = self.request.session[SESSION_KEY_SMS_DEVICE]\n if not device.confirmed:\n return self.challenge_invalid(response)\n stage: AuthenticatorSMSStage = self.executor.current_stage\n if stage.verify_only:\n self.logger.debug(\"Hashing number on device\")\n device.set_hashed_number()\n device.save()\n del self.request.session[SESSION_KEY_SMS_DEVICE]\n return self.executor.stage_ok()\n", "path": "authentik/stages/authenticator_sms/stage.py"}]}
| 1,935 | 768 |
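The record above hinges on a Django REST Framework detail: a serializer only gains its `_errors` attribute once `is_valid()` has run, so the original `response._errors.setdefault(...)` on a freshly constructed challenge response raises the AttributeError quoted in the traceback. A minimal standalone sketch of that behaviour (hypothetical class and field names, assuming Django and DRF are installed; this is not code from the record):

```python
# Sketch: a DRF serializer has no `_errors` attribute until `is_valid()` runs,
# so touching `response._errors` on a fresh response object raises AttributeError.
import django
from django.conf import settings

settings.configure()  # minimal config so DRF can be imported outside a project
django.setup()

from rest_framework import serializers


class PhoneResponse(serializers.Serializer):  # hypothetical stand-in for the challenge response
    phone_number = serializers.CharField(required=False)


resp = PhoneResponse(data={"phone_number": "+10000000000"})
print(hasattr(resp, "_errors"))  # False -> direct `_errors` access would raise AttributeError
resp.is_valid()                  # validation populates `_errors`
print(hasattr(resp, "_errors"))  # True
```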
gh_patches_debug_15475
|
rasdani/github-patches
|
git_diff
|
DataDog__dd-trace-py-4198
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
aioredis raises CancelledError in _finish_span
### Which version of dd-trace-py are you using?
~~0.53.0~~ 0.58.0
### Which version of pip are you using?
21.3.1
### Which version of the libraries are you using?
django==3.2.11
django-redis==5.0.0
channels==3.0.4
daphne==3.0.2
### How can we reproduce your problem?
I am using code similar to the following:
asgi.py
```
import django
from channels.routing import get_default_application
from ddtrace.contrib.asgi import TraceMiddleware
django.setup()
application = TraceMiddleware(get_default_application())
```
routing.py
```
from django.urls import re_path
import my_app.consumers
websocket_urlpatterns = [
re_path(r"^ws/test/$", consumers.TestConsumer.as_asgi()),
]
```
my_app/consumers.py
```
from channels.generic.websocket import WebsocketConsumer
class TestConsumer(WebsocketConsumer):
groups = ["broadcast"]
def connect(self):
self.accept()
def receive(self, text_data=None, bytes_data=None):
raise Exception("An test exception")
```
I am running the application with: `ddtrace-run daphne asgi:application --bind 0.0.0.0 --port 8001`
### What is the result that you get?
I don't get any traces at all, and my logs show this:
```
handle: <Handle traced_13_execute_command.<locals>._finish_span(<Future cancelled>) at /usr/local/lib/python3.10/site-packages/ddtrace/contrib/aioredis/patch.py:140>
Traceback (most recent call last):
File "/usr/local/lib/python3.10/asyncio/events.py", line 80, in _run
self._context.run(self._callback, *self._args)
File "/usr/local/lib/python3.10/site-packages/ddtrace/contrib/aioredis/patch.py", line 146, in _finish_span
future.result()
asyncio.exceptions.CancelledError
```
### What is the result that you expected?
No errors
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/contrib/aioredis/patch.py`
Content:
```
1 import asyncio
2 import sys
3
4 import aioredis
5
6 from ddtrace import config
7 from ddtrace.internal.utils.wrappers import unwrap as _u
8 from ddtrace.pin import Pin
9 from ddtrace.vendor.wrapt import wrap_function_wrapper as _w
10
11 from .. import trace_utils
12 from ...constants import ANALYTICS_SAMPLE_RATE_KEY
13 from ...constants import SPAN_MEASURED_KEY
14 from ...ext import SpanTypes
15 from ...ext import net
16 from ...ext import redis as redisx
17 from ..redis.util import _trace_redis_cmd
18 from ..redis.util import _trace_redis_execute_pipeline
19 from ..redis.util import format_command_args
20
21
22 try:
23 from aioredis.commands.transaction import _RedisBuffer
24 except ImportError:
25 _RedisBuffer = None
26
27 config._add("aioredis", dict(_default_service="redis"))
28
29 aioredis_version_str = getattr(aioredis, "__version__", "0.0.0")
30 aioredis_version = tuple([int(i) for i in aioredis_version_str.split(".")])
31
32
33 def patch():
34 if getattr(aioredis, "_datadog_patch", False):
35 return
36 setattr(aioredis, "_datadog_patch", True)
37 pin = Pin()
38 if aioredis_version >= (2, 0):
39 _w("aioredis.client", "Redis.execute_command", traced_execute_command)
40 _w("aioredis.client", "Redis.pipeline", traced_pipeline)
41 _w("aioredis.client", "Pipeline.execute", traced_execute_pipeline)
42 pin.onto(aioredis.client.Redis)
43 else:
44 _w("aioredis", "Redis.execute", traced_13_execute_command)
45 _w("aioredis", "Redis.pipeline", traced_13_pipeline)
46 _w("aioredis.commands.transaction", "Pipeline.execute", traced_13_execute_pipeline)
47 pin.onto(aioredis.Redis)
48
49
50 def unpatch():
51 if not getattr(aioredis, "_datadog_patch", False):
52 return
53
54 setattr(aioredis, "_datadog_patch", False)
55 if aioredis_version >= (2, 0):
56 _u(aioredis.client.Redis, "execute_command")
57 _u(aioredis.client.Redis, "pipeline")
58 _u(aioredis.client.Pipeline, "execute")
59 else:
60 _u(aioredis.Redis, "execute")
61 _u(aioredis.Redis, "pipeline")
62 _u(aioredis.commands.transaction.Pipeline, "execute")
63
64
65 async def traced_execute_command(func, instance, args, kwargs):
66 pin = Pin.get_from(instance)
67 if not pin or not pin.enabled():
68 return await func(*args, **kwargs)
69
70 with _trace_redis_cmd(pin, config.aioredis, instance, args):
71 return await func(*args, **kwargs)
72
73
74 def traced_pipeline(func, instance, args, kwargs):
75 pipeline = func(*args, **kwargs)
76 pin = Pin.get_from(instance)
77 if pin:
78 pin.onto(pipeline)
79 return pipeline
80
81
82 async def traced_execute_pipeline(func, instance, args, kwargs):
83 pin = Pin.get_from(instance)
84 if not pin or not pin.enabled():
85 return await func(*args, **kwargs)
86
87 cmds = [format_command_args(c) for c, _ in instance.command_stack]
88 resource = "\n".join(cmds)
89 with _trace_redis_execute_pipeline(pin, config.aioredis, resource, instance):
90 return await func(*args, **kwargs)
91
92
93 def traced_13_pipeline(func, instance, args, kwargs):
94 pipeline = func(*args, **kwargs)
95 pin = Pin.get_from(instance)
96 if pin:
97 pin.onto(pipeline)
98 return pipeline
99
100
101 def traced_13_execute_command(func, instance, args, kwargs):
102 # If we have a _RedisBuffer then we are in a pipeline
103 if isinstance(instance.connection, _RedisBuffer):
104 return func(*args, **kwargs)
105
106 pin = Pin.get_from(instance)
107 if not pin or not pin.enabled():
108 return func(*args, **kwargs)
109
110 # Don't activate the span since this operation is performed as a future which concludes sometime later on in
111 # execution so subsequent operations in the stack are not necessarily semantically related
112 # (we don't want this span to be the parent of all other spans created before the future is resolved)
113 parent = pin.tracer.current_span()
114 span = pin.tracer.start_span(
115 redisx.CMD,
116 service=trace_utils.ext_service(pin, config.aioredis),
117 span_type=SpanTypes.REDIS,
118 activate=False,
119 child_of=parent,
120 )
121
122 span.set_tag(SPAN_MEASURED_KEY)
123 query = format_command_args(args)
124 span.resource = query
125 span.set_tag(redisx.RAWCMD, query)
126 if pin.tags:
127 span.set_tags(pin.tags)
128
129 span.set_tags(
130 {
131 net.TARGET_HOST: instance.address[0],
132 net.TARGET_PORT: instance.address[1],
133 redisx.DB: instance.db or 0,
134 }
135 )
136 span.set_metric(redisx.ARGS_LEN, len(args))
137 # set analytics sample rate if enabled
138 span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.aioredis.get_analytics_sample_rate())
139
140 def _finish_span(future):
141 try:
142 # Accessing the result will raise an exception if:
143 # - The future was cancelled
144 # - There was an error executing the future (`future.exception()`)
145 # - The future is in an invalid state
146 future.result()
147 except Exception:
148 span.set_exc_info(*sys.exc_info())
149 finally:
150 span.finish()
151
152 task = func(*args, **kwargs)
153 # Execute command returns a coroutine when no free connections are available
154 # https://github.com/aio-libs/aioredis-py/blob/v1.3.1/aioredis/pool.py#L191
155 task = asyncio.ensure_future(task)
156 task.add_done_callback(_finish_span)
157 return task
158
159
160 async def traced_13_execute_pipeline(func, instance, args, kwargs):
161 pin = Pin.get_from(instance)
162 if not pin or not pin.enabled():
163 return await func(*args, **kwargs)
164
165 cmds = []
166 for _, cmd, cmd_args, _ in instance._pipeline:
167 parts = [cmd]
168 parts.extend(cmd_args)
169 cmds.append(format_command_args(parts))
170 resource = "\n".join(cmds)
171 with pin.tracer.trace(
172 redisx.CMD,
173 resource=resource,
174 service=trace_utils.ext_service(pin, config.aioredis),
175 span_type=SpanTypes.REDIS,
176 ) as span:
177
178 span.set_tags(
179 {
180 net.TARGET_HOST: instance._pool_or_conn.address[0],
181 net.TARGET_PORT: instance._pool_or_conn.address[1],
182 redisx.DB: instance._pool_or_conn.db or 0,
183 }
184 )
185
186 span.set_tag(SPAN_MEASURED_KEY)
187 span.set_tag(redisx.RAWCMD, resource)
188 span.set_metric(redisx.PIPELINE_LEN, len(instance._pipeline))
189 # set analytics sample rate if enabled
190 span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.aioredis.get_analytics_sample_rate())
191
192 return await func(*args, **kwargs)
193
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ddtrace/contrib/aioredis/patch.py b/ddtrace/contrib/aioredis/patch.py
--- a/ddtrace/contrib/aioredis/patch.py
+++ b/ddtrace/contrib/aioredis/patch.py
@@ -140,11 +140,12 @@
def _finish_span(future):
try:
# Accessing the result will raise an exception if:
- # - The future was cancelled
+ # - The future was cancelled (CancelledError)
# - There was an error executing the future (`future.exception()`)
# - The future is in an invalid state
future.result()
- except Exception:
+ # CancelledError exceptions extend from BaseException as of Python 3.8, instead of usual Exception
+ except BaseException:
span.set_exc_info(*sys.exc_info())
finally:
span.finish()
|
{"golden_diff": "diff --git a/ddtrace/contrib/aioredis/patch.py b/ddtrace/contrib/aioredis/patch.py\n--- a/ddtrace/contrib/aioredis/patch.py\n+++ b/ddtrace/contrib/aioredis/patch.py\n@@ -140,11 +140,12 @@\n def _finish_span(future):\n try:\n # Accessing the result will raise an exception if:\n- # - The future was cancelled\n+ # - The future was cancelled (CancelledError)\n # - There was an error executing the future (`future.exception()`)\n # - The future is in an invalid state\n future.result()\n- except Exception:\n+ # CancelledError exceptions extend from BaseException as of Python 3.8, instead of usual Exception\n+ except BaseException:\n span.set_exc_info(*sys.exc_info())\n finally:\n span.finish()\n", "issue": "aioredis raises CancelledError in _finish_span \n### Which version of dd-trace-py are you using?\r\n\r\n~~0.53.0~~ 0.58.0\r\n\r\n### Which version of pip are you using?\r\n\r\n21.3.1\r\n\r\n### Which version of the libraries are you using?\r\n\r\ndjango==3.2.11\r\ndjango-redis==5.0.0\r\nchannels==3.0.4\r\ndaphne==3.0.2\r\n\r\n### How can we reproduce your problem?\r\n\r\nI am using code similar to the following:\r\n\r\nasgi.py\r\n\r\n```\r\nimport django\r\nfrom channels.routing import get_default_application\r\nfrom ddtrace.contrib.asgi import TraceMiddleware\r\n\r\ndjango.setup()\r\napplication = TraceMiddleware(get_default_application())\r\n```\r\n\r\nrouting.py\r\n\r\n```\r\nfrom django.urls import re_path\r\nimport my_app.consumers\r\n\r\nwebsocket_urlpatterns = [\r\n re_path(r\"^ws/test/$\", consumers.TestConsumer.as_asgi()),\r\n]\r\n```\r\n\r\nmy_app/consumers.py\r\n\r\n```\r\nfrom channels.generic.websocket import WebsocketConsumer\r\n\r\nclass TestConsumer(WebsocketConsumer):\r\n groups = [\"broadcast\"]\r\n\r\n def connect(self):\r\n self.accept()\r\n\r\n def receive(self, text_data=None, bytes_data=None):\r\n raise Exception(\"An test exception\")\r\n```\r\n\r\nI am running the application with: `ddtrace-run daphne asgi:application --bind 0.0.0.0 --port 8001`\r\n\r\n### What is the result that you get?\r\n\r\nI don't get any traces at all, and my logs show this:\r\n\r\n```\r\nhandle: <Handle traced_13_execute_command.<locals>._finish_span(<Future cancelled>) at /usr/local/lib/python3.10/site-packages/ddtrace/contrib/aioredis/patch.py:140>\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.10/asyncio/events.py\", line 80, in _run\r\n self._context.run(self._callback, *self._args)\r\n File \"/usr/local/lib/python3.10/site-packages/ddtrace/contrib/aioredis/patch.py\", line 146, in _finish_span\r\n future.result()\r\nasyncio.exceptions.CancelledError\r\n```\r\n\r\n\r\n### What is the result that you expected?\r\n\r\nNo errors\r\n\n", "before_files": [{"content": "import asyncio\nimport sys\n\nimport aioredis\n\nfrom ddtrace import config\nfrom ddtrace.internal.utils.wrappers import unwrap as _u\nfrom ddtrace.pin import Pin\nfrom ddtrace.vendor.wrapt import wrap_function_wrapper as _w\n\nfrom .. 
import trace_utils\nfrom ...constants import ANALYTICS_SAMPLE_RATE_KEY\nfrom ...constants import SPAN_MEASURED_KEY\nfrom ...ext import SpanTypes\nfrom ...ext import net\nfrom ...ext import redis as redisx\nfrom ..redis.util import _trace_redis_cmd\nfrom ..redis.util import _trace_redis_execute_pipeline\nfrom ..redis.util import format_command_args\n\n\ntry:\n from aioredis.commands.transaction import _RedisBuffer\nexcept ImportError:\n _RedisBuffer = None\n\nconfig._add(\"aioredis\", dict(_default_service=\"redis\"))\n\naioredis_version_str = getattr(aioredis, \"__version__\", \"0.0.0\")\naioredis_version = tuple([int(i) for i in aioredis_version_str.split(\".\")])\n\n\ndef patch():\n if getattr(aioredis, \"_datadog_patch\", False):\n return\n setattr(aioredis, \"_datadog_patch\", True)\n pin = Pin()\n if aioredis_version >= (2, 0):\n _w(\"aioredis.client\", \"Redis.execute_command\", traced_execute_command)\n _w(\"aioredis.client\", \"Redis.pipeline\", traced_pipeline)\n _w(\"aioredis.client\", \"Pipeline.execute\", traced_execute_pipeline)\n pin.onto(aioredis.client.Redis)\n else:\n _w(\"aioredis\", \"Redis.execute\", traced_13_execute_command)\n _w(\"aioredis\", \"Redis.pipeline\", traced_13_pipeline)\n _w(\"aioredis.commands.transaction\", \"Pipeline.execute\", traced_13_execute_pipeline)\n pin.onto(aioredis.Redis)\n\n\ndef unpatch():\n if not getattr(aioredis, \"_datadog_patch\", False):\n return\n\n setattr(aioredis, \"_datadog_patch\", False)\n if aioredis_version >= (2, 0):\n _u(aioredis.client.Redis, \"execute_command\")\n _u(aioredis.client.Redis, \"pipeline\")\n _u(aioredis.client.Pipeline, \"execute\")\n else:\n _u(aioredis.Redis, \"execute\")\n _u(aioredis.Redis, \"pipeline\")\n _u(aioredis.commands.transaction.Pipeline, \"execute\")\n\n\nasync def traced_execute_command(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return await func(*args, **kwargs)\n\n with _trace_redis_cmd(pin, config.aioredis, instance, args):\n return await func(*args, **kwargs)\n\n\ndef traced_pipeline(func, instance, args, kwargs):\n pipeline = func(*args, **kwargs)\n pin = Pin.get_from(instance)\n if pin:\n pin.onto(pipeline)\n return pipeline\n\n\nasync def traced_execute_pipeline(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return await func(*args, **kwargs)\n\n cmds = [format_command_args(c) for c, _ in instance.command_stack]\n resource = \"\\n\".join(cmds)\n with _trace_redis_execute_pipeline(pin, config.aioredis, resource, instance):\n return await func(*args, **kwargs)\n\n\ndef traced_13_pipeline(func, instance, args, kwargs):\n pipeline = func(*args, **kwargs)\n pin = Pin.get_from(instance)\n if pin:\n pin.onto(pipeline)\n return pipeline\n\n\ndef traced_13_execute_command(func, instance, args, kwargs):\n # If we have a _RedisBuffer then we are in a pipeline\n if isinstance(instance.connection, _RedisBuffer):\n return func(*args, **kwargs)\n\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n\n # Don't activate the span since this operation is performed as a future which concludes sometime later on in\n # execution so subsequent operations in the stack are not necessarily semantically related\n # (we don't want this span to be the parent of all other spans created before the future is resolved)\n parent = pin.tracer.current_span()\n span = pin.tracer.start_span(\n redisx.CMD,\n service=trace_utils.ext_service(pin, config.aioredis),\n 
span_type=SpanTypes.REDIS,\n activate=False,\n child_of=parent,\n )\n\n span.set_tag(SPAN_MEASURED_KEY)\n query = format_command_args(args)\n span.resource = query\n span.set_tag(redisx.RAWCMD, query)\n if pin.tags:\n span.set_tags(pin.tags)\n\n span.set_tags(\n {\n net.TARGET_HOST: instance.address[0],\n net.TARGET_PORT: instance.address[1],\n redisx.DB: instance.db or 0,\n }\n )\n span.set_metric(redisx.ARGS_LEN, len(args))\n # set analytics sample rate if enabled\n span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.aioredis.get_analytics_sample_rate())\n\n def _finish_span(future):\n try:\n # Accessing the result will raise an exception if:\n # - The future was cancelled\n # - There was an error executing the future (`future.exception()`)\n # - The future is in an invalid state\n future.result()\n except Exception:\n span.set_exc_info(*sys.exc_info())\n finally:\n span.finish()\n\n task = func(*args, **kwargs)\n # Execute command returns a coroutine when no free connections are available\n # https://github.com/aio-libs/aioredis-py/blob/v1.3.1/aioredis/pool.py#L191\n task = asyncio.ensure_future(task)\n task.add_done_callback(_finish_span)\n return task\n\n\nasync def traced_13_execute_pipeline(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return await func(*args, **kwargs)\n\n cmds = []\n for _, cmd, cmd_args, _ in instance._pipeline:\n parts = [cmd]\n parts.extend(cmd_args)\n cmds.append(format_command_args(parts))\n resource = \"\\n\".join(cmds)\n with pin.tracer.trace(\n redisx.CMD,\n resource=resource,\n service=trace_utils.ext_service(pin, config.aioredis),\n span_type=SpanTypes.REDIS,\n ) as span:\n\n span.set_tags(\n {\n net.TARGET_HOST: instance._pool_or_conn.address[0],\n net.TARGET_PORT: instance._pool_or_conn.address[1],\n redisx.DB: instance._pool_or_conn.db or 0,\n }\n )\n\n span.set_tag(SPAN_MEASURED_KEY)\n span.set_tag(redisx.RAWCMD, resource)\n span.set_metric(redisx.PIPELINE_LEN, len(instance._pipeline))\n # set analytics sample rate if enabled\n span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.aioredis.get_analytics_sample_rate())\n\n return await func(*args, **kwargs)\n", "path": "ddtrace/contrib/aioredis/patch.py"}], "after_files": [{"content": "import asyncio\nimport sys\n\nimport aioredis\n\nfrom ddtrace import config\nfrom ddtrace.internal.utils.wrappers import unwrap as _u\nfrom ddtrace.pin import Pin\nfrom ddtrace.vendor.wrapt import wrap_function_wrapper as _w\n\nfrom .. 
import trace_utils\nfrom ...constants import ANALYTICS_SAMPLE_RATE_KEY\nfrom ...constants import SPAN_MEASURED_KEY\nfrom ...ext import SpanTypes\nfrom ...ext import net\nfrom ...ext import redis as redisx\nfrom ..redis.util import _trace_redis_cmd\nfrom ..redis.util import _trace_redis_execute_pipeline\nfrom ..redis.util import format_command_args\n\n\ntry:\n from aioredis.commands.transaction import _RedisBuffer\nexcept ImportError:\n _RedisBuffer = None\n\nconfig._add(\"aioredis\", dict(_default_service=\"redis\"))\n\naioredis_version_str = getattr(aioredis, \"__version__\", \"0.0.0\")\naioredis_version = tuple([int(i) for i in aioredis_version_str.split(\".\")])\n\n\ndef patch():\n if getattr(aioredis, \"_datadog_patch\", False):\n return\n setattr(aioredis, \"_datadog_patch\", True)\n pin = Pin()\n if aioredis_version >= (2, 0):\n _w(\"aioredis.client\", \"Redis.execute_command\", traced_execute_command)\n _w(\"aioredis.client\", \"Redis.pipeline\", traced_pipeline)\n _w(\"aioredis.client\", \"Pipeline.execute\", traced_execute_pipeline)\n pin.onto(aioredis.client.Redis)\n else:\n _w(\"aioredis\", \"Redis.execute\", traced_13_execute_command)\n _w(\"aioredis\", \"Redis.pipeline\", traced_13_pipeline)\n _w(\"aioredis.commands.transaction\", \"Pipeline.execute\", traced_13_execute_pipeline)\n pin.onto(aioredis.Redis)\n\n\ndef unpatch():\n if not getattr(aioredis, \"_datadog_patch\", False):\n return\n\n setattr(aioredis, \"_datadog_patch\", False)\n if aioredis_version >= (2, 0):\n _u(aioredis.client.Redis, \"execute_command\")\n _u(aioredis.client.Redis, \"pipeline\")\n _u(aioredis.client.Pipeline, \"execute\")\n else:\n _u(aioredis.Redis, \"execute\")\n _u(aioredis.Redis, \"pipeline\")\n _u(aioredis.commands.transaction.Pipeline, \"execute\")\n\n\nasync def traced_execute_command(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return await func(*args, **kwargs)\n\n with _trace_redis_cmd(pin, config.aioredis, instance, args):\n return await func(*args, **kwargs)\n\n\ndef traced_pipeline(func, instance, args, kwargs):\n pipeline = func(*args, **kwargs)\n pin = Pin.get_from(instance)\n if pin:\n pin.onto(pipeline)\n return pipeline\n\n\nasync def traced_execute_pipeline(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return await func(*args, **kwargs)\n\n cmds = [format_command_args(c) for c, _ in instance.command_stack]\n resource = \"\\n\".join(cmds)\n with _trace_redis_execute_pipeline(pin, config.aioredis, resource, instance):\n return await func(*args, **kwargs)\n\n\ndef traced_13_pipeline(func, instance, args, kwargs):\n pipeline = func(*args, **kwargs)\n pin = Pin.get_from(instance)\n if pin:\n pin.onto(pipeline)\n return pipeline\n\n\ndef traced_13_execute_command(func, instance, args, kwargs):\n # If we have a _RedisBuffer then we are in a pipeline\n if isinstance(instance.connection, _RedisBuffer):\n return func(*args, **kwargs)\n\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n\n # Don't activate the span since this operation is performed as a future which concludes sometime later on in\n # execution so subsequent operations in the stack are not necessarily semantically related\n # (we don't want this span to be the parent of all other spans created before the future is resolved)\n parent = pin.tracer.current_span()\n span = pin.tracer.start_span(\n redisx.CMD,\n service=trace_utils.ext_service(pin, config.aioredis),\n 
span_type=SpanTypes.REDIS,\n activate=False,\n child_of=parent,\n )\n\n span.set_tag(SPAN_MEASURED_KEY)\n query = format_command_args(args)\n span.resource = query\n span.set_tag(redisx.RAWCMD, query)\n if pin.tags:\n span.set_tags(pin.tags)\n\n span.set_tags(\n {\n net.TARGET_HOST: instance.address[0],\n net.TARGET_PORT: instance.address[1],\n redisx.DB: instance.db or 0,\n }\n )\n span.set_metric(redisx.ARGS_LEN, len(args))\n # set analytics sample rate if enabled\n span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.aioredis.get_analytics_sample_rate())\n\n def _finish_span(future):\n try:\n # Accessing the result will raise an exception if:\n # - The future was cancelled (CancelledError)\n # - There was an error executing the future (`future.exception()`)\n # - The future is in an invalid state\n future.result()\n # CancelledError exceptions extend from BaseException as of Python 3.8, instead of usual Exception\n except BaseException:\n span.set_exc_info(*sys.exc_info())\n finally:\n span.finish()\n\n task = func(*args, **kwargs)\n # Execute command returns a coroutine when no free connections are available\n # https://github.com/aio-libs/aioredis-py/blob/v1.3.1/aioredis/pool.py#L191\n task = asyncio.ensure_future(task)\n task.add_done_callback(_finish_span)\n return task\n\n\nasync def traced_13_execute_pipeline(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return await func(*args, **kwargs)\n\n cmds = []\n for _, cmd, cmd_args, _ in instance._pipeline:\n parts = [cmd]\n parts.extend(cmd_args)\n cmds.append(format_command_args(parts))\n resource = \"\\n\".join(cmds)\n with pin.tracer.trace(\n redisx.CMD,\n resource=resource,\n service=trace_utils.ext_service(pin, config.aioredis),\n span_type=SpanTypes.REDIS,\n ) as span:\n\n span.set_tags(\n {\n net.TARGET_HOST: instance._pool_or_conn.address[0],\n net.TARGET_PORT: instance._pool_or_conn.address[1],\n redisx.DB: instance._pool_or_conn.db or 0,\n }\n )\n\n span.set_tag(SPAN_MEASURED_KEY)\n span.set_tag(redisx.RAWCMD, resource)\n span.set_metric(redisx.PIPELINE_LEN, len(instance._pipeline))\n # set analytics sample rate if enabled\n span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.aioredis.get_analytics_sample_rate())\n\n return await func(*args, **kwargs)\n", "path": "ddtrace/contrib/aioredis/patch.py"}]}
| 2,820 | 202 |
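The patch above works because, as of Python 3.8, `asyncio.CancelledError` inherits from `BaseException` rather than `Exception`, so a done-callback that only catches `Exception` lets the cancellation escape and asyncio logs it as an unhandled callback error. A minimal standalone sketch of that behaviour (not code from dd-trace-py; assumes Python 3.8+):

```python
# Sketch: cancelling a future and reading its result in a done-callback.
# On Python 3.8+ the CancelledError is only caught by `except BaseException`.
import asyncio


def on_done(future: asyncio.Future) -> None:
    try:
        future.result()  # raises asyncio.CancelledError for a cancelled future
    except Exception:
        print("caught as Exception")      # not reached on Python 3.8+
    except BaseException:
        print("caught as BaseException")  # reached: cancellation bypasses `except Exception`


async def main() -> None:
    fut = asyncio.get_running_loop().create_future()
    fut.add_done_callback(on_done)
    fut.cancel()
    await asyncio.sleep(0)  # yield so the done-callback runs


asyncio.run(main())
print(issubclass(asyncio.CancelledError, Exception))  # False on Python 3.8+
```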
gh_patches_debug_1887
|
rasdani/github-patches
|
git_diff
|
spotify__luigi-2679
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Is there a reason python-dateutil is pinned to v2.7.5?
In this [commit](https://github.com/spotify/luigi/commit/ca0aa9afedecda539339e51974ef38cecf180d4b), I can see that python-dateutil has been pinned to version 2.7.5 - is this strictly necessary? Version 2.8.0 was released a couple of weeks ago and It's causing `ContextualVersionConflict` errors for us.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright (c) 2012 Spotify AB
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"); you may not
4 # use this file except in compliance with the License. You may obtain a copy of
5 # the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12 # License for the specific language governing permissions and limitations under
13 # the License.
14
15 import os
16 import sys
17
18 from setuptools import setup
19
20
21 def get_static_files(path):
22 return [os.path.join(dirpath.replace("luigi/", ""), ext)
23 for (dirpath, dirnames, filenames) in os.walk(path)
24 for ext in ["*.html", "*.js", "*.css", "*.png",
25 "*.eot", "*.svg", "*.ttf", "*.woff", "*.woff2"]]
26
27
28 luigi_package_data = sum(map(get_static_files, ["luigi/static", "luigi/templates"]), [])
29
30 readme_note = """\
31 .. note::
32
33 For the latest source, discussion, etc, please visit the
34 `GitHub repository <https://github.com/spotify/luigi>`_\n\n
35 """
36
37 with open('README.rst') as fobj:
38 long_description = readme_note + fobj.read()
39
40 install_requires = [
41 'tornado>=4.0,<5',
42 # https://pagure.io/python-daemon/issue/18
43 'python-daemon<2.2.0',
44 'python-dateutil==2.7.5',
45 ]
46
47 # Note: To support older versions of setuptools, we're explicitly not
48 # using conditional syntax (i.e. 'enum34>1.1.0;python_version<"3.4"').
49 # This syntax is a problem for setuptools as recent as `20.1.1`,
50 # published Feb 16, 2016.
51 if sys.version_info[:2] < (3, 4):
52 install_requires.append('enum34>1.1.0')
53
54 if os.environ.get('READTHEDOCS', None) == 'True':
55 # So that we can build documentation for luigi.db_task_history and luigi.contrib.sqla
56 install_requires.append('sqlalchemy')
57 # readthedocs don't like python-daemon, see #1342
58 install_requires.remove('python-daemon<2.2.0')
59 install_requires.append('sphinx>=1.4.4') # Value mirrored in doc/conf.py
60
61 setup(
62 name='luigi',
63 version='2.8.3',
64 description='Workflow mgmgt + task scheduling + dependency resolution',
65 long_description=long_description,
66 author='The Luigi Authors',
67 url='https://github.com/spotify/luigi',
68 license='Apache License 2.0',
69 packages=[
70 'luigi',
71 'luigi.configuration',
72 'luigi.contrib',
73 'luigi.contrib.hdfs',
74 'luigi.tools'
75 ],
76 package_data={
77 'luigi': luigi_package_data
78 },
79 entry_points={
80 'console_scripts': [
81 'luigi = luigi.cmdline:luigi_run',
82 'luigid = luigi.cmdline:luigid',
83 'luigi-grep = luigi.tools.luigi_grep:main',
84 'luigi-deps = luigi.tools.deps:main',
85 'luigi-deps-tree = luigi.tools.deps_tree:main'
86 ]
87 },
88 install_requires=install_requires,
89 extras_require={
90 'toml': ['toml<2.0.0'],
91 },
92 classifiers=[
93 'Development Status :: 5 - Production/Stable',
94 'Environment :: Console',
95 'Environment :: Web Environment',
96 'Intended Audience :: Developers',
97 'Intended Audience :: System Administrators',
98 'License :: OSI Approved :: Apache Software License',
99 'Programming Language :: Python :: 2.7',
100 'Programming Language :: Python :: 3.3',
101 'Programming Language :: Python :: 3.4',
102 'Programming Language :: Python :: 3.5',
103 'Programming Language :: Python :: 3.6',
104 'Programming Language :: Python :: 3.7',
105 'Topic :: System :: Monitoring',
106 ],
107 )
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -41,7 +41,7 @@
'tornado>=4.0,<5',
# https://pagure.io/python-daemon/issue/18
'python-daemon<2.2.0',
- 'python-dateutil==2.7.5',
+ 'python-dateutil>=2.7.5,<3',
]
# Note: To support older versions of setuptools, we're explicitly not
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -41,7 +41,7 @@\n 'tornado>=4.0,<5',\n # https://pagure.io/python-daemon/issue/18\n 'python-daemon<2.2.0',\n- 'python-dateutil==2.7.5',\n+ 'python-dateutil>=2.7.5,<3',\n ]\n \n # Note: To support older versions of setuptools, we're explicitly not\n", "issue": "Is there a reason python-dateutil is pinned to v2.7.5?\nIn this [commit](https://github.com/spotify/luigi/commit/ca0aa9afedecda539339e51974ef38cecf180d4b), I can see that python-dateutil has been pinned to version 2.7.5 - is this strictly necessary? Version 2.8.0 was released a couple of weeks ago and It's causing `ContextualVersionConflict` errors for us.\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) 2012 Spotify AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy of\n# the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under\n# the License.\n\nimport os\nimport sys\n\nfrom setuptools import setup\n\n\ndef get_static_files(path):\n return [os.path.join(dirpath.replace(\"luigi/\", \"\"), ext)\n for (dirpath, dirnames, filenames) in os.walk(path)\n for ext in [\"*.html\", \"*.js\", \"*.css\", \"*.png\",\n \"*.eot\", \"*.svg\", \"*.ttf\", \"*.woff\", \"*.woff2\"]]\n\n\nluigi_package_data = sum(map(get_static_files, [\"luigi/static\", \"luigi/templates\"]), [])\n\nreadme_note = \"\"\"\\\n.. note::\n\n For the latest source, discussion, etc, please visit the\n `GitHub repository <https://github.com/spotify/luigi>`_\\n\\n\n\"\"\"\n\nwith open('README.rst') as fobj:\n long_description = readme_note + fobj.read()\n\ninstall_requires = [\n 'tornado>=4.0,<5',\n # https://pagure.io/python-daemon/issue/18\n 'python-daemon<2.2.0',\n 'python-dateutil==2.7.5',\n]\n\n# Note: To support older versions of setuptools, we're explicitly not\n# using conditional syntax (i.e. 
'enum34>1.1.0;python_version<\"3.4\"').\n# This syntax is a problem for setuptools as recent as `20.1.1`,\n# published Feb 16, 2016.\nif sys.version_info[:2] < (3, 4):\n install_requires.append('enum34>1.1.0')\n\nif os.environ.get('READTHEDOCS', None) == 'True':\n # So that we can build documentation for luigi.db_task_history and luigi.contrib.sqla\n install_requires.append('sqlalchemy')\n # readthedocs don't like python-daemon, see #1342\n install_requires.remove('python-daemon<2.2.0')\n install_requires.append('sphinx>=1.4.4') # Value mirrored in doc/conf.py\n\nsetup(\n name='luigi',\n version='2.8.3',\n description='Workflow mgmgt + task scheduling + dependency resolution',\n long_description=long_description,\n author='The Luigi Authors',\n url='https://github.com/spotify/luigi',\n license='Apache License 2.0',\n packages=[\n 'luigi',\n 'luigi.configuration',\n 'luigi.contrib',\n 'luigi.contrib.hdfs',\n 'luigi.tools'\n ],\n package_data={\n 'luigi': luigi_package_data\n },\n entry_points={\n 'console_scripts': [\n 'luigi = luigi.cmdline:luigi_run',\n 'luigid = luigi.cmdline:luigid',\n 'luigi-grep = luigi.tools.luigi_grep:main',\n 'luigi-deps = luigi.tools.deps:main',\n 'luigi-deps-tree = luigi.tools.deps_tree:main'\n ]\n },\n install_requires=install_requires,\n extras_require={\n 'toml': ['toml<2.0.0'],\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: System :: Monitoring',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright (c) 2012 Spotify AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy of\n# the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under\n# the License.\n\nimport os\nimport sys\n\nfrom setuptools import setup\n\n\ndef get_static_files(path):\n return [os.path.join(dirpath.replace(\"luigi/\", \"\"), ext)\n for (dirpath, dirnames, filenames) in os.walk(path)\n for ext in [\"*.html\", \"*.js\", \"*.css\", \"*.png\",\n \"*.eot\", \"*.svg\", \"*.ttf\", \"*.woff\", \"*.woff2\"]]\n\n\nluigi_package_data = sum(map(get_static_files, [\"luigi/static\", \"luigi/templates\"]), [])\n\nreadme_note = \"\"\"\\\n.. note::\n\n For the latest source, discussion, etc, please visit the\n `GitHub repository <https://github.com/spotify/luigi>`_\\n\\n\n\"\"\"\n\nwith open('README.rst') as fobj:\n long_description = readme_note + fobj.read()\n\ninstall_requires = [\n 'tornado>=4.0,<5',\n # https://pagure.io/python-daemon/issue/18\n 'python-daemon<2.2.0',\n 'python-dateutil>=2.7.5,<3',\n]\n\n# Note: To support older versions of setuptools, we're explicitly not\n# using conditional syntax (i.e. 
'enum34>1.1.0;python_version<\"3.4\"').\n# This syntax is a problem for setuptools as recent as `20.1.1`,\n# published Feb 16, 2016.\nif sys.version_info[:2] < (3, 4):\n install_requires.append('enum34>1.1.0')\n\nif os.environ.get('READTHEDOCS', None) == 'True':\n # So that we can build documentation for luigi.db_task_history and luigi.contrib.sqla\n install_requires.append('sqlalchemy')\n # readthedocs don't like python-daemon, see #1342\n install_requires.remove('python-daemon<2.2.0')\n install_requires.append('sphinx>=1.4.4') # Value mirrored in doc/conf.py\n\nsetup(\n name='luigi',\n version='2.8.3',\n description='Workflow mgmgt + task scheduling + dependency resolution',\n long_description=long_description,\n author='The Luigi Authors',\n url='https://github.com/spotify/luigi',\n license='Apache License 2.0',\n packages=[\n 'luigi',\n 'luigi.configuration',\n 'luigi.contrib',\n 'luigi.contrib.hdfs',\n 'luigi.tools'\n ],\n package_data={\n 'luigi': luigi_package_data\n },\n entry_points={\n 'console_scripts': [\n 'luigi = luigi.cmdline:luigi_run',\n 'luigid = luigi.cmdline:luigid',\n 'luigi-grep = luigi.tools.luigi_grep:main',\n 'luigi-deps = luigi.tools.deps:main',\n 'luigi-deps-tree = luigi.tools.deps_tree:main'\n ]\n },\n install_requires=install_requires,\n extras_require={\n 'toml': ['toml<2.0.0'],\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: System :: Monitoring',\n ],\n)\n", "path": "setup.py"}]}
| 1,555 | 116 |
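The patch above swaps the exact pin for a compatible range, so any python-dateutil 2.x release at or above 2.7.5 satisfies luigi and the `ContextualVersionConflict` from the issue no longer occurs when another package needs 2.8.0. A small sketch of the resulting constraint (the check uses the third-party `packaging` library purely for illustration):

```python
# Sketch: the relaxed requirement accepts any python-dateutil 2.x >= 2.7.5.
from packaging.requirements import Requirement

req = Requirement("python-dateutil>=2.7.5,<3")  # replaces the exact pin ==2.7.5
print("2.7.5" in req.specifier)  # True
print("2.8.0" in req.specifier)  # True  -> the release mentioned in the issue now resolves
print("3.0.0" in req.specifier)  # False -> still guarded against a future major version
```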
gh_patches_debug_24795
|
rasdani/github-patches
|
git_diff
|
CiviWiki__OpenCiviWiki-1381
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
add django-debug-toolbar
## Task
- [ ] install `django-debug-toolbar` with the command `poetry add django-debug-toolbar --group dev`
- [ ] follow the [remaining installation instructions](https://django-debug-toolbar.readthedocs.io/en/latest/installation.html)
--- END ISSUE ---
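The remaining installation steps referenced in the task amount to a handful of settings and URL changes; a condensed sketch is below (it mirrors the patch later in this record and collapses `core/settings.py` and `core/urls.py` into one snippet for brevity):

```python
# core/settings.py (sketch) -- register the app and its middleware
INSTALLED_APPS += ("debug_toolbar",)

MIDDLEWARE = [
    "debug_toolbar.middleware.DebugToolbarMiddleware",  # placed early in the stack
    *MIDDLEWARE,
]

INTERNAL_IPS = ["127.0.0.1"]  # the toolbar only renders for these client IPs

# core/urls.py (sketch) -- expose the toolbar's own URLs
from django.urls import include, path

urlpatterns += [path("__debug__/", include("debug_toolbar.urls"))]
```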
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `project/core/urls.py`
Content:
```
1 """civiwiki URL Configuration
2
3 The `urlpatterns` list routes URLs to views. For more information please see:
4 https://docs.djangoproject.com/en/3.2/topics/http/urls/
5 Examples:
6 Function views
7 1. Add an import: from my_app import views
8 2. Add a URL to urlpatterns: path('', views.home, name='home')
9 Class-based views
10 1. Add an import: from other_app.views import Home
11 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
12 Including another URLconf
13 1. Import the include() function: from django.urls import include, path
14 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
15 """
16
17 from django.conf import settings
18 from django.conf.urls.static import static
19 from django.contrib import admin
20 from django.urls import include, path, re_path
21 from django.views.generic.base import RedirectView
22 from django.views.static import serve
23
24 from core.router import CiviWikiRouter
25
26
27 urlpatterns = [
28 path("admin/", admin.site.urls),
29 path("api/v1/", include(CiviWikiRouter.urls)),
30 path("api/", include("accounts.urls.api")),
31 path("api/", include("threads.urls.api")),
32 path("", include("accounts.urls.urls")),
33 path("", include("threads.urls.urls")),
34 path(
35 "inbox/notifications/",
36 include("notifications.urls", namespace="notifications"),
37 ),
38 path("favicon.ico", RedirectView.as_view(url="/static/favicon/favicon.ico")),
39 path(
40 "favicon-32x32.png",
41 RedirectView.as_view(url="/static/favicon/favicon-32x32.png"),
42 ),
43 path(
44 "apple-touch-icon.png",
45 RedirectView.as_view(url="/static/favicon/apple-touch-icon.png"),
46 ),
47 path(
48 "mstile-144x144.png",
49 RedirectView.as_view(url="/static/favicon/mstile-144x144.png"),
50 ),
51 re_path(r"^media/(?P<path>.*)$", serve, {"document_root": settings.MEDIA_ROOT}),
52 ]
53
54 urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
55 urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
56
```
Path: `project/core/settings.py`
Content:
```
1 """
2 Django settings for civiwiki project.
3 Darius Calliet May 12, 2016
4
5 Production settings file to select proper environment variables.
6 """
7 import os
8
9 # False if not in os.environ
10 DEBUG = os.getenv("DEBUG", False)
11
12 # defaults to second value if not found in os.environ
13 DJANGO_HOST = os.getenv("DJANGO_HOST", "LOCALHOST")
14
15 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
16 SECRET_KEY = os.getenv("DJANGO_SECRET_KEY", "TEST_KEY_FOR_DEVELOPMENT")
17 ALLOWED_HOSTS = [".herokuapp.com", ".civiwiki.org", "127.0.0.1", "localhost", "0.0.0.0"]
18
19 INSTALLED_APPS = (
20 "django.contrib.admin",
21 "django.contrib.auth",
22 "django.contrib.contenttypes",
23 "django.contrib.sessions",
24 "django.contrib.messages",
25 "django.contrib.staticfiles",
26 "django_extensions",
27 "storages",
28 "core",
29 "rest_framework",
30 "accounts.apps.AccountsConfig",
31 "threads",
32 "notifications",
33 "corsheaders",
34 "taggit",
35 "categories",
36 "notification",
37 )
38
39 MIDDLEWARE = [
40 "corsheaders.middleware.CorsMiddleware",
41 "django.middleware.security.SecurityMiddleware",
42 "whitenoise.middleware.WhiteNoiseMiddleware",
43 "django.contrib.sessions.middleware.SessionMiddleware",
44 "django.middleware.common.CommonMiddleware",
45 "django.middleware.csrf.CsrfViewMiddleware",
46 "django.contrib.auth.middleware.AuthenticationMiddleware",
47 # 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
48 "django.contrib.messages.middleware.MessageMiddleware",
49 "django.middleware.clickjacking.XFrameOptionsMiddleware",
50 ]
51
52 CSRF_USE_SESSIONS = (
53 True # Store the CSRF token in the users session instead of in a cookie
54 )
55
56 CORS_ORIGIN_ALLOW_ALL = True
57 ROOT_URLCONF = "core.urls"
58
59 # SSL Setup
60 if DJANGO_HOST != "LOCALHOST":
61 SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
62 SECURE_SSL_REDIRECT = True
63 SESSION_COOKIE_SECURE = True
64 CSRF_COOKIE_SECURE = True
65
66 # Internationalization & Localization
67 LANGUAGE_CODE = "en-us"
68 TIME_ZONE = "UTC"
69 USE_I18N = True
70 USE_L10N = True
71 USE_TZ = True
72
73 TEMPLATES = [
74 {
75 "BACKEND": "django.template.backends.django.DjangoTemplates",
76 "DIRS": [
77 os.path.join(BASE_DIR, "threads/templates/threads"),
78 os.path.join(BASE_DIR, "accounts/templates/accounts"),
79 ], # TODO: Add non-webapp template directory
80 "APP_DIRS": True,
81 "OPTIONS": {
82 "context_processors": [
83 "django.template.context_processors.debug",
84 "django.template.context_processors.request",
85 "django.contrib.auth.context_processors.auth",
86 "django.contrib.messages.context_processors.messages",
87 ],
88 },
89 },
90 ]
91
92 WSGI_APPLICATION = "core.wsgi.application"
93
94 # Apex Contact for Production Errors
95 ADMINS = [("Development Team", "[email protected]")]
96
97 STATIC_URL = "/static/"
98 STATICFILES_DIRS = (os.path.join(BASE_DIR, "core/templates/static"),)
99 STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
100
101 MEDIA_ROOT = os.path.join(BASE_DIR, "media")
102 MEDIA_URL = "/media/"
103
104 # TODO: re-organize and simplify staticfiles settings
105 if "CIVIWIKI_LOCAL_NAME" not in os.environ:
106 STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
107
108 # Use DATABASE_URL in production
109 DATABASE_URL = os.getenv("DATABASE_URL")
110
111 if DATABASE_URL is not None:
112 DATABASES = {"default": DATABASE_URL}
113 else:
114 # Default to sqlite for simplicity in development
115 DATABASES = {
116 "default": {
117 "ENGINE": "django.db.backends.sqlite3",
118 "NAME": BASE_DIR + "/" + "db.sqlite3",
119 }
120 }
121
122 # Email Backend Setup
123 if "EMAIL_HOST" not in os.environ:
124 EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
125 EMAIL_HOST_USER = "[email protected]"
126 else:
127 EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
128 EMAIL_HOST = os.getenv("EMAIL_HOST")
129 EMAIL_PORT = os.getenv("EMAIL_PORT")
130 EMAIL_HOST_USER = os.getenv("EMAIL_HOST_USER")
131 EMAIL_HOST_PASSWORD = os.getenv("EMAIL_HOST_PASSWORD")
132 EMAIL_USE_SSL = True
133 DEFAULT_FROM_EMAIL = EMAIL_HOST
134
135 # Notification API Settings
136 NOTIFICATIONS_SOFT_DELETE = True
137 NOTIFICATIONS_USE_JSONFIELD = True
138
139 # Django REST API Settings
140 DEFAULT_RENDERER_CLASSES = ("rest_framework.renderers.JSONRenderer",)
141
142 if DEBUG:
143 # Browsable HTML - Enabled only in Debug mode (dev)
144 DEFAULT_RENDERER_CLASSES = DEFAULT_RENDERER_CLASSES + (
145 "rest_framework.renderers.BrowsableAPIRenderer",
146 )
147
148 REST_FRAMEWORK = {
149 "DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",),
150 "DEFAULT_RENDERER_CLASSES": DEFAULT_RENDERER_CLASSES,
151 "DEFAULT_AUTHENTICATION_CLASSES": (
152 "rest_framework.authentication.BasicAuthentication",
153 "rest_framework.authentication.SessionAuthentication",
154 ),
155 }
156
157 # CORS Settings
158 CORS_ORIGIN_ALLOW_ALL = True
159
160 # Custom User model
161 AUTH_USER_MODEL = "accounts.User"
162
163 DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
164
165 # Login Logout URLS
166 LOGIN_URL = "login/"
167 LOGIN_REDIRECT_URL = "/"
168 LOGOUT_REDIRECT_URL = "/"
169
170 AUTH_PASSWORD_VALIDATORS = [
171 {
172 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator", # noqa: E501
173 },
174 {
175 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
176 "OPTIONS": {
177 "min_length": 4,
178 },
179 },
180 {
181 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
182 },
183 {
184 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
185 },
186 ]
187
188 LOGGING = {
189 "version": 1,
190 "disable_existing_loggers": False,
191 "formatters": {"rich": {"datefmt": "[%X]"}},
192 "handlers": {
193 "console": {
194 "class": "rich.logging.RichHandler",
195 "formatter": "rich",
196 "level": "WARNING",
197 # "filters": ["require_debug_true"],
198 "rich_tracebacks": True,
199 "tracebacks_show_locals": True,
200 }
201 },
202 "loggers": {"django": {"handlers": ["console"]}},
203 }
204
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/project/core/settings.py b/project/core/settings.py
--- a/project/core/settings.py
+++ b/project/core/settings.py
@@ -34,9 +34,11 @@
"taggit",
"categories",
"notification",
+ "debug_toolbar",
)
MIDDLEWARE = [
+ "debug_toolbar.middleware.DebugToolbarMiddleware",
"corsheaders.middleware.CorsMiddleware",
"django.middleware.security.SecurityMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
@@ -49,6 +51,10 @@
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
+INTERNAL_IPS = [
+ "127.0.0.1",
+]
+
CSRF_USE_SESSIONS = (
True # Store the CSRF token in the users session instead of in a cookie
)
diff --git a/project/core/urls.py b/project/core/urls.py
--- a/project/core/urls.py
+++ b/project/core/urls.py
@@ -49,6 +49,7 @@
RedirectView.as_view(url="/static/favicon/mstile-144x144.png"),
),
re_path(r"^media/(?P<path>.*)$", serve, {"document_root": settings.MEDIA_ROOT}),
+ path("__debug__/", include("debug_toolbar.urls")),
]
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
{"golden_diff": "diff --git a/project/core/settings.py b/project/core/settings.py\n--- a/project/core/settings.py\n+++ b/project/core/settings.py\n@@ -34,9 +34,11 @@\n \"taggit\",\n \"categories\",\n \"notification\",\n+ \"debug_toolbar\",\n )\n \n MIDDLEWARE = [\n+ \"debug_toolbar.middleware.DebugToolbarMiddleware\",\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n@@ -49,6 +51,10 @@\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n ]\n \n+INTERNAL_IPS = [\n+ \"127.0.0.1\",\n+]\n+\n CSRF_USE_SESSIONS = (\n True # Store the CSRF token in the users session instead of in a cookie\n )\ndiff --git a/project/core/urls.py b/project/core/urls.py\n--- a/project/core/urls.py\n+++ b/project/core/urls.py\n@@ -49,6 +49,7 @@\n RedirectView.as_view(url=\"/static/favicon/mstile-144x144.png\"),\n ),\n re_path(r\"^media/(?P<path>.*)$\", serve, {\"document_root\": settings.MEDIA_ROOT}),\n+ path(\"__debug__/\", include(\"debug_toolbar.urls\")),\n ]\n \n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n", "issue": "add django-debug-toolbar\n## Task\r\n\r\n- [ ] install `django-debug-toolbar` with the command `poetry add django-debug-toolbar --group dev`\r\n- [ ] follow the [remaining installation instructions](https://django-debug-toolbar.readthedocs.io/en/latest/installation.html)\n", "before_files": [{"content": "\"\"\"civiwiki URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import include, path, re_path\nfrom django.views.generic.base import RedirectView\nfrom django.views.static import serve\n\nfrom core.router import CiviWikiRouter\n\n\nurlpatterns = [\n path(\"admin/\", admin.site.urls),\n path(\"api/v1/\", include(CiviWikiRouter.urls)),\n path(\"api/\", include(\"accounts.urls.api\")),\n path(\"api/\", include(\"threads.urls.api\")),\n path(\"\", include(\"accounts.urls.urls\")),\n path(\"\", include(\"threads.urls.urls\")),\n path(\n \"inbox/notifications/\",\n include(\"notifications.urls\", namespace=\"notifications\"),\n ),\n path(\"favicon.ico\", RedirectView.as_view(url=\"/static/favicon/favicon.ico\")),\n path(\n \"favicon-32x32.png\",\n RedirectView.as_view(url=\"/static/favicon/favicon-32x32.png\"),\n ),\n path(\n \"apple-touch-icon.png\",\n RedirectView.as_view(url=\"/static/favicon/apple-touch-icon.png\"),\n ),\n path(\n \"mstile-144x144.png\",\n RedirectView.as_view(url=\"/static/favicon/mstile-144x144.png\"),\n ),\n re_path(r\"^media/(?P<path>.*)$\", serve, {\"document_root\": settings.MEDIA_ROOT}),\n]\n\nurlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "path": "project/core/urls.py"}, {"content": "\"\"\"\nDjango settings for civiwiki project.\nDarius Calliet May 12, 2016\n\nProduction settings file to select proper environment variables.\n\"\"\"\nimport os\n\n# False if not in os.environ\nDEBUG = os.getenv(\"DEBUG\", False)\n\n# defaults to second value if not found in os.environ\nDJANGO_HOST = os.getenv(\"DJANGO_HOST\", \"LOCALHOST\")\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nSECRET_KEY = os.getenv(\"DJANGO_SECRET_KEY\", \"TEST_KEY_FOR_DEVELOPMENT\")\nALLOWED_HOSTS = [\".herokuapp.com\", \".civiwiki.org\", \"127.0.0.1\", \"localhost\", \"0.0.0.0\"]\n\nINSTALLED_APPS = (\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django_extensions\",\n \"storages\",\n \"core\",\n \"rest_framework\",\n \"accounts.apps.AccountsConfig\",\n \"threads\",\n \"notifications\",\n \"corsheaders\",\n \"taggit\",\n \"categories\",\n \"notification\",\n)\n\nMIDDLEWARE = [\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n # 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nCSRF_USE_SESSIONS = (\n True # Store the CSRF token in the users session instead of in a cookie\n)\n\nCORS_ORIGIN_ALLOW_ALL = True\nROOT_URLCONF = \"core.urls\"\n\n# SSL Setup\nif DJANGO_HOST != \"LOCALHOST\":\n SECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n SECURE_SSL_REDIRECT = True\n SESSION_COOKIE_SECURE = True\n CSRF_COOKIE_SECURE = True\n\n# Internationalization & Localization\nLANGUAGE_CODE = \"en-us\"\nTIME_ZONE = \"UTC\"\nUSE_I18N = 
True\nUSE_L10N = True\nUSE_TZ = True\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\n os.path.join(BASE_DIR, \"threads/templates/threads\"),\n os.path.join(BASE_DIR, \"accounts/templates/accounts\"),\n ], # TODO: Add non-webapp template directory\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"core.wsgi.application\"\n\n# Apex Contact for Production Errors\nADMINS = [(\"Development Team\", \"[email protected]\")]\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = (os.path.join(BASE_DIR, \"core/templates/static\"),)\nSTATIC_ROOT = os.path.join(BASE_DIR, \"staticfiles\")\n\nMEDIA_ROOT = os.path.join(BASE_DIR, \"media\")\nMEDIA_URL = \"/media/\"\n\n# TODO: re-organize and simplify staticfiles settings\nif \"CIVIWIKI_LOCAL_NAME\" not in os.environ:\n STATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\n\n# Use DATABASE_URL in production\nDATABASE_URL = os.getenv(\"DATABASE_URL\")\n\nif DATABASE_URL is not None:\n DATABASES = {\"default\": DATABASE_URL}\nelse:\n # Default to sqlite for simplicity in development\n DATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": BASE_DIR + \"/\" + \"db.sqlite3\",\n }\n }\n\n# Email Backend Setup\nif \"EMAIL_HOST\" not in os.environ:\n EMAIL_BACKEND = \"django.core.mail.backends.console.EmailBackend\"\n EMAIL_HOST_USER = \"[email protected]\"\nelse:\n EMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\n EMAIL_HOST = os.getenv(\"EMAIL_HOST\")\n EMAIL_PORT = os.getenv(\"EMAIL_PORT\")\n EMAIL_HOST_USER = os.getenv(\"EMAIL_HOST_USER\")\n EMAIL_HOST_PASSWORD = os.getenv(\"EMAIL_HOST_PASSWORD\")\n EMAIL_USE_SSL = True\n DEFAULT_FROM_EMAIL = EMAIL_HOST\n\n# Notification API Settings\nNOTIFICATIONS_SOFT_DELETE = True\nNOTIFICATIONS_USE_JSONFIELD = True\n\n# Django REST API Settings\nDEFAULT_RENDERER_CLASSES = (\"rest_framework.renderers.JSONRenderer\",)\n\nif DEBUG:\n # Browsable HTML - Enabled only in Debug mode (dev)\n DEFAULT_RENDERER_CLASSES = DEFAULT_RENDERER_CLASSES + (\n \"rest_framework.renderers.BrowsableAPIRenderer\",\n )\n\nREST_FRAMEWORK = {\n \"DEFAULT_PERMISSION_CLASSES\": (\"rest_framework.permissions.IsAuthenticated\",),\n \"DEFAULT_RENDERER_CLASSES\": DEFAULT_RENDERER_CLASSES,\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework.authentication.BasicAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n ),\n}\n\n# CORS Settings\nCORS_ORIGIN_ALLOW_ALL = True\n\n# Custom User model\nAUTH_USER_MODEL = \"accounts.User\"\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Login Logout URLS\nLOGIN_URL = \"login/\"\nLOGIN_REDIRECT_URL = \"/\"\nLOGOUT_REDIRECT_URL = \"/\"\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\", # noqa: E501\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n \"OPTIONS\": {\n \"min_length\": 4,\n },\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\"rich\": 
{\"datefmt\": \"[%X]\"}},\n \"handlers\": {\n \"console\": {\n \"class\": \"rich.logging.RichHandler\",\n \"formatter\": \"rich\",\n \"level\": \"WARNING\",\n # \"filters\": [\"require_debug_true\"],\n \"rich_tracebacks\": True,\n \"tracebacks_show_locals\": True,\n }\n },\n \"loggers\": {\"django\": {\"handlers\": [\"console\"]}},\n}\n", "path": "project/core/settings.py"}], "after_files": [{"content": "\"\"\"civiwiki URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import include, path, re_path\nfrom django.views.generic.base import RedirectView\nfrom django.views.static import serve\n\nfrom core.router import CiviWikiRouter\n\n\nurlpatterns = [\n path(\"admin/\", admin.site.urls),\n path(\"api/v1/\", include(CiviWikiRouter.urls)),\n path(\"api/\", include(\"accounts.urls.api\")),\n path(\"api/\", include(\"threads.urls.api\")),\n path(\"\", include(\"accounts.urls.urls\")),\n path(\"\", include(\"threads.urls.urls\")),\n path(\n \"inbox/notifications/\",\n include(\"notifications.urls\", namespace=\"notifications\"),\n ),\n path(\"favicon.ico\", RedirectView.as_view(url=\"/static/favicon/favicon.ico\")),\n path(\n \"favicon-32x32.png\",\n RedirectView.as_view(url=\"/static/favicon/favicon-32x32.png\"),\n ),\n path(\n \"apple-touch-icon.png\",\n RedirectView.as_view(url=\"/static/favicon/apple-touch-icon.png\"),\n ),\n path(\n \"mstile-144x144.png\",\n RedirectView.as_view(url=\"/static/favicon/mstile-144x144.png\"),\n ),\n re_path(r\"^media/(?P<path>.*)$\", serve, {\"document_root\": settings.MEDIA_ROOT}),\n path(\"__debug__/\", include(\"debug_toolbar.urls\")),\n]\n\nurlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "path": "project/core/urls.py"}, {"content": "\"\"\"\nDjango settings for civiwiki project.\nDarius Calliet May 12, 2016\n\nProduction settings file to select proper environment variables.\n\"\"\"\nimport os\n\n# False if not in os.environ\nDEBUG = os.getenv(\"DEBUG\", False)\n\n# defaults to second value if not found in os.environ\nDJANGO_HOST = os.getenv(\"DJANGO_HOST\", \"LOCALHOST\")\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nSECRET_KEY = os.getenv(\"DJANGO_SECRET_KEY\", \"TEST_KEY_FOR_DEVELOPMENT\")\nALLOWED_HOSTS = [\".herokuapp.com\", \".civiwiki.org\", \"127.0.0.1\", \"localhost\", \"0.0.0.0\"]\n\nINSTALLED_APPS = (\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django_extensions\",\n \"storages\",\n \"core\",\n \"rest_framework\",\n \"accounts.apps.AccountsConfig\",\n \"threads\",\n \"notifications\",\n \"corsheaders\",\n \"taggit\",\n \"categories\",\n \"notification\",\n 
\"debug_toolbar\",\n)\n\nMIDDLEWARE = [\n \"debug_toolbar.middleware.DebugToolbarMiddleware\",\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n # 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nINTERNAL_IPS = [\n \"127.0.0.1\",\n]\n\nCSRF_USE_SESSIONS = (\n True # Store the CSRF token in the users session instead of in a cookie\n)\n\nCORS_ORIGIN_ALLOW_ALL = True\nROOT_URLCONF = \"core.urls\"\n\n# SSL Setup\nif DJANGO_HOST != \"LOCALHOST\":\n SECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n SECURE_SSL_REDIRECT = True\n SESSION_COOKIE_SECURE = True\n CSRF_COOKIE_SECURE = True\n\n# Internationalization & Localization\nLANGUAGE_CODE = \"en-us\"\nTIME_ZONE = \"UTC\"\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\n os.path.join(BASE_DIR, \"threads/templates/threads\"),\n os.path.join(BASE_DIR, \"accounts/templates/accounts\"),\n ], # TODO: Add non-webapp template directory\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"core.wsgi.application\"\n\n# Apex Contact for Production Errors\nADMINS = [(\"Development Team\", \"[email protected]\")]\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = (os.path.join(BASE_DIR, \"core/templates/static\"),)\nSTATIC_ROOT = os.path.join(BASE_DIR, \"staticfiles\")\n\nMEDIA_ROOT = os.path.join(BASE_DIR, \"media\")\nMEDIA_URL = \"/media/\"\n\n# TODO: re-organize and simplify staticfiles settings\nif \"CIVIWIKI_LOCAL_NAME\" not in os.environ:\n STATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\n\n# Use DATABASE_URL in production\nDATABASE_URL = os.getenv(\"DATABASE_URL\")\n\nif DATABASE_URL is not None:\n DATABASES = {\"default\": DATABASE_URL}\nelse:\n # Default to sqlite for simplicity in development\n DATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": BASE_DIR + \"/\" + \"db.sqlite3\",\n }\n }\n\n# Email Backend Setup\nif \"EMAIL_HOST\" not in os.environ:\n EMAIL_BACKEND = \"django.core.mail.backends.console.EmailBackend\"\n EMAIL_HOST_USER = \"[email protected]\"\nelse:\n EMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\n EMAIL_HOST = os.getenv(\"EMAIL_HOST\")\n EMAIL_PORT = os.getenv(\"EMAIL_PORT\")\n EMAIL_HOST_USER = os.getenv(\"EMAIL_HOST_USER\")\n EMAIL_HOST_PASSWORD = os.getenv(\"EMAIL_HOST_PASSWORD\")\n EMAIL_USE_SSL = True\n DEFAULT_FROM_EMAIL = EMAIL_HOST\n\n# Notification API Settings\nNOTIFICATIONS_SOFT_DELETE = True\nNOTIFICATIONS_USE_JSONFIELD = True\n\n# Django REST API Settings\nDEFAULT_RENDERER_CLASSES = (\"rest_framework.renderers.JSONRenderer\",)\n\nif DEBUG:\n # Browsable HTML - Enabled only in Debug mode (dev)\n DEFAULT_RENDERER_CLASSES = DEFAULT_RENDERER_CLASSES + (\n 
\"rest_framework.renderers.BrowsableAPIRenderer\",\n )\n\nREST_FRAMEWORK = {\n \"DEFAULT_PERMISSION_CLASSES\": (\"rest_framework.permissions.IsAuthenticated\",),\n \"DEFAULT_RENDERER_CLASSES\": DEFAULT_RENDERER_CLASSES,\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework.authentication.BasicAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n ),\n}\n\n# CORS Settings\nCORS_ORIGIN_ALLOW_ALL = True\n\n# Custom User model\nAUTH_USER_MODEL = \"accounts.User\"\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Login Logout URLS\nLOGIN_URL = \"login/\"\nLOGIN_REDIRECT_URL = \"/\"\nLOGOUT_REDIRECT_URL = \"/\"\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\", # noqa: E501\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n \"OPTIONS\": {\n \"min_length\": 4,\n },\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\"rich\": {\"datefmt\": \"[%X]\"}},\n \"handlers\": {\n \"console\": {\n \"class\": \"rich.logging.RichHandler\",\n \"formatter\": \"rich\",\n \"level\": \"WARNING\",\n # \"filters\": [\"require_debug_true\"],\n \"rich_tracebacks\": True,\n \"tracebacks_show_locals\": True,\n }\n },\n \"loggers\": {\"django\": {\"handlers\": [\"console\"]}},\n}\n", "path": "project/core/settings.py"}]}
| 2,808 | 298 |
gh_patches_debug_31048 | rasdani/github-patches | git_diff | Cog-Creators__Red-DiscordBot-1193 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError: 'MissingRequiredArgument' object has no attribute 'original'
https://sentry.io/will-tekulve/bot-development/issues/418646879/
```
AttributeError: 'MissingRequiredArgument' object has no attribute 'original'
File "redbot/core/events.py", line 187, in on_command_error
exc_info=error.original)
Exception in on_on_command_error
```
--- END ISSUE ---
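A note on the root cause before the code: in discord.py, only `commands.CommandInvokeError` wraps the underlying exception in an `.original` attribute; other `commands.CommandError` subclasses such as `MissingRequiredArgument` do not, so reading `error.original` unconditionally inside the error handler raises the `AttributeError` shown in the traceback. A minimal sketch of the defensive access pattern (the function name is invented here; the repository's patch further down achieves the same thing with `try`/`except AttributeError`):

```python
# Sketch only, assuming discord.py is installed.
from discord.ext import commands

def unwrap_command_error(error: commands.CommandError) -> BaseException:
    # Only CommandInvokeError carries the wrapped exception in `.original`;
    # fall back to the error itself for every other CommandError subclass.
    return getattr(error, "original", error)
```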
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redbot/core/events.py`
Content:
```
1 import sys
2 import codecs
3 import datetime
4 import logging
5 import pkg_resources
6 import traceback
7 from pkg_resources import DistributionNotFound
8
9
10 import discord
11 from discord.ext import commands
12
13 from . import __version__
14 from .data_manager import storage_type
15 from .utils.chat_formatting import inline, bordered
16 from .rpc import initialize
17 from colorama import Fore, Style, init
18
19 log = logging.getLogger("red")
20 sentry_log = logging.getLogger("red.sentry")
21 init()
22
23 INTRO = """
24 ______ _ ______ _ _ ______ _
25 | ___ \ | | | _ (_) | | | ___ \ | |
26 | |_/ /___ __| | ______ | | | |_ ___ ___ ___ _ __ __| | | |_/ / ___ | |_
27 | // _ \/ _` | |______| | | | | / __|/ __/ _ \| '__/ _` | | ___ \/ _ \| __|
28 | |\ \ __/ (_| | | |/ /| \__ \ (_| (_) | | | (_| | | |_/ / (_) | |_
29 \_| \_\___|\__,_| |___/ |_|___/\___\___/|_| \__,_| \____/ \___/ \__|
30 """
31
32
33 def init_events(bot, cli_flags):
34
35 @bot.event
36 async def on_connect():
37 if bot.uptime is None:
38 print("Connected to Discord. Getting ready...")
39
40 @bot.event
41 async def on_ready():
42 if bot.uptime is not None:
43 return
44
45 bot.uptime = datetime.datetime.utcnow()
46
47 if cli_flags.no_cogs is False:
48 print("Loading packages...")
49 failed = []
50 packages = await bot.db.packages()
51
52 for package in packages:
53 try:
54 spec = await bot.cog_mgr.find_cog(package)
55 bot.load_extension(spec)
56 except Exception as e:
57 log.exception("Failed to load package {}".format(package),
58 exc_info=e)
59 await bot.remove_loaded_package(package)
60 if packages:
61 print("Loaded packages: " + ", ".join(packages))
62
63 guilds = len(bot.guilds)
64 users = len(set([m for m in bot.get_all_members()]))
65
66 try:
67 data = await bot.application_info()
68 invite_url = discord.utils.oauth_url(data.id)
69 except:
70 if bot.user.bot:
71 invite_url = "Could not fetch invite url"
72 else:
73 invite_url = None
74
75 prefixes = await bot.db.prefix()
76 lang = await bot.db.locale()
77 red_version = __version__
78 red_pkg = pkg_resources.get_distribution("Red-DiscordBot")
79 dpy_version = discord.__version__
80
81 INFO = [str(bot.user), "Prefixes: {}".format(', '.join(prefixes)),
82 'Language: {}'.format(lang),
83 "Red Bot Version: {}".format(red_version),
84 "Discord.py Version: {}".format(dpy_version),
85 "Shards: {}".format(bot.shard_count)]
86
87 if guilds:
88 INFO.extend(("Servers: {}".format(guilds), "Users: {}".format(users)))
89 else:
90 print("Ready. I'm not in any server yet!")
91
92 INFO.append('{} cogs with {} commands'.format(len(bot.cogs), len(bot.commands)))
93
94 INFO2 = []
95
96 sentry = await bot.db.enable_sentry()
97 mongo_enabled = storage_type() != "JSON"
98 reqs_installed = {
99 "voice": None,
100 "docs": None,
101 "test": None
102 }
103 for key in reqs_installed.keys():
104 reqs = [x.name for x in red_pkg._dep_map[key]]
105 try:
106 pkg_resources.require(reqs)
107 except DistributionNotFound:
108 reqs_installed[key] = False
109 else:
110 reqs_installed[key] = True
111
112 options = (
113 ("Error Reporting", sentry),
114 ("MongoDB", mongo_enabled),
115 ("Voice", reqs_installed["voice"]),
116 ("Docs", reqs_installed["docs"]),
117 ("Tests", reqs_installed["test"])
118 )
119
120 on_symbol, off_symbol = _get_settings_symbols()
121
122 for option, enabled in options:
123 enabled = on_symbol if enabled else off_symbol
124 INFO2.append("{} {}".format(enabled, option))
125
126 print(Fore.RED + INTRO)
127 print(Style.RESET_ALL)
128 print(bordered(INFO, INFO2))
129
130 if invite_url:
131 print("\nInvite URL: {}\n".format(invite_url))
132
133 if bot.rpc_enabled:
134 await initialize(bot)
135
136 @bot.event
137 async def on_error(event_method, *args, **kwargs):
138 sentry_log.exception("Exception in on_{}".format(event_method))
139
140 @bot.event
141 async def on_command_error(ctx, error):
142 if isinstance(error, commands.MissingRequiredArgument):
143 await ctx.send_help()
144 elif isinstance(error, commands.BadArgument):
145 await ctx.send_help()
146 elif isinstance(error, commands.DisabledCommand):
147 await ctx.send("That command is disabled.")
148 elif isinstance(error, commands.CommandInvokeError):
149 # Need to test if the following still works
150 """
151 no_dms = "Cannot send messages to this user"
152 is_help_cmd = ctx.command.qualified_name == "help"
153 is_forbidden = isinstance(error.original, discord.Forbidden)
154 if is_help_cmd and is_forbidden and error.original.text == no_dms:
155 msg = ("I couldn't send the help message to you in DM. Either"
156 " you blocked me or you disabled DMs in this server.")
157 await ctx.send(msg)
158 return
159 """
160 log.exception("Exception in command '{}'"
161 "".format(ctx.command.qualified_name),
162 exc_info=error.original)
163 message = ("Error in command '{}'. Check your console or "
164 "logs for details."
165 "".format(ctx.command.qualified_name))
166 exception_log = ("Exception in command '{}'\n"
167 "".format(ctx.command.qualified_name))
168 exception_log += "".join(traceback.format_exception(type(error),
169 error, error.__traceback__))
170 bot._last_exception = exception_log
171 await ctx.send(inline(message))
172 elif isinstance(error, commands.CommandNotFound):
173 pass
174 elif isinstance(error, commands.CheckFailure):
175 await ctx.send("⛔ You are not authorized to issue that command.")
176 elif isinstance(error, commands.NoPrivateMessage):
177 await ctx.send("That command is not available in DMs.")
178 elif isinstance(error, commands.CommandOnCooldown):
179 await ctx.send("This command is on cooldown. "
180 "Try again in {:.2f}s"
181 "".format(error.retry_after))
182 else:
183 log.exception(type(error).__name__, exc_info=error)
184
185 sentry_log.exception("Exception in command '{}'"
186 "".format(ctx.command.qualified_name),
187 exc_info=error.original)
188
189 @bot.event
190 async def on_message(message):
191 bot.counter["messages_read"] += 1
192 await bot.process_commands(message)
193
194 @bot.event
195 async def on_resumed():
196 bot.counter["sessions_resumed"] += 1
197
198 @bot.event
199 async def on_command(command):
200 bot.counter["processed_commands"] += 1
201
202 def _get_settings_symbols():
203 """Get symbols for displaying settings on stdout.
204
205 This is so we don't get encoding errors when trying to print unicode
206 emojis to stdout (particularly with Windows Command Prompt).
207 """
208 encoder = codecs.getencoder(sys.stdout.encoding)
209 check_mark = "\N{SQUARE ROOT}"
210 try:
211 encoder(check_mark)
212 except UnicodeEncodeError:
213 on_symbol = "[X]"
214 off_symbol = "[ ]"
215 else:
216 on_symbol = check_mark
217 off_symbol = "X"
218
219 return on_symbol, off_symbol
220
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/redbot/core/events.py b/redbot/core/events.py
--- a/redbot/core/events.py
+++ b/redbot/core/events.py
@@ -135,7 +135,7 @@
@bot.event
async def on_error(event_method, *args, **kwargs):
- sentry_log.exception("Exception in on_{}".format(event_method))
+ sentry_log.exception("Exception in {}".format(event_method))
@bot.event
async def on_command_error(ctx, error):
@@ -160,6 +160,10 @@
log.exception("Exception in command '{}'"
"".format(ctx.command.qualified_name),
exc_info=error.original)
+ sentry_log.exception("Exception in command '{}'"
+ "".format(ctx.command.qualified_name),
+ exc_info=error.original)
+
message = ("Error in command '{}'. Check your console or "
"logs for details."
"".format(ctx.command.qualified_name))
@@ -181,10 +185,13 @@
"".format(error.retry_after))
else:
log.exception(type(error).__name__, exc_info=error)
+ try:
+ sentry_error = error.original
+ except AttributeError:
+ sentry_error = error
- sentry_log.exception("Exception in command '{}'"
- "".format(ctx.command.qualified_name),
- exc_info=error.original)
+ sentry_log.exception("Unhandled command error.",
+ exc_info=sentry_error)
@bot.event
async def on_message(message):
|
{"golden_diff": "diff --git a/redbot/core/events.py b/redbot/core/events.py\n--- a/redbot/core/events.py\n+++ b/redbot/core/events.py\n@@ -135,7 +135,7 @@\n \n @bot.event\n async def on_error(event_method, *args, **kwargs):\n- sentry_log.exception(\"Exception in on_{}\".format(event_method))\n+ sentry_log.exception(\"Exception in {}\".format(event_method))\n \n @bot.event\n async def on_command_error(ctx, error):\n@@ -160,6 +160,10 @@\n log.exception(\"Exception in command '{}'\"\n \"\".format(ctx.command.qualified_name),\n exc_info=error.original)\n+ sentry_log.exception(\"Exception in command '{}'\"\n+ \"\".format(ctx.command.qualified_name),\n+ exc_info=error.original)\n+\n message = (\"Error in command '{}'. Check your console or \"\n \"logs for details.\"\n \"\".format(ctx.command.qualified_name))\n@@ -181,10 +185,13 @@\n \"\".format(error.retry_after))\n else:\n log.exception(type(error).__name__, exc_info=error)\n+ try:\n+ sentry_error = error.original\n+ except AttributeError:\n+ sentry_error = error\n \n- sentry_log.exception(\"Exception in command '{}'\"\n- \"\".format(ctx.command.qualified_name),\n- exc_info=error.original)\n+ sentry_log.exception(\"Unhandled command error.\",\n+ exc_info=sentry_error)\n \n @bot.event\n async def on_message(message):\n", "issue": "AttributeError: 'MissingRequiredArgument' object has no attribute 'original'\nhttps://sentry.io/will-tekulve/bot-development/issues/418646879/\n\n```\nAttributeError: 'MissingRequiredArgument' object has no attribute 'original'\n File \"redbot/core/events.py\", line 187, in on_command_error\n exc_info=error.original)\n\nException in on_on_command_error\n```\n", "before_files": [{"content": "import sys\nimport codecs\nimport datetime\nimport logging\nimport pkg_resources\nimport traceback\nfrom pkg_resources import DistributionNotFound\n\n\nimport discord\nfrom discord.ext import commands\n\nfrom . import __version__\nfrom .data_manager import storage_type\nfrom .utils.chat_formatting import inline, bordered\nfrom .rpc import initialize\nfrom colorama import Fore, Style, init\n\nlog = logging.getLogger(\"red\")\nsentry_log = logging.getLogger(\"red.sentry\")\ninit()\n\nINTRO = \"\"\"\n______ _ ______ _ _ ______ _ \n| ___ \\ | | | _ (_) | | | ___ \\ | | \n| |_/ /___ __| | ______ | | | |_ ___ ___ ___ _ __ __| | | |_/ / ___ | |_ \n| // _ \\/ _` | |______| | | | | / __|/ __/ _ \\| '__/ _` | | ___ \\/ _ \\| __|\n| |\\ \\ __/ (_| | | |/ /| \\__ \\ (_| (_) | | | (_| | | |_/ / (_) | |_ \n\\_| \\_\\___|\\__,_| |___/ |_|___/\\___\\___/|_| \\__,_| \\____/ \\___/ \\__|\n\"\"\"\n\n\ndef init_events(bot, cli_flags):\n\n @bot.event\n async def on_connect():\n if bot.uptime is None:\n print(\"Connected to Discord. 
Getting ready...\")\n\n @bot.event\n async def on_ready():\n if bot.uptime is not None:\n return\n\n bot.uptime = datetime.datetime.utcnow()\n\n if cli_flags.no_cogs is False:\n print(\"Loading packages...\")\n failed = []\n packages = await bot.db.packages()\n\n for package in packages:\n try:\n spec = await bot.cog_mgr.find_cog(package)\n bot.load_extension(spec)\n except Exception as e:\n log.exception(\"Failed to load package {}\".format(package),\n exc_info=e)\n await bot.remove_loaded_package(package)\n if packages:\n print(\"Loaded packages: \" + \", \".join(packages))\n\n guilds = len(bot.guilds)\n users = len(set([m for m in bot.get_all_members()]))\n\n try:\n data = await bot.application_info()\n invite_url = discord.utils.oauth_url(data.id)\n except:\n if bot.user.bot:\n invite_url = \"Could not fetch invite url\"\n else:\n invite_url = None\n\n prefixes = await bot.db.prefix()\n lang = await bot.db.locale()\n red_version = __version__\n red_pkg = pkg_resources.get_distribution(\"Red-DiscordBot\")\n dpy_version = discord.__version__\n\n INFO = [str(bot.user), \"Prefixes: {}\".format(', '.join(prefixes)),\n 'Language: {}'.format(lang),\n \"Red Bot Version: {}\".format(red_version),\n \"Discord.py Version: {}\".format(dpy_version),\n \"Shards: {}\".format(bot.shard_count)]\n\n if guilds:\n INFO.extend((\"Servers: {}\".format(guilds), \"Users: {}\".format(users)))\n else:\n print(\"Ready. I'm not in any server yet!\")\n\n INFO.append('{} cogs with {} commands'.format(len(bot.cogs), len(bot.commands)))\n\n INFO2 = []\n\n sentry = await bot.db.enable_sentry()\n mongo_enabled = storage_type() != \"JSON\"\n reqs_installed = {\n \"voice\": None,\n \"docs\": None,\n \"test\": None\n }\n for key in reqs_installed.keys():\n reqs = [x.name for x in red_pkg._dep_map[key]]\n try:\n pkg_resources.require(reqs)\n except DistributionNotFound:\n reqs_installed[key] = False\n else:\n reqs_installed[key] = True\n\n options = (\n (\"Error Reporting\", sentry),\n (\"MongoDB\", mongo_enabled),\n (\"Voice\", reqs_installed[\"voice\"]),\n (\"Docs\", reqs_installed[\"docs\"]),\n (\"Tests\", reqs_installed[\"test\"])\n )\n\n on_symbol, off_symbol = _get_settings_symbols()\n\n for option, enabled in options:\n enabled = on_symbol if enabled else off_symbol\n INFO2.append(\"{} {}\".format(enabled, option))\n\n print(Fore.RED + INTRO)\n print(Style.RESET_ALL)\n print(bordered(INFO, INFO2))\n\n if invite_url:\n print(\"\\nInvite URL: {}\\n\".format(invite_url))\n\n if bot.rpc_enabled:\n await initialize(bot)\n\n @bot.event\n async def on_error(event_method, *args, **kwargs):\n sentry_log.exception(\"Exception in on_{}\".format(event_method))\n\n @bot.event\n async def on_command_error(ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send_help()\n elif isinstance(error, commands.BadArgument):\n await ctx.send_help()\n elif isinstance(error, commands.DisabledCommand):\n await ctx.send(\"That command is disabled.\")\n elif isinstance(error, commands.CommandInvokeError):\n # Need to test if the following still works\n \"\"\"\n no_dms = \"Cannot send messages to this user\"\n is_help_cmd = ctx.command.qualified_name == \"help\"\n is_forbidden = isinstance(error.original, discord.Forbidden)\n if is_help_cmd and is_forbidden and error.original.text == no_dms:\n msg = (\"I couldn't send the help message to you in DM. 
Either\"\n \" you blocked me or you disabled DMs in this server.\")\n await ctx.send(msg)\n return\n \"\"\"\n log.exception(\"Exception in command '{}'\"\n \"\".format(ctx.command.qualified_name),\n exc_info=error.original)\n message = (\"Error in command '{}'. Check your console or \"\n \"logs for details.\"\n \"\".format(ctx.command.qualified_name))\n exception_log = (\"Exception in command '{}'\\n\"\n \"\".format(ctx.command.qualified_name))\n exception_log += \"\".join(traceback.format_exception(type(error),\n error, error.__traceback__))\n bot._last_exception = exception_log\n await ctx.send(inline(message))\n elif isinstance(error, commands.CommandNotFound):\n pass\n elif isinstance(error, commands.CheckFailure):\n await ctx.send(\"\u26d4 You are not authorized to issue that command.\")\n elif isinstance(error, commands.NoPrivateMessage):\n await ctx.send(\"That command is not available in DMs.\")\n elif isinstance(error, commands.CommandOnCooldown):\n await ctx.send(\"This command is on cooldown. \"\n \"Try again in {:.2f}s\"\n \"\".format(error.retry_after))\n else:\n log.exception(type(error).__name__, exc_info=error)\n\n sentry_log.exception(\"Exception in command '{}'\"\n \"\".format(ctx.command.qualified_name),\n exc_info=error.original)\n\n @bot.event\n async def on_message(message):\n bot.counter[\"messages_read\"] += 1\n await bot.process_commands(message)\n\n @bot.event\n async def on_resumed():\n bot.counter[\"sessions_resumed\"] += 1\n\n @bot.event\n async def on_command(command):\n bot.counter[\"processed_commands\"] += 1\n\ndef _get_settings_symbols():\n \"\"\"Get symbols for displaying settings on stdout.\n\n This is so we don't get encoding errors when trying to print unicode\n emojis to stdout (particularly with Windows Command Prompt).\n \"\"\"\n encoder = codecs.getencoder(sys.stdout.encoding)\n check_mark = \"\\N{SQUARE ROOT}\"\n try:\n encoder(check_mark)\n except UnicodeEncodeError:\n on_symbol = \"[X]\"\n off_symbol = \"[ ]\"\n else:\n on_symbol = check_mark\n off_symbol = \"X\"\n\n return on_symbol, off_symbol\n", "path": "redbot/core/events.py"}], "after_files": [{"content": "import sys\nimport codecs\nimport datetime\nimport logging\nimport pkg_resources\nimport traceback\nfrom pkg_resources import DistributionNotFound\n\n\nimport discord\nfrom discord.ext import commands\n\nfrom . import __version__\nfrom .data_manager import storage_type\nfrom .utils.chat_formatting import inline, bordered\nfrom .rpc import initialize\nfrom colorama import Fore, Style, init\n\nlog = logging.getLogger(\"red\")\nsentry_log = logging.getLogger(\"red.sentry\")\ninit()\n\nINTRO = \"\"\"\n______ _ ______ _ _ ______ _ \n| ___ \\ | | | _ (_) | | | ___ \\ | | \n| |_/ /___ __| | ______ | | | |_ ___ ___ ___ _ __ __| | | |_/ / ___ | |_ \n| // _ \\/ _` | |______| | | | | / __|/ __/ _ \\| '__/ _` | | ___ \\/ _ \\| __|\n| |\\ \\ __/ (_| | | |/ /| \\__ \\ (_| (_) | | | (_| | | |_/ / (_) | |_ \n\\_| \\_\\___|\\__,_| |___/ |_|___/\\___\\___/|_| \\__,_| \\____/ \\___/ \\__|\n\"\"\"\n\n\ndef init_events(bot, cli_flags):\n\n @bot.event\n async def on_connect():\n if bot.uptime is None:\n print(\"Connected to Discord. 
Getting ready...\")\n\n @bot.event\n async def on_ready():\n if bot.uptime is not None:\n return\n\n bot.uptime = datetime.datetime.utcnow()\n\n if cli_flags.no_cogs is False:\n print(\"Loading packages...\")\n failed = []\n packages = await bot.db.packages()\n\n for package in packages:\n try:\n spec = await bot.cog_mgr.find_cog(package)\n bot.load_extension(spec)\n except Exception as e:\n log.exception(\"Failed to load package {}\".format(package),\n exc_info=e)\n await bot.remove_loaded_package(package)\n if packages:\n print(\"Loaded packages: \" + \", \".join(packages))\n\n guilds = len(bot.guilds)\n users = len(set([m for m in bot.get_all_members()]))\n\n try:\n data = await bot.application_info()\n invite_url = discord.utils.oauth_url(data.id)\n except:\n if bot.user.bot:\n invite_url = \"Could not fetch invite url\"\n else:\n invite_url = None\n\n prefixes = await bot.db.prefix()\n lang = await bot.db.locale()\n red_version = __version__\n red_pkg = pkg_resources.get_distribution(\"Red-DiscordBot\")\n dpy_version = discord.__version__\n\n INFO = [str(bot.user), \"Prefixes: {}\".format(', '.join(prefixes)),\n 'Language: {}'.format(lang),\n \"Red Bot Version: {}\".format(red_version),\n \"Discord.py Version: {}\".format(dpy_version),\n \"Shards: {}\".format(bot.shard_count)]\n\n if guilds:\n INFO.extend((\"Servers: {}\".format(guilds), \"Users: {}\".format(users)))\n else:\n print(\"Ready. I'm not in any server yet!\")\n\n INFO.append('{} cogs with {} commands'.format(len(bot.cogs), len(bot.commands)))\n\n INFO2 = []\n\n sentry = await bot.db.enable_sentry()\n mongo_enabled = storage_type() != \"JSON\"\n reqs_installed = {\n \"voice\": None,\n \"docs\": None,\n \"test\": None\n }\n for key in reqs_installed.keys():\n reqs = [x.name for x in red_pkg._dep_map[key]]\n try:\n pkg_resources.require(reqs)\n except DistributionNotFound:\n reqs_installed[key] = False\n else:\n reqs_installed[key] = True\n\n options = (\n (\"Error Reporting\", sentry),\n (\"MongoDB\", mongo_enabled),\n (\"Voice\", reqs_installed[\"voice\"]),\n (\"Docs\", reqs_installed[\"docs\"]),\n (\"Tests\", reqs_installed[\"test\"])\n )\n\n on_symbol, off_symbol = _get_settings_symbols()\n\n for option, enabled in options:\n enabled = on_symbol if enabled else off_symbol\n INFO2.append(\"{} {}\".format(enabled, option))\n\n print(Fore.RED + INTRO)\n print(Style.RESET_ALL)\n print(bordered(INFO, INFO2))\n\n if invite_url:\n print(\"\\nInvite URL: {}\\n\".format(invite_url))\n\n if bot.rpc_enabled:\n await initialize(bot)\n\n @bot.event\n async def on_error(event_method, *args, **kwargs):\n sentry_log.exception(\"Exception in {}\".format(event_method))\n\n @bot.event\n async def on_command_error(ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send_help()\n elif isinstance(error, commands.BadArgument):\n await ctx.send_help()\n elif isinstance(error, commands.DisabledCommand):\n await ctx.send(\"That command is disabled.\")\n elif isinstance(error, commands.CommandInvokeError):\n # Need to test if the following still works\n \"\"\"\n no_dms = \"Cannot send messages to this user\"\n is_help_cmd = ctx.command.qualified_name == \"help\"\n is_forbidden = isinstance(error.original, discord.Forbidden)\n if is_help_cmd and is_forbidden and error.original.text == no_dms:\n msg = (\"I couldn't send the help message to you in DM. 
Either\"\n \" you blocked me or you disabled DMs in this server.\")\n await ctx.send(msg)\n return\n \"\"\"\n log.exception(\"Exception in command '{}'\"\n \"\".format(ctx.command.qualified_name),\n exc_info=error.original)\n sentry_log.exception(\"Exception in command '{}'\"\n \"\".format(ctx.command.qualified_name),\n exc_info=error.original)\n\n message = (\"Error in command '{}'. Check your console or \"\n \"logs for details.\"\n \"\".format(ctx.command.qualified_name))\n exception_log = (\"Exception in command '{}'\\n\"\n \"\".format(ctx.command.qualified_name))\n exception_log += \"\".join(traceback.format_exception(type(error),\n error, error.__traceback__))\n bot._last_exception = exception_log\n await ctx.send(inline(message))\n elif isinstance(error, commands.CommandNotFound):\n pass\n elif isinstance(error, commands.CheckFailure):\n await ctx.send(\"\u26d4 You are not authorized to issue that command.\")\n elif isinstance(error, commands.NoPrivateMessage):\n await ctx.send(\"That command is not available in DMs.\")\n elif isinstance(error, commands.CommandOnCooldown):\n await ctx.send(\"This command is on cooldown. \"\n \"Try again in {:.2f}s\"\n \"\".format(error.retry_after))\n else:\n log.exception(type(error).__name__, exc_info=error)\n try:\n sentry_error = error.original\n except AttributeError:\n sentry_error = error\n\n sentry_log.exception(\"Unhandled command error.\",\n exc_info=sentry_error)\n\n @bot.event\n async def on_message(message):\n bot.counter[\"messages_read\"] += 1\n await bot.process_commands(message)\n\n @bot.event\n async def on_resumed():\n bot.counter[\"sessions_resumed\"] += 1\n\n @bot.event\n async def on_command(command):\n bot.counter[\"processed_commands\"] += 1\n\ndef _get_settings_symbols():\n \"\"\"Get symbols for displaying settings on stdout.\n\n This is so we don't get encoding errors when trying to print unicode\n emojis to stdout (particularly with Windows Command Prompt).\n \"\"\"\n encoder = codecs.getencoder(sys.stdout.encoding)\n check_mark = \"\\N{SQUARE ROOT}\"\n try:\n encoder(check_mark)\n except UnicodeEncodeError:\n on_symbol = \"[X]\"\n off_symbol = \"[ ]\"\n else:\n on_symbol = check_mark\n off_symbol = \"X\"\n\n return on_symbol, off_symbol\n", "path": "redbot/core/events.py"}]}
| 2,575 | 339 |
gh_patches_debug_19906 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-4246 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
v4 --replacements vs v5 --modify-headers
I'm trying to replace the `User-Agent` request header if it contains a certain string.
This works with "mitmproxy-4.0.4-linux":
```
./mitmproxy --replacements ":~hq User-Agent:Mozilla(.+):CUSTOMAGENT"
```
With "mitmproxy-5.2-linux", this at least replaces the `User-Agent`, but is missing my "certain string condition":
```
./mitmproxy --modify-headers "|~hq .+|User-Agent|CUSTOMAGENT"
```
How do I add my `Mozilla` condition in v5?
None of these work:
```
./mitmproxy --modify-headers "|~hq ^(.*?)Mozilla(.*?)$|User-Agent|CUSTOMAGENT"
./mitmproxy --modify-headers "/~hq .*?Mozilla.*?/User-Agent/CUSTOMAGENT"
./mitmproxy --modify-headers "|~hq Mozilla|User-Agent|CUSTOMAGENT"
./mitmproxy --modify-headers "|~hq User-Agent: Mozilla|User-Agent|CUSTOMAGENT"
./mitmproxy --modify-headers "|~hq \"^(.*?)Mozilla(.*?)$\"|User-Agent|CUSTOMAGENT"
```
I've been trying for hours, and I feel like I've tried every variation under the sun. There's a very small chance it's a bug, but most likely I'm just doing it wrong. If it matters, this system is Ubuntu 16.04.
--- END ISSUE ---
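Before looking at the addon, it helps to rule out the filter expression itself: `~hq` applies a regular expression to the request headers, so the reporter's `"|~hq Mozilla|User-Agent|CUSTOMAGENT"` attempt is the expected spelling, and the patch below shows the failure was in the addon's ordering rather than in the expression. A hedged way to sanity-check an expression, assuming a mitmproxy 5.x install is importable:

```python
# Sketch only: confirm the filter expression parses at all.
from mitmproxy import flowfilter

flt = flowfilter.parse("~hq Mozilla")
# A falsy result (or an exception, depending on the mitmproxy version) would
# mean the expression itself is invalid; a callable filter means it is fine.
print(bool(flt))
```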
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/addons/modifyheaders.py`
Content:
```
1 import re
2 import typing
3 from pathlib import Path
4
5 from mitmproxy import ctx, exceptions, flowfilter, http
6 from mitmproxy.net.http import Headers
7 from mitmproxy.utils import strutils
8 from mitmproxy.utils.spec import parse_spec
9
10
11 class ModifySpec(typing.NamedTuple):
12 matches: flowfilter.TFilter
13 subject: bytes
14 replacement_str: str
15
16 def read_replacement(self) -> bytes:
17 """
18 Process the replacement str. This usually just involves converting it to bytes.
19 However, if it starts with `@`, we interpret the rest as a file path to read from.
20
21 Raises:
22 - IOError if the file cannot be read.
23 """
24 if self.replacement_str.startswith("@"):
25 return Path(self.replacement_str[1:]).expanduser().read_bytes()
26 else:
27 # We could cache this at some point, but unlikely to be a problem.
28 return strutils.escaped_str_to_bytes(self.replacement_str)
29
30
31 def parse_modify_spec(option: str, subject_is_regex: bool) -> ModifySpec:
32 flow_filter, subject_str, replacement = parse_spec(option)
33
34 subject = strutils.escaped_str_to_bytes(subject_str)
35 if subject_is_regex:
36 try:
37 re.compile(subject)
38 except re.error as e:
39 raise ValueError(f"Invalid regular expression {subject!r} ({e})")
40
41 spec = ModifySpec(flow_filter, subject, replacement)
42
43 try:
44 spec.read_replacement()
45 except OSError as e:
46 raise ValueError(f"Invalid file path: {replacement[1:]} ({e})")
47
48 return spec
49
50
51 class ModifyHeaders:
52 def __init__(self):
53 self.replacements: typing.List[ModifySpec] = []
54
55 def load(self, loader):
56 loader.add_option(
57 "modify_headers", typing.Sequence[str], [],
58 """
59 Header modify pattern of the form "[/flow-filter]/header-name/[@]header-value", where the
60 separator can be any character. The @ allows to provide a file path that is used to read
61 the header value string. An empty header-value removes existing header-name headers.
62 """
63 )
64
65 def configure(self, updated):
66 if "modify_headers" in updated:
67 self.replacements = []
68 for option in ctx.options.modify_headers:
69 try:
70 spec = parse_modify_spec(option, False)
71 except ValueError as e:
72 raise exceptions.OptionsError(f"Cannot parse modify_headers option {option}: {e}") from e
73 self.replacements.append(spec)
74
75 def request(self, flow):
76 if flow.response or flow.error or flow.reply.state == "taken":
77 return
78 self.run(flow, flow.request.headers)
79
80 def response(self, flow):
81 if flow.error or flow.reply.state == "taken":
82 return
83 self.run(flow, flow.response.headers)
84
85 def run(self, flow: http.HTTPFlow, hdrs: Headers) -> None:
86 # unset all specified headers
87 for spec in self.replacements:
88 if spec.matches(flow):
89 hdrs.pop(spec.subject, None)
90
91 # set all specified headers if the replacement string is not empty
92 for spec in self.replacements:
93 if spec.matches(flow):
94 try:
95 replacement = spec.read_replacement()
96 except OSError as e:
97 ctx.log.warn(f"Could not read replacement file: {e}")
98 continue
99 else:
100 if replacement:
101 hdrs.add(spec.subject, replacement)
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mitmproxy/addons/modifyheaders.py b/mitmproxy/addons/modifyheaders.py
--- a/mitmproxy/addons/modifyheaders.py
+++ b/mitmproxy/addons/modifyheaders.py
@@ -83,14 +83,21 @@
self.run(flow, flow.response.headers)
def run(self, flow: http.HTTPFlow, hdrs: Headers) -> None:
- # unset all specified headers
+ matches = []
+
+ # first check all the filters against the original, unmodified flow
for spec in self.replacements:
- if spec.matches(flow):
+ matches.append(spec.matches(flow))
+
+ # unset all specified headers
+ for i, spec in enumerate(self.replacements):
+ if matches[i]:
hdrs.pop(spec.subject, None)
# set all specified headers if the replacement string is not empty
- for spec in self.replacements:
- if spec.matches(flow):
+
+ for i, spec in enumerate(self.replacements):
+ if matches[i]:
try:
replacement = spec.read_replacement()
except OSError as e:
|
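The key idea of the patch above is to evaluate every flow filter against the unmodified flow before any header is popped; otherwise a filter such as `~hq Mozilla` stops matching as soon as the first loop removes the `User-Agent` header, and the replacement loop never fires. A standalone illustration of the same decide-then-mutate ordering (no mitmproxy imports; the `Rule` and `apply_rules` names are invented for this sketch):

```python
from typing import Callable, Dict, List, Tuple

# (predicate over headers, header name, new value) — an empty value means "remove only".
Rule = Tuple[Callable[[Dict[str, str]], bool], str, str]

def apply_rules(headers: Dict[str, str], rules: List[Rule]) -> None:
    # Decide which rules match *before* mutating anything, mirroring the patch.
    matched = [predicate(headers) for predicate, _, _ in rules]
    for hit, (_, name, _) in zip(matched, rules):
        if hit:
            headers.pop(name, None)
    for hit, (_, name, value) in zip(matched, rules):
        if hit and value:
            headers[name] = value

hdrs = {"User-Agent": "Mozilla/5.0"}
apply_rules(hdrs, [(lambda h: "Mozilla" in h.get("User-Agent", ""), "User-Agent", "CUSTOMAGENT")])
print(hdrs)  # {'User-Agent': 'CUSTOMAGENT'}
```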
{"golden_diff": "diff --git a/mitmproxy/addons/modifyheaders.py b/mitmproxy/addons/modifyheaders.py\n--- a/mitmproxy/addons/modifyheaders.py\n+++ b/mitmproxy/addons/modifyheaders.py\n@@ -83,14 +83,21 @@\n self.run(flow, flow.response.headers)\n \n def run(self, flow: http.HTTPFlow, hdrs: Headers) -> None:\n- # unset all specified headers\n+ matches = []\n+\n+ # first check all the filters against the original, unmodified flow\n for spec in self.replacements:\n- if spec.matches(flow):\n+ matches.append(spec.matches(flow))\n+\n+ # unset all specified headers\n+ for i, spec in enumerate(self.replacements):\n+ if matches[i]:\n hdrs.pop(spec.subject, None)\n \n # set all specified headers if the replacement string is not empty\n- for spec in self.replacements:\n- if spec.matches(flow):\n+\n+ for i, spec in enumerate(self.replacements):\n+ if matches[i]:\n try:\n replacement = spec.read_replacement()\n except OSError as e:\n", "issue": "v4 --replacements vs v5 --modify-headers\nI'm trying to replace the `User-Agent` request header if it contains a certain string.\r\n\r\nThis works with \"mitmproxy-4.0.4-linux\":\r\n\r\n```\r\n./mitmproxy --replacements \":~hq User-Agent:Mozilla(.+):CUSTOMAGENT\"\r\n```\r\n\r\nWith \"mitmproxy-5.2-linux\", this at least replaces the `User-Agent`, but is missing my \"certain string condition\":\r\n\r\n```\r\n./mitmproxy --modify-headers \"|~hq .+|User-Agent|CUSTOMAGENT\"\r\n```\r\n\r\nHow do I add my `Mozilla` condition in v5?\r\n\r\nNone of these work:\r\n\r\n```\r\n./mitmproxy --modify-headers \"|~hq ^(.*?)Mozilla(.*?)$|User-Agent|CUSTOMAGENT\"\r\n\r\n./mitmproxy --modify-headers \"/~hq .*?Mozilla.*?/User-Agent/CUSTOMAGENT\"\r\n\r\n./mitmproxy --modify-headers \"|~hq Mozilla|User-Agent|CUSTOMAGENT\"\r\n\r\n./mitmproxy --modify-headers \"|~hq User-Agent: Mozilla|User-Agent|CUSTOMAGENT\"\r\n\r\n./mitmproxy --modify-headers \"|~hq \\\"^(.*?)Mozilla(.*?)$\\\"|User-Agent|CUSTOMAGENT\"\r\n```\r\n\r\nI've been trying for hours, and I feel like I've tried every variation under the sun. There's a very small chance it's a bug, but most likely I'm just doing it wrong. If it matters, this system is Ubuntu 16.04.\r\n\r\n\r\n\n", "before_files": [{"content": "import re\nimport typing\nfrom pathlib import Path\n\nfrom mitmproxy import ctx, exceptions, flowfilter, http\nfrom mitmproxy.net.http import Headers\nfrom mitmproxy.utils import strutils\nfrom mitmproxy.utils.spec import parse_spec\n\n\nclass ModifySpec(typing.NamedTuple):\n matches: flowfilter.TFilter\n subject: bytes\n replacement_str: str\n\n def read_replacement(self) -> bytes:\n \"\"\"\n Process the replacement str. 
This usually just involves converting it to bytes.\n However, if it starts with `@`, we interpret the rest as a file path to read from.\n\n Raises:\n - IOError if the file cannot be read.\n \"\"\"\n if self.replacement_str.startswith(\"@\"):\n return Path(self.replacement_str[1:]).expanduser().read_bytes()\n else:\n # We could cache this at some point, but unlikely to be a problem.\n return strutils.escaped_str_to_bytes(self.replacement_str)\n\n\ndef parse_modify_spec(option: str, subject_is_regex: bool) -> ModifySpec:\n flow_filter, subject_str, replacement = parse_spec(option)\n\n subject = strutils.escaped_str_to_bytes(subject_str)\n if subject_is_regex:\n try:\n re.compile(subject)\n except re.error as e:\n raise ValueError(f\"Invalid regular expression {subject!r} ({e})\")\n\n spec = ModifySpec(flow_filter, subject, replacement)\n\n try:\n spec.read_replacement()\n except OSError as e:\n raise ValueError(f\"Invalid file path: {replacement[1:]} ({e})\")\n\n return spec\n\n\nclass ModifyHeaders:\n def __init__(self):\n self.replacements: typing.List[ModifySpec] = []\n\n def load(self, loader):\n loader.add_option(\n \"modify_headers\", typing.Sequence[str], [],\n \"\"\"\n Header modify pattern of the form \"[/flow-filter]/header-name/[@]header-value\", where the\n separator can be any character. The @ allows to provide a file path that is used to read\n the header value string. An empty header-value removes existing header-name headers.\n \"\"\"\n )\n\n def configure(self, updated):\n if \"modify_headers\" in updated:\n self.replacements = []\n for option in ctx.options.modify_headers:\n try:\n spec = parse_modify_spec(option, False)\n except ValueError as e:\n raise exceptions.OptionsError(f\"Cannot parse modify_headers option {option}: {e}\") from e\n self.replacements.append(spec)\n\n def request(self, flow):\n if flow.response or flow.error or flow.reply.state == \"taken\":\n return\n self.run(flow, flow.request.headers)\n\n def response(self, flow):\n if flow.error or flow.reply.state == \"taken\":\n return\n self.run(flow, flow.response.headers)\n\n def run(self, flow: http.HTTPFlow, hdrs: Headers) -> None:\n # unset all specified headers\n for spec in self.replacements:\n if spec.matches(flow):\n hdrs.pop(spec.subject, None)\n\n # set all specified headers if the replacement string is not empty\n for spec in self.replacements:\n if spec.matches(flow):\n try:\n replacement = spec.read_replacement()\n except OSError as e:\n ctx.log.warn(f\"Could not read replacement file: {e}\")\n continue\n else:\n if replacement:\n hdrs.add(spec.subject, replacement)\n", "path": "mitmproxy/addons/modifyheaders.py"}], "after_files": [{"content": "import re\nimport typing\nfrom pathlib import Path\n\nfrom mitmproxy import ctx, exceptions, flowfilter, http\nfrom mitmproxy.net.http import Headers\nfrom mitmproxy.utils import strutils\nfrom mitmproxy.utils.spec import parse_spec\n\n\nclass ModifySpec(typing.NamedTuple):\n matches: flowfilter.TFilter\n subject: bytes\n replacement_str: str\n\n def read_replacement(self) -> bytes:\n \"\"\"\n Process the replacement str. 
This usually just involves converting it to bytes.\n However, if it starts with `@`, we interpret the rest as a file path to read from.\n\n Raises:\n - IOError if the file cannot be read.\n \"\"\"\n if self.replacement_str.startswith(\"@\"):\n return Path(self.replacement_str[1:]).expanduser().read_bytes()\n else:\n # We could cache this at some point, but unlikely to be a problem.\n return strutils.escaped_str_to_bytes(self.replacement_str)\n\n\ndef parse_modify_spec(option: str, subject_is_regex: bool) -> ModifySpec:\n flow_filter, subject_str, replacement = parse_spec(option)\n\n subject = strutils.escaped_str_to_bytes(subject_str)\n if subject_is_regex:\n try:\n re.compile(subject)\n except re.error as e:\n raise ValueError(f\"Invalid regular expression {subject!r} ({e})\")\n\n spec = ModifySpec(flow_filter, subject, replacement)\n\n try:\n spec.read_replacement()\n except OSError as e:\n raise ValueError(f\"Invalid file path: {replacement[1:]} ({e})\")\n\n return spec\n\n\nclass ModifyHeaders:\n def __init__(self):\n self.replacements: typing.List[ModifySpec] = []\n\n def load(self, loader):\n loader.add_option(\n \"modify_headers\", typing.Sequence[str], [],\n \"\"\"\n Header modify pattern of the form \"[/flow-filter]/header-name/[@]header-value\", where the\n separator can be any character. The @ allows to provide a file path that is used to read\n the header value string. An empty header-value removes existing header-name headers.\n \"\"\"\n )\n\n def configure(self, updated):\n if \"modify_headers\" in updated:\n self.replacements = []\n for option in ctx.options.modify_headers:\n try:\n spec = parse_modify_spec(option, False)\n except ValueError as e:\n raise exceptions.OptionsError(f\"Cannot parse modify_headers option {option}: {e}\") from e\n self.replacements.append(spec)\n\n def request(self, flow):\n if flow.response or flow.error or flow.reply.state == \"taken\":\n return\n self.run(flow, flow.request.headers)\n\n def response(self, flow):\n if flow.error or flow.reply.state == \"taken\":\n return\n self.run(flow, flow.response.headers)\n\n def run(self, flow: http.HTTPFlow, hdrs: Headers) -> None:\n matches = []\n\n # first check all the filters against the original, unmodified flow\n for spec in self.replacements:\n matches.append(spec.matches(flow))\n\n # unset all specified headers\n for i, spec in enumerate(self.replacements):\n if matches[i]:\n hdrs.pop(spec.subject, None)\n\n # set all specified headers if the replacement string is not empty\n\n for i, spec in enumerate(self.replacements):\n if matches[i]:\n try:\n replacement = spec.read_replacement()\n except OSError as e:\n ctx.log.warn(f\"Could not read replacement file: {e}\")\n continue\n else:\n if replacement:\n hdrs.add(spec.subject, replacement)\n", "path": "mitmproxy/addons/modifyheaders.py"}]}
| 1,521 | 247 |
gh_patches_debug_38553 | rasdani/github-patches | git_diff | ansible__awx-9295 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
awx.awx.tower_settings does not return expected changes when running in check mode with diff
<!-- Issues are for **concrete, actionable bugs and feature requests** only - if you're just asking for debugging help or technical support, please use:
- http://webchat.freenode.net/?channels=ansible-awx
- https://groups.google.com/forum/#!forum/awx-project
We have to limit this because of limited volunteer time to respond to issues! -->
##### ISSUE TYPE
- Bug Report
##### SUMMARY
Running the module with check and diff mode simultaneously does not return the list of expected changes. It only returns that the module will change some unknown thing (by setting the `changed` variable true).
As check and diff mode is mainly used for, erm.. checking, this makes it hard to actually check the correct invocation of the module, especially when multiple settings are set.
##### ENVIRONMENT
* AWX version: 15.0.1
* AWX install method: docker on linux
* Ansible version: 2.9.10
* Operating System: Debian 10
* Web Browser: Chrome
##### STEPS TO REPRODUCE
Task extract:
```
- name: Set LDAP
tower_settings:
settings:
AUTH_LDAP_SERVER_URI: "ldap://ldap.example.com:389"
AUTH_LDAP_BIND_DN: CN=user,DC=example,DC=com
AUTH_LDAP_BIND_PASSWORD: password
AUTH_LDAP_START_TLS: true
```
Make sure that some of these settings have a different value in AWX.
Run the task with and without check&diff mode.
##### EXPECTED RESULTS
Expected at least some insight why the module reports changed status when running with check&diff mode.
For example, if the AWX server's AUTH_LDAP_SERVER_URI is `ldap://nonexistent.com:389`, then running in check & diff mode could produce output similar to a normal run:
```
changed: [localhost] => {
"changed": true,
"invocation": {
"module_args": {
"settings": {
"AUTH_LDAP_SERVER_URI": "ldap://ldap.example.com:389",
"AUTH_LDAP_BIND_DN": "CN=user,DC=example,DC=com",
"AUTH_LDAP_BIND_PASSWORD": "password",
"AUTH_LDAP_START_TLS": true
},
<...other module args ...>
}
},
"old_values": {
"AUTH_LDAP_SERVER_URI": "ldap://nonexistent.com:389"
},
"values": {
"AUTH_LDAP_SERVER_URI": "ldap://ldap.example.com:389"
}
}
```
##### ACTUAL RESULTS
Running the module with check&diff mode produces less usable output regarding the variables to be changed:
```
changed: [localhost] => {
"changed": true,
"invocation": {
"module_args": {
"settings": {
"AUTH_LDAP_SERVER_URI": "ldap://ldap.example.com:389",
"AUTH_LDAP_BIND_DN": "CN=user,DC=example,DC=com",
"AUTH_LDAP_BIND_PASSWORD": "password",
"AUTH_LDAP_START_TLS": true
},
<...other module args ...>
}
}
```
##### ADDITIONAL INFORMATION
As i see the main reason behind such behaviour is because a [new json_response dict is created](https://github.com/ansible/awx/blob/29926ba5d99b760b32de35e07c211fd3e7691c8d/awx_collection/plugins/modules/tower_settings.py#L136) in the module's code, but the [check_mode exit point](https://github.com/ansible/awx/blob/29926ba5d99b760b32de35e07c211fd3e7691c8d/awx_collection/plugins/module_utils/tower_api.py#L85) uses its own `json_output` dict.
Edit:
Completely forgot about diff mode, but of course that's where I expect to see the detailed changes.
--- END ISSUE ---
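The analysis above points at the real gap: the module collects `old_values` in its own `json_response`, but the shared check-mode exit path returns a separate `json_output`, so the gathered before/after values never reach the caller. A hedged sketch of the kind of result payload that would make `--check --diff` informative — Ansible renders the `diff` key when diff mode is on; the other names here are illustrative, not the collection's actual code:

```python
# Sketch only: build a check-mode result carrying old values and an
# Ansible-style diff payload.
def build_check_result(existing, requested):
    changed_keys = {k: v for k, v in requested.items() if existing.get(k) != v}
    return {
        "changed": bool(changed_keys),
        "old_values": {k: existing.get(k) for k in changed_keys},
        "values": changed_keys,
        # Ansible displays this structure when the task runs with --diff.
        "diff": {
            "before": {k: existing.get(k) for k in changed_keys},
            "after": changed_keys,
        },
    }

print(build_check_result(
    {"AUTH_LDAP_SERVER_URI": "ldap://nonexistent.com:389"},
    {"AUTH_LDAP_SERVER_URI": "ldap://ldap.example.com:389"},
))
```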
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awx_collection/plugins/modules/tower_settings.py`
Content:
```
1 #!/usr/bin/python
2 # coding: utf-8 -*-
3
4 # (c) 2018, Nikhil Jain <[email protected]>
5 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
6
7 from __future__ import absolute_import, division, print_function
8 __metaclass__ = type
9
10
11 ANSIBLE_METADATA = {'metadata_version': '1.1',
12 'status': ['preview'],
13 'supported_by': 'community'}
14
15
16 DOCUMENTATION = '''
17 ---
18 module: tower_settings
19 author: "Nikhil Jain (@jainnikhil30)"
20 short_description: Modify Ansible Tower settings.
21 description:
22 - Modify Ansible Tower settings. See
23 U(https://www.ansible.com/tower) for an overview.
24 options:
25 name:
26 description:
27 - Name of setting to modify
28 type: str
29 value:
30 description:
31 - Value to be modified for given setting.
32 - If given a non-string type, will make best effort to cast it to type API expects.
33 - For better control over types, use the C(settings) param instead.
34 type: str
35 settings:
36 description:
37 - A data structure to be sent into the settings endpoint
38 type: dict
39 requirements:
40 - pyyaml
41 extends_documentation_fragment: awx.awx.auth
42 '''
43
44 EXAMPLES = '''
45 - name: Set the value of AWX_PROOT_BASE_PATH
46 tower_settings:
47 name: AWX_PROOT_BASE_PATH
48 value: "/tmp"
49 register: testing_settings
50
51 - name: Set the value of AWX_PROOT_SHOW_PATHS
52 tower_settings:
53 name: "AWX_PROOT_SHOW_PATHS"
54 value: "'/var/lib/awx/projects/', '/tmp'"
55 register: testing_settings
56
57 - name: Set the LDAP Auth Bind Password
58 tower_settings:
59 name: "AUTH_LDAP_BIND_PASSWORD"
60 value: "Password"
61 no_log: true
62
63 - name: Set all the LDAP Auth Bind Params
64 tower_settings:
65 settings:
66 AUTH_LDAP_BIND_PASSWORD: "password"
67 AUTH_LDAP_USER_ATTR_MAP:
68 email: "mail"
69 first_name: "givenName"
70 last_name: "surname"
71 '''
72
73 from ..module_utils.tower_api import TowerAPIModule
74
75 try:
76 import yaml
77 HAS_YAML = True
78 except ImportError:
79 HAS_YAML = False
80
81
82 def coerce_type(module, value):
83 # If our value is already None we can just return directly
84 if value is None:
85 return value
86
87 yaml_ish = bool((
88 value.startswith('{') and value.endswith('}')
89 ) or (
90 value.startswith('[') and value.endswith(']'))
91 )
92 if yaml_ish:
93 if not HAS_YAML:
94 module.fail_json(msg="yaml is not installed, try 'pip install pyyaml'")
95 return yaml.safe_load(value)
96 elif value.lower in ('true', 'false', 't', 'f'):
97 return {'t': True, 'f': False}[value[0].lower()]
98 try:
99 return int(value)
100 except ValueError:
101 pass
102 return value
103
104
105 def main():
106 # Any additional arguments that are not fields of the item can be added here
107 argument_spec = dict(
108 name=dict(),
109 value=dict(),
110 settings=dict(type='dict'),
111 )
112
113 # Create a module for ourselves
114 module = TowerAPIModule(
115 argument_spec=argument_spec,
116 required_one_of=[['name', 'settings']],
117 mutually_exclusive=[['name', 'settings']],
118 required_if=[['name', 'present', ['value']]]
119 )
120
121 # Extract our parameters
122 name = module.params.get('name')
123 value = module.params.get('value')
124 new_settings = module.params.get('settings')
125
126 # If we were given a name/value pair we will just make settings out of that and proceed normally
127 if new_settings is None:
128 new_value = coerce_type(module, value)
129
130 new_settings = {name: new_value}
131
132 # Load the existing settings
133 existing_settings = module.get_endpoint('settings/all')['json']
134
135 # Begin a json response
136 json_response = {'changed': False, 'old_values': {}}
137
138 # Check any of the settings to see if anything needs to be updated
139 needs_update = False
140 for a_setting in new_settings:
141 if a_setting not in existing_settings or existing_settings[a_setting] != new_settings[a_setting]:
142 # At least one thing is different so we need to patch
143 needs_update = True
144 json_response['old_values'][a_setting] = existing_settings[a_setting]
145
146 # If nothing needs an update we can simply exit with the response (as not changed)
147 if not needs_update:
148 module.exit_json(**json_response)
149
150 # Make the call to update the settings
151 response = module.patch_endpoint('settings/all', **{'data': new_settings})
152
153 if response['status_code'] == 200:
154 # Set the changed response to True
155 json_response['changed'] = True
156
157 # To deal with the old style values we need to return 'value' in the response
158 new_values = {}
159 for a_setting in new_settings:
160 new_values[a_setting] = response['json'][a_setting]
161
162 # If we were using a name we will just add a value of a string, otherwise we will return an array in values
163 if name is not None:
164 json_response['value'] = new_values[name]
165 else:
166 json_response['values'] = new_values
167
168 module.exit_json(**json_response)
169 elif 'json' in response and '__all__' in response['json']:
170 module.fail_json(msg=response['json']['__all__'])
171 else:
172 module.fail_json(**{'msg': "Unable to update settings, see response", 'response': response})
173
174
175 if __name__ == '__main__':
176 main()
177
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/awx_collection/plugins/modules/tower_settings.py b/awx_collection/plugins/modules/tower_settings.py
--- a/awx_collection/plugins/modules/tower_settings.py
+++ b/awx_collection/plugins/modules/tower_settings.py
@@ -133,7 +133,7 @@
existing_settings = module.get_endpoint('settings/all')['json']
# Begin a json response
- json_response = {'changed': False, 'old_values': {}}
+ json_output = {'changed': False, 'old_values': {}, 'new_values': {}}
# Check any of the settings to see if anything needs to be updated
needs_update = False
@@ -141,18 +141,29 @@
if a_setting not in existing_settings or existing_settings[a_setting] != new_settings[a_setting]:
# At least one thing is different so we need to patch
needs_update = True
- json_response['old_values'][a_setting] = existing_settings[a_setting]
+ json_output['old_values'][a_setting] = existing_settings[a_setting]
+ json_output['new_values'][a_setting] = new_settings[a_setting]
+
+ if module._diff:
+ json_output['diff'] = {
+ 'before': json_output['old_values'],
+ 'after': json_output['new_values']
+ }
# If nothing needs an update we can simply exit with the response (as not changed)
if not needs_update:
- module.exit_json(**json_response)
+ module.exit_json(**json_output)
+
+ if module.check_mode and module._diff:
+ json_output['changed'] = True
+ module.exit_json(**json_output)
# Make the call to update the settings
response = module.patch_endpoint('settings/all', **{'data': new_settings})
if response['status_code'] == 200:
# Set the changed response to True
- json_response['changed'] = True
+ json_output['changed'] = True
# To deal with the old style values we need to return 'value' in the response
new_values = {}
@@ -161,11 +172,11 @@
# If we were using a name we will just add a value of a string, otherwise we will return an array in values
if name is not None:
- json_response['value'] = new_values[name]
+ json_output['value'] = new_values[name]
else:
- json_response['values'] = new_values
+ json_output['values'] = new_values
- module.exit_json(**json_response)
+ module.exit_json(**json_output)
elif 'json' in response and '__all__' in response['json']:
module.fail_json(msg=response['json']['__all__'])
else:
|
{"golden_diff": "diff --git a/awx_collection/plugins/modules/tower_settings.py b/awx_collection/plugins/modules/tower_settings.py\n--- a/awx_collection/plugins/modules/tower_settings.py\n+++ b/awx_collection/plugins/modules/tower_settings.py\n@@ -133,7 +133,7 @@\n existing_settings = module.get_endpoint('settings/all')['json']\n \n # Begin a json response\n- json_response = {'changed': False, 'old_values': {}}\n+ json_output = {'changed': False, 'old_values': {}, 'new_values': {}}\n \n # Check any of the settings to see if anything needs to be updated\n needs_update = False\n@@ -141,18 +141,29 @@\n if a_setting not in existing_settings or existing_settings[a_setting] != new_settings[a_setting]:\n # At least one thing is different so we need to patch\n needs_update = True\n- json_response['old_values'][a_setting] = existing_settings[a_setting]\n+ json_output['old_values'][a_setting] = existing_settings[a_setting]\n+ json_output['new_values'][a_setting] = new_settings[a_setting]\n+\n+ if module._diff:\n+ json_output['diff'] = {\n+ 'before': json_output['old_values'],\n+ 'after': json_output['new_values']\n+ }\n \n # If nothing needs an update we can simply exit with the response (as not changed)\n if not needs_update:\n- module.exit_json(**json_response)\n+ module.exit_json(**json_output)\n+\n+ if module.check_mode and module._diff:\n+ json_output['changed'] = True\n+ module.exit_json(**json_output)\n \n # Make the call to update the settings\n response = module.patch_endpoint('settings/all', **{'data': new_settings})\n \n if response['status_code'] == 200:\n # Set the changed response to True\n- json_response['changed'] = True\n+ json_output['changed'] = True\n \n # To deal with the old style values we need to return 'value' in the response\n new_values = {}\n@@ -161,11 +172,11 @@\n \n # If we were using a name we will just add a value of a string, otherwise we will return an array in values\n if name is not None:\n- json_response['value'] = new_values[name]\n+ json_output['value'] = new_values[name]\n else:\n- json_response['values'] = new_values\n+ json_output['values'] = new_values\n \n- module.exit_json(**json_response)\n+ module.exit_json(**json_output)\n elif 'json' in response and '__all__' in response['json']:\n module.fail_json(msg=response['json']['__all__'])\n else:\n", "issue": "awx.awx.tower_settings does not return expected changes when running in check mode with diff\n<!-- Issues are for **concrete, actionable bugs and feature requests** only - if you're just asking for debugging help or technical support, please use:\r\n\r\n- http://webchat.freenode.net/?channels=ansible-awx\r\n- https://groups.google.com/forum/#!forum/awx-project\r\n\r\nWe have to limit this because of limited volunteer time to respond to issues! -->\r\n\r\n##### ISSUE TYPE\r\n - Bug Report\r\n\r\n##### SUMMARY\r\nRunning the module with check and diff mode simultaneously does not return the list of expected changes. It only returns that the module will change some unknown thing (by setting the `changed` variable true).\r\nAs check and diff mode is mainly used for, erm.. 
checking, this makes it hard to actually check the correct invocation of the module, especially when multiple settings are set.\r\n\r\n##### ENVIRONMENT\r\n* AWX version: 15.0.1\r\n* AWX install method: docker on linux\r\n* Ansible version: 2.9.10\r\n* Operating System: Debian 10\r\n* Web Browser: Chrome\r\n\r\n##### STEPS TO REPRODUCE\r\n\r\nTask extract:\r\n```\r\n- name: Set LDAP\r\n tower_settings:\r\n settings:\r\n AUTH_LDAP_SERVER_URI: \"ldap://ldap.example.com:389\"\r\n AUTH_LDAP_BIND_DN: CN=user,DC=example,DC=com\r\n AUTH_LDAP_BIND_PASSWORD: password\r\n AUTH_LDAP_START_TLS: true\r\n```\r\nMake sure that some of these settings have a different value in AWX.\r\nRun the task with and without check&diff mode.\r\n\r\n##### EXPECTED RESULTS\r\nExpected at least some insight why the module reports changed status when running with check&diff mode.\r\n\r\nFor eg if AWX server's AUTH_LDAP_SERVER_URI is `ldap://nonexistent.com:389`, then running check&diff mode could produce similar output when running in normal mode:\r\n```\r\nchanged: [localhost] => {\r\n \"changed\": true,\r\n \"invocation\": {\r\n \"module_args\": {\r\n \"settings\": {\r\n \"AUTH_LDAP_SERVER_URI\": \"ldap://ldap.example.com:389\",\r\n \"AUTH_LDAP_BIND_DN\": \"CN=user,DC=example,DC=com\",\r\n \"AUTH_LDAP_BIND_PASSWORD\": \"password\",\r\n \"AUTH_LDAP_START_TLS\": true\r\n },\r\n <...other module args ...>\r\n }\r\n },\r\n \"old_values\": {\r\n \"AUTH_LDAP_SERVER_URI\": \"ldap://nonexistent.com:389\"\r\n },\r\n \"values\": {\r\n \"AUTH_LDAP_SERVER_URI\": \"ldap://ldap.example.com:389\"\r\n }\r\n}\r\n```\r\n\r\n##### ACTUAL RESULTS\r\nRunning the module with check&diff mode produces less usable output regarding the variables to be changed:\r\n```\r\nchanged: [localhost] => {\r\n \"changed\": true,\r\n \"invocation\": {\r\n \"module_args\": {\r\n \"settings\": {\r\n \"AUTH_LDAP_SERVER_URI\": \"ldap://ldap.example.com:389\",\r\n \"AUTH_LDAP_BIND_DN\": \"CN=user,DC=example,DC=com\",\r\n \"AUTH_LDAP_BIND_PASSWORD\": \"password\",\r\n \"AUTH_LDAP_START_TLS\": true\r\n },\r\n <...other module args ...>\r\n }\r\n}\r\n```\r\n##### ADDITIONAL INFORMATION\r\n\r\nAs i see the main reason behind such behaviour is because a [new json_response dict is created](https://github.com/ansible/awx/blob/29926ba5d99b760b32de35e07c211fd3e7691c8d/awx_collection/plugins/modules/tower_settings.py#L136) in the module's code, but the [check_mode exit point](https://github.com/ansible/awx/blob/29926ba5d99b760b32de35e07c211fd3e7691c8d/awx_collection/plugins/module_utils/tower_api.py#L85) uses its own `json_output` dict.\r\n\r\nEdit:\r\nCompletely forgot about diff mode, but ofc thats where i expect to see the detailed changes.\n", "before_files": [{"content": "#!/usr/bin/python\n# coding: utf-8 -*-\n\n# (c) 2018, Nikhil Jain <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: tower_settings\nauthor: \"Nikhil Jain (@jainnikhil30)\"\nshort_description: Modify Ansible Tower settings.\ndescription:\n - Modify Ansible Tower settings. 
See\n U(https://www.ansible.com/tower) for an overview.\noptions:\n name:\n description:\n - Name of setting to modify\n type: str\n value:\n description:\n - Value to be modified for given setting.\n - If given a non-string type, will make best effort to cast it to type API expects.\n - For better control over types, use the C(settings) param instead.\n type: str\n settings:\n description:\n - A data structure to be sent into the settings endpoint\n type: dict\nrequirements:\n - pyyaml\nextends_documentation_fragment: awx.awx.auth\n'''\n\nEXAMPLES = '''\n- name: Set the value of AWX_PROOT_BASE_PATH\n tower_settings:\n name: AWX_PROOT_BASE_PATH\n value: \"/tmp\"\n register: testing_settings\n\n- name: Set the value of AWX_PROOT_SHOW_PATHS\n tower_settings:\n name: \"AWX_PROOT_SHOW_PATHS\"\n value: \"'/var/lib/awx/projects/', '/tmp'\"\n register: testing_settings\n\n- name: Set the LDAP Auth Bind Password\n tower_settings:\n name: \"AUTH_LDAP_BIND_PASSWORD\"\n value: \"Password\"\n no_log: true\n\n- name: Set all the LDAP Auth Bind Params\n tower_settings:\n settings:\n AUTH_LDAP_BIND_PASSWORD: \"password\"\n AUTH_LDAP_USER_ATTR_MAP:\n email: \"mail\"\n first_name: \"givenName\"\n last_name: \"surname\"\n'''\n\nfrom ..module_utils.tower_api import TowerAPIModule\n\ntry:\n import yaml\n HAS_YAML = True\nexcept ImportError:\n HAS_YAML = False\n\n\ndef coerce_type(module, value):\n # If our value is already None we can just return directly\n if value is None:\n return value\n\n yaml_ish = bool((\n value.startswith('{') and value.endswith('}')\n ) or (\n value.startswith('[') and value.endswith(']'))\n )\n if yaml_ish:\n if not HAS_YAML:\n module.fail_json(msg=\"yaml is not installed, try 'pip install pyyaml'\")\n return yaml.safe_load(value)\n elif value.lower in ('true', 'false', 't', 'f'):\n return {'t': True, 'f': False}[value[0].lower()]\n try:\n return int(value)\n except ValueError:\n pass\n return value\n\n\ndef main():\n # Any additional arguments that are not fields of the item can be added here\n argument_spec = dict(\n name=dict(),\n value=dict(),\n settings=dict(type='dict'),\n )\n\n # Create a module for ourselves\n module = TowerAPIModule(\n argument_spec=argument_spec,\n required_one_of=[['name', 'settings']],\n mutually_exclusive=[['name', 'settings']],\n required_if=[['name', 'present', ['value']]]\n )\n\n # Extract our parameters\n name = module.params.get('name')\n value = module.params.get('value')\n new_settings = module.params.get('settings')\n\n # If we were given a name/value pair we will just make settings out of that and proceed normally\n if new_settings is None:\n new_value = coerce_type(module, value)\n\n new_settings = {name: new_value}\n\n # Load the existing settings\n existing_settings = module.get_endpoint('settings/all')['json']\n\n # Begin a json response\n json_response = {'changed': False, 'old_values': {}}\n\n # Check any of the settings to see if anything needs to be updated\n needs_update = False\n for a_setting in new_settings:\n if a_setting not in existing_settings or existing_settings[a_setting] != new_settings[a_setting]:\n # At least one thing is different so we need to patch\n needs_update = True\n json_response['old_values'][a_setting] = existing_settings[a_setting]\n\n # If nothing needs an update we can simply exit with the response (as not changed)\n if not needs_update:\n module.exit_json(**json_response)\n\n # Make the call to update the settings\n response = module.patch_endpoint('settings/all', **{'data': new_settings})\n\n if 
response['status_code'] == 200:\n # Set the changed response to True\n json_response['changed'] = True\n\n # To deal with the old style values we need to return 'value' in the response\n new_values = {}\n for a_setting in new_settings:\n new_values[a_setting] = response['json'][a_setting]\n\n # If we were using a name we will just add a value of a string, otherwise we will return an array in values\n if name is not None:\n json_response['value'] = new_values[name]\n else:\n json_response['values'] = new_values\n\n module.exit_json(**json_response)\n elif 'json' in response and '__all__' in response['json']:\n module.fail_json(msg=response['json']['__all__'])\n else:\n module.fail_json(**{'msg': \"Unable to update settings, see response\", 'response': response})\n\n\nif __name__ == '__main__':\n main()\n", "path": "awx_collection/plugins/modules/tower_settings.py"}], "after_files": [{"content": "#!/usr/bin/python\n# coding: utf-8 -*-\n\n# (c) 2018, Nikhil Jain <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: tower_settings\nauthor: \"Nikhil Jain (@jainnikhil30)\"\nshort_description: Modify Ansible Tower settings.\ndescription:\n - Modify Ansible Tower settings. See\n U(https://www.ansible.com/tower) for an overview.\noptions:\n name:\n description:\n - Name of setting to modify\n type: str\n value:\n description:\n - Value to be modified for given setting.\n - If given a non-string type, will make best effort to cast it to type API expects.\n - For better control over types, use the C(settings) param instead.\n type: str\n settings:\n description:\n - A data structure to be sent into the settings endpoint\n type: dict\nrequirements:\n - pyyaml\nextends_documentation_fragment: awx.awx.auth\n'''\n\nEXAMPLES = '''\n- name: Set the value of AWX_PROOT_BASE_PATH\n tower_settings:\n name: AWX_PROOT_BASE_PATH\n value: \"/tmp\"\n register: testing_settings\n\n- name: Set the value of AWX_PROOT_SHOW_PATHS\n tower_settings:\n name: \"AWX_PROOT_SHOW_PATHS\"\n value: \"'/var/lib/awx/projects/', '/tmp'\"\n register: testing_settings\n\n- name: Set the LDAP Auth Bind Password\n tower_settings:\n name: \"AUTH_LDAP_BIND_PASSWORD\"\n value: \"Password\"\n no_log: true\n\n- name: Set all the LDAP Auth Bind Params\n tower_settings:\n settings:\n AUTH_LDAP_BIND_PASSWORD: \"password\"\n AUTH_LDAP_USER_ATTR_MAP:\n email: \"mail\"\n first_name: \"givenName\"\n last_name: \"surname\"\n'''\n\nfrom ..module_utils.tower_api import TowerAPIModule\n\ntry:\n import yaml\n HAS_YAML = True\nexcept ImportError:\n HAS_YAML = False\n\n\ndef coerce_type(module, value):\n # If our value is already None we can just return directly\n if value is None:\n return value\n\n yaml_ish = bool((\n value.startswith('{') and value.endswith('}')\n ) or (\n value.startswith('[') and value.endswith(']'))\n )\n if yaml_ish:\n if not HAS_YAML:\n module.fail_json(msg=\"yaml is not installed, try 'pip install pyyaml'\")\n return yaml.safe_load(value)\n elif value.lower in ('true', 'false', 't', 'f'):\n return {'t': True, 'f': False}[value[0].lower()]\n try:\n return int(value)\n except ValueError:\n pass\n return value\n\n\ndef main():\n # Any additional arguments that are not fields of the item can be added here\n argument_spec = dict(\n 
name=dict(),\n value=dict(),\n settings=dict(type='dict'),\n )\n\n # Create a module for ourselves\n module = TowerAPIModule(\n argument_spec=argument_spec,\n required_one_of=[['name', 'settings']],\n mutually_exclusive=[['name', 'settings']],\n required_if=[['name', 'present', ['value']]]\n )\n\n # Extract our parameters\n name = module.params.get('name')\n value = module.params.get('value')\n new_settings = module.params.get('settings')\n\n # If we were given a name/value pair we will just make settings out of that and proceed normally\n if new_settings is None:\n new_value = coerce_type(module, value)\n\n new_settings = {name: new_value}\n\n # Load the existing settings\n existing_settings = module.get_endpoint('settings/all')['json']\n\n # Begin a json response\n json_output = {'changed': False, 'old_values': {}, 'new_values': {}}\n\n # Check any of the settings to see if anything needs to be updated\n needs_update = False\n for a_setting in new_settings:\n if a_setting not in existing_settings or existing_settings[a_setting] != new_settings[a_setting]:\n # At least one thing is different so we need to patch\n needs_update = True\n json_output['old_values'][a_setting] = existing_settings[a_setting]\n json_output['new_values'][a_setting] = new_settings[a_setting]\n\n if module._diff:\n json_output['diff'] = {\n 'before': json_output['old_values'],\n 'after': json_output['new_values']\n }\n\n # If nothing needs an update we can simply exit with the response (as not changed)\n if not needs_update:\n module.exit_json(**json_output)\n\n if module.check_mode and module._diff:\n json_output['changed'] = True\n module.exit_json(**json_output)\n\n # Make the call to update the settings\n response = module.patch_endpoint('settings/all', **{'data': new_settings})\n\n if response['status_code'] == 200:\n # Set the changed response to True\n json_output['changed'] = True\n\n # To deal with the old style values we need to return 'value' in the response\n new_values = {}\n for a_setting in new_settings:\n new_values[a_setting] = response['json'][a_setting]\n\n # If we were using a name we will just add a value of a string, otherwise we will return an array in values\n if name is not None:\n json_output['value'] = new_values[name]\n else:\n json_output['values'] = new_values\n\n module.exit_json(**json_output)\n elif 'json' in response and '__all__' in response['json']:\n module.fail_json(msg=response['json']['__all__'])\n else:\n module.fail_json(**{'msg': \"Unable to update settings, see response\", 'response': response})\n\n\nif __name__ == '__main__':\n main()\n", "path": "awx_collection/plugins/modules/tower_settings.py"}]}
| 2,885 | 618 |
gh_patches_debug_23291
|
rasdani/github-patches
|
git_diff
|
scikit-hep__awkward-3115
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
typing ak.Array for numba.cuda.jit signature
### Version of Awkward Array
2.6.2
### Description and code to reproduce
Hey guys, I followed a hint from the discussion in [#696](https://github.com/scikit-hep/awkward/discussions/696#discussion-2571850) to type `ak.Array` for numba signatures. So I tried something like
```python
import awkward as ak
import numba as nb
from numba import types
cpu_arr_type = ak.Array([[[0, 1], [2, 3]], [[4, 5]]], backend='cpu').numba_type
@nb.njit(types.void(cpu_arr_type))
def cpu_kernel(arr):
do_something_with_arr
```
and this works like a charm.
However, I'm interested in the same case but with a CUDA kernel, so I tried what seemed the most natural approach:
```python
gpu_arr_type = ak.Array([[[0, 1], [2, 3]], [[4, 5]]], backend='cuda').numba_type
@nb.cuda.jit(types.void(gpu_arr_type), extensions=[ak.numba.cuda])
def cuda_kernel(arr):
do_something_with_arr
```
This time, I get the error:
```python
self = <awkward._connect.numba.arrayview_cuda.ArrayViewArgHandler object at 0x784afbc13fa0>
ty = ak.ArrayView(ak.ListArrayType(array(int64, 1d, C), ak.ListArrayType(array(int64, 1d, C), ak.NumpyArrayType(array(int64, 1d, C), {}), {}), {}), None, ())
val = <Array [[[4, 1], [2, -1]], [...], [[4, 0]]] type='3 * var * var * int64'>
stream = 0, retr = []
def prepare_args(self, ty, val, stream, retr):
if isinstance(val, ak.Array):
if isinstance(val.layout.backend, CupyBackend):
# Use uint64 for pos, start, stop, the array pointers values, and the pylookup value
tys = numba.types.UniTuple(numba.types.uint64, 5)
> start = val._numbaview.start
E AttributeError: 'NoneType' object has no attribute 'start'
.../site-packages/awkward/_connect/numba/arrayview_cuda.py:21: AttributeError
```
How should this latter case be correctly treated? Note that, without typing, the thing works as expected:
```python
@nb.cuda.jit(extensions=[ak.numba.cuda])
def cuda_kernel_no_typing(arr):
do_something_with_arr
```
However, I'm interested in `ak.Array`s with the 3D layout of integers (as above) and would like to take advantage of numba's eager compilation. I'm passing the `arr` for testing as
```python
backend = 'cpu' # or 'cuda'
arr = ak.to_backend(
ak.Array([
[[4, 1], [2, -1]],
[[0, -1], [1, 1], [3, -1]],
[[4, 0]]
]),
backend
)
```
Any help is appreciated!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/awkward/_connect/numba/arrayview_cuda.py`
Content:
```
1 # BSD 3-Clause License; see https://github.com/scikit-hep/awkward/blob/main/LICENSE
2
3 from __future__ import annotations
4
5 import numba
6 from numba.core.errors import NumbaTypeError
7
8 import awkward as ak
9 from awkward._backends.cupy import CupyBackend
10
11 ########## ArrayView Arguments Handler for CUDA JIT
12
13
14 class ArrayViewArgHandler:
15 def prepare_args(self, ty, val, stream, retr):
16 if isinstance(val, ak.Array):
17 if isinstance(val.layout.backend, CupyBackend):
18 # Use uint64 for pos, start, stop, the array pointers values, and the pylookup value
19 tys = numba.types.UniTuple(numba.types.uint64, 5)
20
21 start = val._numbaview.start
22 stop = val._numbaview.stop
23 pos = val._numbaview.pos
24 arrayptrs = val._numbaview.lookup.arrayptrs.data.ptr
25 pylookup = 0
26
27 return tys, (pos, start, stop, arrayptrs, pylookup)
28 else:
29 raise NumbaTypeError(
30 '`ak.to_backend` should be called with `backend="cuda"` to put '
31 "the array on the GPU before using it: "
32 'ak.to_backend(array, backend="cuda")'
33 )
34
35 else:
36 return ty, val
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/awkward/_connect/numba/arrayview_cuda.py b/src/awkward/_connect/numba/arrayview_cuda.py
--- a/src/awkward/_connect/numba/arrayview_cuda.py
+++ b/src/awkward/_connect/numba/arrayview_cuda.py
@@ -15,13 +15,22 @@
def prepare_args(self, ty, val, stream, retr):
if isinstance(val, ak.Array):
if isinstance(val.layout.backend, CupyBackend):
+ if ty is not val.numba_type:
+ raise NumbaTypeError(
+ f"the array type: {val.numba_type} does not match "
+ f"the kernel signature type: {ty}"
+ )
+
# Use uint64 for pos, start, stop, the array pointers values, and the pylookup value
tys = numba.types.UniTuple(numba.types.uint64, 5)
- start = val._numbaview.start
- stop = val._numbaview.stop
- pos = val._numbaview.pos
- arrayptrs = val._numbaview.lookup.arrayptrs.data.ptr
+ view = val._numbaview
+ assert view is not None
+
+ start = view.start
+ stop = view.stop
+ pos = view.pos
+ arrayptrs = view.lookup.arrayptrs.data.ptr
pylookup = 0
return tys, (pos, start, stop, arrayptrs, pylookup)
|
{"golden_diff": "diff --git a/src/awkward/_connect/numba/arrayview_cuda.py b/src/awkward/_connect/numba/arrayview_cuda.py\n--- a/src/awkward/_connect/numba/arrayview_cuda.py\n+++ b/src/awkward/_connect/numba/arrayview_cuda.py\n@@ -15,13 +15,22 @@\n def prepare_args(self, ty, val, stream, retr):\n if isinstance(val, ak.Array):\n if isinstance(val.layout.backend, CupyBackend):\n+ if ty is not val.numba_type:\n+ raise NumbaTypeError(\n+ f\"the array type: {val.numba_type} does not match \"\n+ f\"the kernel signature type: {ty}\"\n+ )\n+\n # Use uint64 for pos, start, stop, the array pointers values, and the pylookup value\n tys = numba.types.UniTuple(numba.types.uint64, 5)\n \n- start = val._numbaview.start\n- stop = val._numbaview.stop\n- pos = val._numbaview.pos\n- arrayptrs = val._numbaview.lookup.arrayptrs.data.ptr\n+ view = val._numbaview\n+ assert view is not None\n+\n+ start = view.start\n+ stop = view.stop\n+ pos = view.pos\n+ arrayptrs = view.lookup.arrayptrs.data.ptr\n pylookup = 0\n \n return tys, (pos, start, stop, arrayptrs, pylookup)\n", "issue": "typing ak.Array for numba.cuda.jit signature\n### Version of Awkward Array\n\n2.6.2\n\n### Description and code to reproduce\n\nHey guys, I followed a hint from the discussion in [#696](https://github.com/scikit-hep/awkward/discussions/696#discussion-2571850) to type `ak.Array` for numba signatures. So I tried something like\r\n\r\n```python\r\nimport awkward as ak\r\nimport numba as nb\r\nfrom numba import types\r\n\r\ncpu_arr_type = ak.Array([[[0, 1], [2, 3]], [[4, 5]]], backend='cpu').numba_type\r\n\r\[email protected](types.void(cpu_arr_type))\r\ndef cpu_kernel(arr):\r\n do_something_with_arr\r\n```\r\nand this works like a charm.\r\n\r\nHowever, I'm interested in the same case but with a cuda kernel. So I tried what appeared more natural to do:\r\n```python\r\ngpu_arr_type = ak.Array([[[0, 1], [2, 3]], [[4, 5]]], backend='cuda').numba_type\r\n\r\[email protected](types.void(gpu_arr_type), extensions=[ak.numba.cuda])\r\ndef cuda_kernel(arr):\r\n do_something_with_arr\r\n```\r\nThis time, I get the error:\r\n```python\r\nself = <awkward._connect.numba.arrayview_cuda.ArrayViewArgHandler object at 0x784afbc13fa0>\r\nty = ak.ArrayView(ak.ListArrayType(array(int64, 1d, C), ak.ListArrayType(array(int64, 1d, C), ak.NumpyArrayType(array(int64, 1d, C), {}), {}), {}), None, ())\r\nval = <Array [[[4, 1], [2, -1]], [...], [[4, 0]]] type='3 * var * var * int64'>\r\nstream = 0, retr = []\r\n\r\n def prepare_args(self, ty, val, stream, retr):\r\n if isinstance(val, ak.Array):\r\n if isinstance(val.layout.backend, CupyBackend):\r\n # Use uint64 for pos, start, stop, the array pointers values, and the pylookup value\r\n tys = numba.types.UniTuple(numba.types.uint64, 5)\r\n \r\n> start = val._numbaview.start\r\nE AttributeError: 'NoneType' object has no attribute 'start'\r\n\r\n.../site-packages/awkward/_connect/numba/arrayview_cuda.py:21: AttributeError\r\n```\r\nHow should this latter case be correctly treated? Note that, without typing, the thing works as expected:\r\n```python\r\[email protected](extensions=[ak.numba.cuda])\r\ndef cuda_kernel_no_typing(arr):\r\n do_something_with_arr\r\n```\r\nHowever, I'm interested in `ak.Array`s with the 3D layout of integers (as above) and would like to take advantage of numba's eager compilation. 
I'm passing the `arr` for testing as\r\n```python\r\nbackend = 'cpu' # or 'cuda'\r\narr = ak.to_backend(\r\n ak.Array([\r\n [[4, 1], [2, -1]],\r\n [[0, -1], [1, 1], [3, -1]],\r\n [[4, 0]]\r\n ]),\r\n backend\r\n)\r\n```\r\nAny help is appreciated!\r\n\n", "before_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward/blob/main/LICENSE\n\nfrom __future__ import annotations\n\nimport numba\nfrom numba.core.errors import NumbaTypeError\n\nimport awkward as ak\nfrom awkward._backends.cupy import CupyBackend\n\n########## ArrayView Arguments Handler for CUDA JIT\n\n\nclass ArrayViewArgHandler:\n def prepare_args(self, ty, val, stream, retr):\n if isinstance(val, ak.Array):\n if isinstance(val.layout.backend, CupyBackend):\n # Use uint64 for pos, start, stop, the array pointers values, and the pylookup value\n tys = numba.types.UniTuple(numba.types.uint64, 5)\n\n start = val._numbaview.start\n stop = val._numbaview.stop\n pos = val._numbaview.pos\n arrayptrs = val._numbaview.lookup.arrayptrs.data.ptr\n pylookup = 0\n\n return tys, (pos, start, stop, arrayptrs, pylookup)\n else:\n raise NumbaTypeError(\n '`ak.to_backend` should be called with `backend=\"cuda\"` to put '\n \"the array on the GPU before using it: \"\n 'ak.to_backend(array, backend=\"cuda\")'\n )\n\n else:\n return ty, val\n", "path": "src/awkward/_connect/numba/arrayview_cuda.py"}], "after_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward/blob/main/LICENSE\n\nfrom __future__ import annotations\n\nimport numba\nfrom numba.core.errors import NumbaTypeError\n\nimport awkward as ak\nfrom awkward._backends.cupy import CupyBackend\n\n########## ArrayView Arguments Handler for CUDA JIT\n\n\nclass ArrayViewArgHandler:\n def prepare_args(self, ty, val, stream, retr):\n if isinstance(val, ak.Array):\n if isinstance(val.layout.backend, CupyBackend):\n if ty is not val.numba_type:\n raise NumbaTypeError(\n f\"the array type: {val.numba_type} does not match \"\n f\"the kernel signature type: {ty}\"\n )\n\n # Use uint64 for pos, start, stop, the array pointers values, and the pylookup value\n tys = numba.types.UniTuple(numba.types.uint64, 5)\n\n view = val._numbaview\n assert view is not None\n\n start = view.start\n stop = view.stop\n pos = view.pos\n arrayptrs = view.lookup.arrayptrs.data.ptr\n pylookup = 0\n\n return tys, (pos, start, stop, arrayptrs, pylookup)\n else:\n raise NumbaTypeError(\n '`ak.to_backend` should be called with `backend=\"cuda\"` to put '\n \"the array on the GPU before using it: \"\n 'ak.to_backend(array, backend=\"cuda\")'\n )\n\n else:\n return ty, val\n", "path": "src/awkward/_connect/numba/arrayview_cuda.py"}]}
| 1,362 | 342 |
gh_patches_debug_4491
|
rasdani/github-patches
|
git_diff
|
fossasia__open-event-server-4616
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Generated reset password link is correct but not clickable
**I'm submitting a ...** (check one with "x")
- [x] bug report
- [ ] feature request
- [ ] support request => Please do not submit support requests here; instead, ask your query in our Gitter channel at https://gitter.im/fossasia/open-event-server
**Current behavior:**
<!-- Describe how the bug manifests. -->
The link appears as static text in the email.
**Expected behavior:**
<!-- Describe what the behavior would be without the bug. -->
The link should be clickable.
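
For reference, the difference comes down to whether the rendered email body contains a bare URL or an explicit HTML anchor; a minimal sketch of the two variants (the reset URL below is made up for illustration):
```python
# Sketch: many mail clients will not auto-link a bare URL inside an HTML body,
# while an explicit <a> tag is rendered as a clickable link.
link = "https://example.com/reset-password/v1/some-token"  # illustrative URL

plain_text_variant = (
    u"Please use the following link to reset your password.<br> {link}"
).format(link=link)

clickable_variant = (
    u"Please use the following link to reset your password.<br> "
    u"<a href='{link}' target='_blank'>{link}</a>"
).format(link=link)

print(plain_text_variant)
print(clickable_variant)
```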
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/helpers/system_mails.py`
Content:
```
1 """
2 All the System mails
3 Register a mail here before using it
4 """
5 from app.models.mail import INVITE_PAPERS, NEW_SESSION, USER_CONFIRM, \
6 USER_REGISTER, PASSWORD_RESET, EVENT_ROLE, SESSION_ACCEPT_REJECT, \
7 SESSION_SCHEDULE, NEXT_EVENT, EVENT_PUBLISH, AFTER_EVENT, USER_CHANGE_EMAIL, USER_REGISTER_WITH_PASSWORD, \
8 TICKET_PURCHASED, EVENT_EXPORTED, EVENT_EXPORT_FAIL, MAIL_TO_EXPIRED_ORDERS, MONTHLY_PAYMENT_EMAIL, \
9 MONTHLY_PAYMENT_FOLLOWUP_EMAIL, EVENT_IMPORTED, EVENT_IMPORT_FAIL, TICKET_PURCHASED_ORGANIZER, TICKET_CANCELLED, \
10 TICKET_PURCHASED_ATTENDEE, PASSWORD_CHANGE
11
12 MAILS = {
13 EVENT_PUBLISH: {
14 'recipient': 'Organizer, Speaker',
15 'subject': u'{event_name} is Live',
16 'message': (
17 u"Hi {email}<br/>" +
18 u"Event, {event_name}, is up and running and ready for action. Go ahead and check it out." +
19 u"<br/> Visit this link to view it: {link}"
20 )
21 },
22 INVITE_PAPERS: {
23 'recipient': 'Speaker',
24 'subject': u'Invitation to Submit Papers for {event_name}',
25 'message': (
26 u"Hi {email}<br/>" +
27 u"You are invited to submit papers for event: {event_name}" +
28 u"<br/> Visit this link to fill up details: {link}"
29 )
30 },
31 SESSION_ACCEPT_REJECT: {
32 'recipient': 'Speaker',
33 'subject': u'Session {session_name} has been {acceptance}',
34 'message': (
35 u"Hi {email},<br/>" +
36 u"The session <strong>{session_name}</strong> has been <strong>{acceptance}</strong> by the organizer. " +
37 u"<br/> Visit this link to view the session: {link}"
38 )
39 },
40 SESSION_SCHEDULE: {
41 'recipient': 'Organizer, Speaker',
42 'subject': u'Schedule for Session {session_name} has been changed',
43 'message': (
44 u"Hi {email},<br/>" +
45 u"The schedule for session <strong>{session_name}</strong> has been changed. " +
46 u"<br/> Visit this link to view the session: {link}"
47 )
48 },
49 NEXT_EVENT: {
50 'recipient': 'Organizer, Speaker',
51 'subject': u'Event {event_name} is coming soon',
52 'message': (
53 u"Hi {email},<br/>" +
54 u"Here are the upcoming events: {up_coming_events} .Get ready!! " +
55 u"<br/> Visit this link to view the event: {link}"
56 )
57 },
58 AFTER_EVENT: {
59 'recipient': 'Organizer, Speaker',
60 'subject': u'Event {event_name} is over',
61 'message': (
62 u"Hi {email},<br/>" +
63 u"Thank You for participating in our event. We hope you enjoyed it. "
64 u"Please check the list of more upcoming events" +
65 u"Here are the upcoming events: {upcoming_events} .Get ready!! "
66 ),
67 'sent_at': '1 day after the event'
68 },
69 NEW_SESSION: {
70 'recipient': 'Organizer',
71 'subject': u'New session proposal for {event_name}',
72 'message': (
73 u"Hi {email},<br/>" +
74 u"The event <strong>{event_name}</strong> has received a new session proposal. " +
75 u"<br/> Visit this link to view the session: {link}"
76 )
77 },
78 USER_REGISTER: {
79 'recipient': 'User',
80 'subject': u'Account Created on {app_name}',
81 'message': (
82 u"Your Account Has Been Created! Congratulations!" +
83 u"<br/> Your login: {email}"
84 )
85 },
86 USER_REGISTER_WITH_PASSWORD: {
87 'recipient': 'User',
88 'subject': u'Welcome to {app_name}',
89 'message': (
90 u"Your Account Has Been Created! Congratulations!" +
91 u"<br/> <strong>Your login:</strong><br><strong>Email:</strong> {email}<br>"
92 )
93 },
94 USER_CONFIRM: {
95 'recipient': 'User',
96 'subject': u'Email Confirmation to Create Account for Open-Event',
97 'message': (
98 u"Hi {email},<br/>" +
99 u"Please visit this link to confirm your email: {link}"
100 )
101 },
102 USER_CHANGE_EMAIL: {
103 'recipient': 'User',
104 'subject': u'Your email has been already changed',
105 'message': (
106 u"Hi {email},<br/>" +
107 u"Your email has been already changed from {email} to {new_email}. You should verify your new email"
108 )
109 },
110 PASSWORD_RESET: {
111 'recipient': 'User',
112 'subject': u'{app_name}: Password Reset',
113 'message': (
114 u"Please use the following link to reset your password.<br> {link}"
115 )
116 },
117 PASSWORD_CHANGE: {
118 'recipient': 'User',
119 'subject': u'{app_name}: Password Change',
120 'message': (
121 u"Your password has been successfully changed. Please login with your new password."
122 )
123 },
124 EVENT_ROLE: {
125 'recipient': 'User',
126 'subject': u'Invitation to be {role} at {event}',
127 'message': (
128 u"Hello {email},<br><br>" +
129 u"You've been invited to be a <strong>{role}</strong> at <strong>{event}</strong>.<br>" +
130 u"To accept the role please sign up using the following link: <a href='{link}' target='_blank'>Link</a>."
131 )
132 },
133 TICKET_PURCHASED: {
134 'recipient': 'User',
135 'subject': u'Your order invoice and tickets for {event_name} ({invoice_id}) ',
136 'message': (
137 u"Hi, this is a confirmation mail of your tickets for the event {event_name}"
138 u"<br/>Your order has been processed successfully." +
139 u"<br/> <a href='{pdf_url}'>Click here</a> to view/download your invoice."
140 u"<br><br><em>Looking forward to seeing you at the event."
141 u"<br/>Login to manage your orders at https://eventyay.com </em>"
142 )
143 },
144 TICKET_PURCHASED_ATTENDEE: {
145 'recipient': 'Attendee',
146 'subject': u'Your tickets for {event_name} ({invoice_id}) ',
147 'message': (
148 u"Hi, this is a confirmation mail of your tickets for the event {event_name}"
149 u"<br/>Your order has been processed successfully." +
150 u"<br/> <a href='{pdf_url}'>Click here</a> to view/download your ticket."
151 u"<br><br><em>Looking forward to seeing you at the event."
152 )
153 },
154
155 TICKET_PURCHASED_ORGANIZER: {
156 'recipient': 'Organizer, Coorganizer',
157 'subject': u'New ticket purchase for {event_name} by {buyer_email} ({invoice_id}) ',
158 'message': (
159 u"Hi, {buyer_email} just bought tickets for the event {event_name}"
160 u"<br/>The order has been processed successfully." +
161 u"<br/> <a href='{order_url}'>Click here</a> to view/download the invoice."
162 u"<br/>Login to manage the orders at https://eventyay.com </em>"
163 )
164 },
165 TICKET_CANCELLED: {
166 'recipient': 'User',
167 'subject': u'Your order for {event_name} has been cancelled ({invoice_id})',
168 'message': (
169 u"Hi,Your order for {event_name} has been cancelled has been cancelled by the organizer"
170 u"<br/>Please contact the organizer for more info" +
171 u"<br/>Message from the organizer: {cancel_note}"
172 u"<br/> <a href='{order_url}'>Click here</a> to view/download the invoice."
173 u"<br/>Login to manage the orders at https://eventyay.com </em>"
174 )
175 },
176 EVENT_EXPORTED: {
177 'recipient': 'User',
178 'subject': u'Event {event_name} has been exported',
179 'message': (
180 u"Click on the following link to download the event." +
181 u"<br> <a href='{download_url}'>Download</a>"
182 )
183 },
184 EVENT_EXPORT_FAIL: {
185 'recipient': 'User',
186 'subject': u'Export of event {event_name} failed',
187 'message': (
188 u"The error was as follows - <br>" +
189 u"<pre>{error_text}</pre>"
190 )
191 },
192 MAIL_TO_EXPIRED_ORDERS: {
193 'recipient': 'User',
194 'subject': u'Tickets for {event_name} are still available ',
195 'message': (
196 u"This is just a gentle reminder that the payment for your order {invoice_id} is still left." +
197 u"<br/> The tickets for this event are still available. <a href='{order_url}'>Click here</a> to "
198 u"purchase your ticket for this event."
199 u"<br><br><em>Looking forward to seeing you at the event.</em>"
200 )
201 },
202 MONTHLY_PAYMENT_EMAIL: {
203 'recipient': 'Organizer',
204 'subject': u'{date} - Monthly service fee invoice for {event_name}',
205 'message': (
206 u"The total service fee for the ticket sales of {event_name} in the month of {date} is {amount}." +
207 u"<br/> That payment for the same has to be made in two weeks. <a href='{payment_url}'>Click here</a> to "
208 u"view your invoice and complete the payment."
209 u"<br><br><em>Thank you for using {app_name}.</em>"
210 ),
211 'sent_at': '1st day of the month'
212 },
213 MONTHLY_PAYMENT_FOLLOWUP_EMAIL: {
214 'recipient': 'Organizer',
215 'subject': u'Past Due: {date} - Monthly service fee invoice for {event_name}',
216 'message': (
217 u"The total service fee for the ticket sales of {event_name} in the month of {date} is {amount}." +
218 u"<br/> That payment for the same is past the due date. <a href='{payment_url}'>Click here</a> to "
219 u"view your invoice and complete the payment to prevent loss of functionality."
220 u"<br><br><em>Thank you for using {app_name}.</em>"
221 ),
222 'sent_at': '15th day of the month'
223 },
224 EVENT_IMPORTED: {
225 'recipient': 'User',
226 'subject': u'Event {event_name} has been imported',
227 'message': (
228 u"Click on the following link to manage your event" +
229 u"<br> <a href='{event_url}'>Link</a>"
230 )
231 },
232 EVENT_IMPORT_FAIL: {
233 'recipient': 'User',
234 'subject': u'Import of event failed',
235 'message': (
236 u"The error was as follows - <br>" +
237 u"<pre>{error_text}</pre>"
238 )
239 }
240 }
241
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/app/api/helpers/system_mails.py b/app/api/helpers/system_mails.py
--- a/app/api/helpers/system_mails.py
+++ b/app/api/helpers/system_mails.py
@@ -111,7 +111,7 @@
'recipient': 'User',
'subject': u'{app_name}: Password Reset',
'message': (
- u"Please use the following link to reset your password.<br> {link}"
+ u"Please use the following link to reset your password.<br> <a href='{link}' target='_blank'>{link}</a>"
)
},
PASSWORD_CHANGE: {
|
{"golden_diff": "diff --git a/app/api/helpers/system_mails.py b/app/api/helpers/system_mails.py\n--- a/app/api/helpers/system_mails.py\n+++ b/app/api/helpers/system_mails.py\n@@ -111,7 +111,7 @@\n 'recipient': 'User',\n 'subject': u'{app_name}: Password Reset',\n 'message': (\n- u\"Please use the following link to reset your password.<br> {link}\"\n+ u\"Please use the following link to reset your password.<br> <a href='{link}' target='_blank'>{link}</a>\"\n )\n },\n PASSWORD_CHANGE: {\n", "issue": "Generated reset password link is correct but not clickable\n**I'm submitting a ...** (check one with \"x\")\r\n- [x] bug report\r\n- [ ] feature request\r\n- [ ] support request => Please do not submit support requests here, instead ask your query in out Gitter channel at https://gitter.im/fossasia/open-event-server\r\n\r\n**Current behavior:**\r\n<!-- Describe how the bug manifests. -->\r\n\r\nThe link appears as static text in the email.\r\n**Expected behavior:**\r\n<!-- Describe what the behavior would be without the bug. -->\r\nThe link should be clickable.\r\n\n", "before_files": [{"content": "\"\"\"\nAll the System mails\nRegister a mail here before using it\n\"\"\"\nfrom app.models.mail import INVITE_PAPERS, NEW_SESSION, USER_CONFIRM, \\\n USER_REGISTER, PASSWORD_RESET, EVENT_ROLE, SESSION_ACCEPT_REJECT, \\\n SESSION_SCHEDULE, NEXT_EVENT, EVENT_PUBLISH, AFTER_EVENT, USER_CHANGE_EMAIL, USER_REGISTER_WITH_PASSWORD, \\\n TICKET_PURCHASED, EVENT_EXPORTED, EVENT_EXPORT_FAIL, MAIL_TO_EXPIRED_ORDERS, MONTHLY_PAYMENT_EMAIL, \\\n MONTHLY_PAYMENT_FOLLOWUP_EMAIL, EVENT_IMPORTED, EVENT_IMPORT_FAIL, TICKET_PURCHASED_ORGANIZER, TICKET_CANCELLED, \\\n TICKET_PURCHASED_ATTENDEE, PASSWORD_CHANGE\n\nMAILS = {\n EVENT_PUBLISH: {\n 'recipient': 'Organizer, Speaker',\n 'subject': u'{event_name} is Live',\n 'message': (\n u\"Hi {email}<br/>\" +\n u\"Event, {event_name}, is up and running and ready for action. Go ahead and check it out.\" +\n u\"<br/> Visit this link to view it: {link}\"\n )\n },\n INVITE_PAPERS: {\n 'recipient': 'Speaker',\n 'subject': u'Invitation to Submit Papers for {event_name}',\n 'message': (\n u\"Hi {email}<br/>\" +\n u\"You are invited to submit papers for event: {event_name}\" +\n u\"<br/> Visit this link to fill up details: {link}\"\n )\n },\n SESSION_ACCEPT_REJECT: {\n 'recipient': 'Speaker',\n 'subject': u'Session {session_name} has been {acceptance}',\n 'message': (\n u\"Hi {email},<br/>\" +\n u\"The session <strong>{session_name}</strong> has been <strong>{acceptance}</strong> by the organizer. \" +\n u\"<br/> Visit this link to view the session: {link}\"\n )\n },\n SESSION_SCHEDULE: {\n 'recipient': 'Organizer, Speaker',\n 'subject': u'Schedule for Session {session_name} has been changed',\n 'message': (\n u\"Hi {email},<br/>\" +\n u\"The schedule for session <strong>{session_name}</strong> has been changed. \" +\n u\"<br/> Visit this link to view the session: {link}\"\n )\n },\n NEXT_EVENT: {\n 'recipient': 'Organizer, Speaker',\n 'subject': u'Event {event_name} is coming soon',\n 'message': (\n u\"Hi {email},<br/>\" +\n u\"Here are the upcoming events: {up_coming_events} .Get ready!! \" +\n u\"<br/> Visit this link to view the event: {link}\"\n )\n },\n AFTER_EVENT: {\n 'recipient': 'Organizer, Speaker',\n 'subject': u'Event {event_name} is over',\n 'message': (\n u\"Hi {email},<br/>\" +\n u\"Thank You for participating in our event. We hope you enjoyed it. 
\"\n u\"Please check the list of more upcoming events\" +\n u\"Here are the upcoming events: {upcoming_events} .Get ready!! \"\n ),\n 'sent_at': '1 day after the event'\n },\n NEW_SESSION: {\n 'recipient': 'Organizer',\n 'subject': u'New session proposal for {event_name}',\n 'message': (\n u\"Hi {email},<br/>\" +\n u\"The event <strong>{event_name}</strong> has received a new session proposal. \" +\n u\"<br/> Visit this link to view the session: {link}\"\n )\n },\n USER_REGISTER: {\n 'recipient': 'User',\n 'subject': u'Account Created on {app_name}',\n 'message': (\n u\"Your Account Has Been Created! Congratulations!\" +\n u\"<br/> Your login: {email}\"\n )\n },\n USER_REGISTER_WITH_PASSWORD: {\n 'recipient': 'User',\n 'subject': u'Welcome to {app_name}',\n 'message': (\n u\"Your Account Has Been Created! Congratulations!\" +\n u\"<br/> <strong>Your login:</strong><br><strong>Email:</strong> {email}<br>\"\n )\n },\n USER_CONFIRM: {\n 'recipient': 'User',\n 'subject': u'Email Confirmation to Create Account for Open-Event',\n 'message': (\n u\"Hi {email},<br/>\" +\n u\"Please visit this link to confirm your email: {link}\"\n )\n },\n USER_CHANGE_EMAIL: {\n 'recipient': 'User',\n 'subject': u'Your email has been already changed',\n 'message': (\n u\"Hi {email},<br/>\" +\n u\"Your email has been already changed from {email} to {new_email}. You should verify your new email\"\n )\n },\n PASSWORD_RESET: {\n 'recipient': 'User',\n 'subject': u'{app_name}: Password Reset',\n 'message': (\n u\"Please use the following link to reset your password.<br> {link}\"\n )\n },\n PASSWORD_CHANGE: {\n 'recipient': 'User',\n 'subject': u'{app_name}: Password Change',\n 'message': (\n u\"Your password has been successfully changed. Please login with your new password.\"\n )\n },\n EVENT_ROLE: {\n 'recipient': 'User',\n 'subject': u'Invitation to be {role} at {event}',\n 'message': (\n u\"Hello {email},<br><br>\" +\n u\"You've been invited to be a <strong>{role}</strong> at <strong>{event}</strong>.<br>\" +\n u\"To accept the role please sign up using the following link: <a href='{link}' target='_blank'>Link</a>.\"\n )\n },\n TICKET_PURCHASED: {\n 'recipient': 'User',\n 'subject': u'Your order invoice and tickets for {event_name} ({invoice_id}) ',\n 'message': (\n u\"Hi, this is a confirmation mail of your tickets for the event {event_name}\"\n u\"<br/>Your order has been processed successfully.\" +\n u\"<br/> <a href='{pdf_url}'>Click here</a> to view/download your invoice.\"\n u\"<br><br><em>Looking forward to seeing you at the event.\"\n u\"<br/>Login to manage your orders at https://eventyay.com </em>\"\n )\n },\n TICKET_PURCHASED_ATTENDEE: {\n 'recipient': 'Attendee',\n 'subject': u'Your tickets for {event_name} ({invoice_id}) ',\n 'message': (\n u\"Hi, this is a confirmation mail of your tickets for the event {event_name}\"\n u\"<br/>Your order has been processed successfully.\" +\n u\"<br/> <a href='{pdf_url}'>Click here</a> to view/download your ticket.\"\n u\"<br><br><em>Looking forward to seeing you at the event.\"\n )\n },\n\n TICKET_PURCHASED_ORGANIZER: {\n 'recipient': 'Organizer, Coorganizer',\n 'subject': u'New ticket purchase for {event_name} by {buyer_email} ({invoice_id}) ',\n 'message': (\n u\"Hi, {buyer_email} just bought tickets for the event {event_name}\"\n u\"<br/>The order has been processed successfully.\" +\n u\"<br/> <a href='{order_url}'>Click here</a> to view/download the invoice.\"\n u\"<br/>Login to manage the orders at https://eventyay.com </em>\"\n )\n },\n TICKET_CANCELLED: {\n 
'recipient': 'User',\n 'subject': u'Your order for {event_name} has been cancelled ({invoice_id})',\n 'message': (\n u\"Hi,Your order for {event_name} has been cancelled has been cancelled by the organizer\"\n u\"<br/>Please contact the organizer for more info\" +\n u\"<br/>Message from the organizer: {cancel_note}\"\n u\"<br/> <a href='{order_url}'>Click here</a> to view/download the invoice.\"\n u\"<br/>Login to manage the orders at https://eventyay.com </em>\"\n )\n },\n EVENT_EXPORTED: {\n 'recipient': 'User',\n 'subject': u'Event {event_name} has been exported',\n 'message': (\n u\"Click on the following link to download the event.\" +\n u\"<br> <a href='{download_url}'>Download</a>\"\n )\n },\n EVENT_EXPORT_FAIL: {\n 'recipient': 'User',\n 'subject': u'Export of event {event_name} failed',\n 'message': (\n u\"The error was as follows - <br>\" +\n u\"<pre>{error_text}</pre>\"\n )\n },\n MAIL_TO_EXPIRED_ORDERS: {\n 'recipient': 'User',\n 'subject': u'Tickets for {event_name} are still available ',\n 'message': (\n u\"This is just a gentle reminder that the payment for your order {invoice_id} is still left.\" +\n u\"<br/> The tickets for this event are still available. <a href='{order_url}'>Click here</a> to \"\n u\"purchase your ticket for this event.\"\n u\"<br><br><em>Looking forward to seeing you at the event.</em>\"\n )\n },\n MONTHLY_PAYMENT_EMAIL: {\n 'recipient': 'Organizer',\n 'subject': u'{date} - Monthly service fee invoice for {event_name}',\n 'message': (\n u\"The total service fee for the ticket sales of {event_name} in the month of {date} is {amount}.\" +\n u\"<br/> That payment for the same has to be made in two weeks. <a href='{payment_url}'>Click here</a> to \"\n u\"view your invoice and complete the payment.\"\n u\"<br><br><em>Thank you for using {app_name}.</em>\"\n ),\n 'sent_at': '1st day of the month'\n },\n MONTHLY_PAYMENT_FOLLOWUP_EMAIL: {\n 'recipient': 'Organizer',\n 'subject': u'Past Due: {date} - Monthly service fee invoice for {event_name}',\n 'message': (\n u\"The total service fee for the ticket sales of {event_name} in the month of {date} is {amount}.\" +\n u\"<br/> That payment for the same is past the due date. 
<a href='{payment_url}'>Click here</a> to \"\n u\"view your invoice and complete the payment to prevent loss of functionality.\"\n u\"<br><br><em>Thank you for using {app_name}.</em>\"\n ),\n 'sent_at': '15th day of the month'\n },\n EVENT_IMPORTED: {\n 'recipient': 'User',\n 'subject': u'Event {event_name} has been imported',\n 'message': (\n u\"Click on the following link to manage your event\" +\n u\"<br> <a href='{event_url}'>Link</a>\"\n )\n },\n EVENT_IMPORT_FAIL: {\n 'recipient': 'User',\n 'subject': u'Import of event failed',\n 'message': (\n u\"The error was as follows - <br>\" +\n u\"<pre>{error_text}</pre>\"\n )\n }\n}\n", "path": "app/api/helpers/system_mails.py"}], "after_files": [{"content": "\"\"\"\nAll the System mails\nRegister a mail here before using it\n\"\"\"\nfrom app.models.mail import INVITE_PAPERS, NEW_SESSION, USER_CONFIRM, \\\n USER_REGISTER, PASSWORD_RESET, EVENT_ROLE, SESSION_ACCEPT_REJECT, \\\n SESSION_SCHEDULE, NEXT_EVENT, EVENT_PUBLISH, AFTER_EVENT, USER_CHANGE_EMAIL, USER_REGISTER_WITH_PASSWORD, \\\n TICKET_PURCHASED, EVENT_EXPORTED, EVENT_EXPORT_FAIL, MAIL_TO_EXPIRED_ORDERS, MONTHLY_PAYMENT_EMAIL, \\\n MONTHLY_PAYMENT_FOLLOWUP_EMAIL, EVENT_IMPORTED, EVENT_IMPORT_FAIL, TICKET_PURCHASED_ORGANIZER, TICKET_CANCELLED, \\\n TICKET_PURCHASED_ATTENDEE, PASSWORD_CHANGE\n\nMAILS = {\n EVENT_PUBLISH: {\n 'recipient': 'Organizer, Speaker',\n 'subject': u'{event_name} is Live',\n 'message': (\n u\"Hi {email}<br/>\" +\n u\"Event, {event_name}, is up and running and ready for action. Go ahead and check it out.\" +\n u\"<br/> Visit this link to view it: {link}\"\n )\n },\n INVITE_PAPERS: {\n 'recipient': 'Speaker',\n 'subject': u'Invitation to Submit Papers for {event_name}',\n 'message': (\n u\"Hi {email}<br/>\" +\n u\"You are invited to submit papers for event: {event_name}\" +\n u\"<br/> Visit this link to fill up details: {link}\"\n )\n },\n SESSION_ACCEPT_REJECT: {\n 'recipient': 'Speaker',\n 'subject': u'Session {session_name} has been {acceptance}',\n 'message': (\n u\"Hi {email},<br/>\" +\n u\"The session <strong>{session_name}</strong> has been <strong>{acceptance}</strong> by the organizer. \" +\n u\"<br/> Visit this link to view the session: {link}\"\n )\n },\n SESSION_SCHEDULE: {\n 'recipient': 'Organizer, Speaker',\n 'subject': u'Schedule for Session {session_name} has been changed',\n 'message': (\n u\"Hi {email},<br/>\" +\n u\"The schedule for session <strong>{session_name}</strong> has been changed. \" +\n u\"<br/> Visit this link to view the session: {link}\"\n )\n },\n NEXT_EVENT: {\n 'recipient': 'Organizer, Speaker',\n 'subject': u'Event {event_name} is coming soon',\n 'message': (\n u\"Hi {email},<br/>\" +\n u\"Here are the upcoming events: {up_coming_events} .Get ready!! \" +\n u\"<br/> Visit this link to view the event: {link}\"\n )\n },\n AFTER_EVENT: {\n 'recipient': 'Organizer, Speaker',\n 'subject': u'Event {event_name} is over',\n 'message': (\n u\"Hi {email},<br/>\" +\n u\"Thank You for participating in our event. We hope you enjoyed it. \"\n u\"Please check the list of more upcoming events\" +\n u\"Here are the upcoming events: {upcoming_events} .Get ready!! \"\n ),\n 'sent_at': '1 day after the event'\n },\n NEW_SESSION: {\n 'recipient': 'Organizer',\n 'subject': u'New session proposal for {event_name}',\n 'message': (\n u\"Hi {email},<br/>\" +\n u\"The event <strong>{event_name}</strong> has received a new session proposal. 
\" +\n u\"<br/> Visit this link to view the session: {link}\"\n )\n },\n USER_REGISTER: {\n 'recipient': 'User',\n 'subject': u'Account Created on {app_name}',\n 'message': (\n u\"Your Account Has Been Created! Congratulations!\" +\n u\"<br/> Your login: {email}\"\n )\n },\n USER_REGISTER_WITH_PASSWORD: {\n 'recipient': 'User',\n 'subject': u'Welcome to {app_name}',\n 'message': (\n u\"Your Account Has Been Created! Congratulations!\" +\n u\"<br/> <strong>Your login:</strong><br><strong>Email:</strong> {email}<br>\"\n )\n },\n USER_CONFIRM: {\n 'recipient': 'User',\n 'subject': u'Email Confirmation to Create Account for Open-Event',\n 'message': (\n u\"Hi {email},<br/>\" +\n u\"Please visit this link to confirm your email: {link}\"\n )\n },\n USER_CHANGE_EMAIL: {\n 'recipient': 'User',\n 'subject': u'Your email has been already changed',\n 'message': (\n u\"Hi {email},<br/>\" +\n u\"Your email has been already changed from {email} to {new_email}. You should verify your new email\"\n )\n },\n PASSWORD_RESET: {\n 'recipient': 'User',\n 'subject': u'{app_name}: Password Reset',\n 'message': (\n u\"Please use the following link to reset your password.<br> <a href='{link}' target='_blank'>{link}</a>\"\n )\n },\n PASSWORD_CHANGE: {\n 'recipient': 'User',\n 'subject': u'{app_name}: Password Change',\n 'message': (\n u\"Your password has been successfully changed. Please login with your new password.\"\n )\n },\n EVENT_ROLE: {\n 'recipient': 'User',\n 'subject': u'Invitation to be {role} at {event}',\n 'message': (\n u\"Hello {email},<br><br>\" +\n u\"You've been invited to be a <strong>{role}</strong> at <strong>{event}</strong>.<br>\" +\n u\"To accept the role please sign up using the following link: <a href='{link}' target='_blank'>Link</a>.\"\n )\n },\n TICKET_PURCHASED: {\n 'recipient': 'User',\n 'subject': u'Your order invoice and tickets for {event_name} ({invoice_id}) ',\n 'message': (\n u\"Hi, this is a confirmation mail of your tickets for the event {event_name}\"\n u\"<br/>Your order has been processed successfully.\" +\n u\"<br/> <a href='{pdf_url}'>Click here</a> to view/download your invoice.\"\n u\"<br><br><em>Looking forward to seeing you at the event.\"\n u\"<br/>Login to manage your orders at https://eventyay.com </em>\"\n )\n },\n TICKET_PURCHASED_ATTENDEE: {\n 'recipient': 'Attendee',\n 'subject': u'Your tickets for {event_name} ({invoice_id}) ',\n 'message': (\n u\"Hi, this is a confirmation mail of your tickets for the event {event_name}\"\n u\"<br/>Your order has been processed successfully.\" +\n u\"<br/> <a href='{pdf_url}'>Click here</a> to view/download your ticket.\"\n u\"<br><br><em>Looking forward to seeing you at the event.\"\n )\n },\n\n TICKET_PURCHASED_ORGANIZER: {\n 'recipient': 'Organizer, Coorganizer',\n 'subject': u'New ticket purchase for {event_name} by {buyer_email} ({invoice_id}) ',\n 'message': (\n u\"Hi, {buyer_email} just bought tickets for the event {event_name}\"\n u\"<br/>The order has been processed successfully.\" +\n u\"<br/> <a href='{order_url}'>Click here</a> to view/download the invoice.\"\n u\"<br/>Login to manage the orders at https://eventyay.com </em>\"\n )\n },\n TICKET_CANCELLED: {\n 'recipient': 'User',\n 'subject': u'Your order for {event_name} has been cancelled ({invoice_id})',\n 'message': (\n u\"Hi,Your order for {event_name} has been cancelled has been cancelled by the organizer\"\n u\"<br/>Please contact the organizer for more info\" +\n u\"<br/>Message from the organizer: {cancel_note}\"\n u\"<br/> <a href='{order_url}'>Click 
here</a> to view/download the invoice.\"\n u\"<br/>Login to manage the orders at https://eventyay.com </em>\"\n )\n },\n EVENT_EXPORTED: {\n 'recipient': 'User',\n 'subject': u'Event {event_name} has been exported',\n 'message': (\n u\"Click on the following link to download the event.\" +\n u\"<br> <a href='{download_url}'>Download</a>\"\n )\n },\n EVENT_EXPORT_FAIL: {\n 'recipient': 'User',\n 'subject': u'Export of event {event_name} failed',\n 'message': (\n u\"The error was as follows - <br>\" +\n u\"<pre>{error_text}</pre>\"\n )\n },\n MAIL_TO_EXPIRED_ORDERS: {\n 'recipient': 'User',\n 'subject': u'Tickets for {event_name} are still available ',\n 'message': (\n u\"This is just a gentle reminder that the payment for your order {invoice_id} is still left.\" +\n u\"<br/> The tickets for this event are still available. <a href='{order_url}'>Click here</a> to \"\n u\"purchase your ticket for this event.\"\n u\"<br><br><em>Looking forward to seeing you at the event.</em>\"\n )\n },\n MONTHLY_PAYMENT_EMAIL: {\n 'recipient': 'Organizer',\n 'subject': u'{date} - Monthly service fee invoice for {event_name}',\n 'message': (\n u\"The total service fee for the ticket sales of {event_name} in the month of {date} is {amount}.\" +\n u\"<br/> That payment for the same has to be made in two weeks. <a href='{payment_url}'>Click here</a> to \"\n u\"view your invoice and complete the payment.\"\n u\"<br><br><em>Thank you for using {app_name}.</em>\"\n ),\n 'sent_at': '1st day of the month'\n },\n MONTHLY_PAYMENT_FOLLOWUP_EMAIL: {\n 'recipient': 'Organizer',\n 'subject': u'Past Due: {date} - Monthly service fee invoice for {event_name}',\n 'message': (\n u\"The total service fee for the ticket sales of {event_name} in the month of {date} is {amount}.\" +\n u\"<br/> That payment for the same is past the due date. <a href='{payment_url}'>Click here</a> to \"\n u\"view your invoice and complete the payment to prevent loss of functionality.\"\n u\"<br><br><em>Thank you for using {app_name}.</em>\"\n ),\n 'sent_at': '15th day of the month'\n },\n EVENT_IMPORTED: {\n 'recipient': 'User',\n 'subject': u'Event {event_name} has been imported',\n 'message': (\n u\"Click on the following link to manage your event\" +\n u\"<br> <a href='{event_url}'>Link</a>\"\n )\n },\n EVENT_IMPORT_FAIL: {\n 'recipient': 'User',\n 'subject': u'Import of event failed',\n 'message': (\n u\"The error was as follows - <br>\" +\n u\"<pre>{error_text}</pre>\"\n )\n }\n}\n", "path": "app/api/helpers/system_mails.py"}]}
| 3,534 | 136 |
gh_patches_debug_21885
|
rasdani/github-patches
|
git_diff
|
numba__numba-3578
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
simulate bug func_or_sig vs fn_or_sig named parameter
There seems to be a difference in the named parameter func_or_sig/fn_or_sig between the cuda.jit() in the simulator vs gpu code.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `numba/cuda/simulator/api.py`
Content:
```
1 '''
2 Contains CUDA API functions
3 '''
4 from __future__ import absolute_import
5
6 from contextlib import contextmanager
7 from .cudadrv.devices import require_context, reset, gpus
8 from .kernel import FakeCUDAKernel
9 from numba.typing import Signature
10 from warnings import warn
11 from ..args import In, Out, InOut
12
13
14 def select_device(dev=0):
15 assert dev == 0, 'Only a single device supported by the simulator'
16
17
18 class stream(object):
19 '''
20 The stream API is supported in the simulator - however, all execution
21 occurs synchronously, so synchronization requires no operation.
22 '''
23 @contextmanager
24 def auto_synchronize(self):
25 yield
26
27 def synchronize(self):
28 pass
29
30
31 def synchronize():
32 pass
33
34 def close():
35 gpus.closed = True
36
37
38 def declare_device(*args, **kwargs):
39 pass
40
41
42 def detect():
43 print('Found 1 CUDA devices')
44 print('id %d %20s %40s' % (0, 'SIMULATOR', '[SUPPORTED]'))
45 print('%40s: 5.2' % 'compute capability')
46
47
48 def list_devices():
49 return gpus
50
51
52 # Events
53
54 class Event(object):
55 '''
56 The simulator supports the event API, but they do not record timing info,
57 and all simulation is synchronous. Execution time is not recorded.
58 '''
59 def record(self, stream=0):
60 pass
61
62 def wait(self, stream=0):
63 pass
64
65 def synchronize(self):
66 pass
67
68 def elapsed_time(self, event):
69 warn('Simulator timings are bogus')
70 return 0.0
71
72 event = Event
73
74
75 def jit(fn_or_sig=None, device=False, debug=False, argtypes=None, inline=False, restype=None,
76 fastmath=False, link=None):
77 if link is not None:
78 raise NotImplementedError('Cannot link PTX in the simulator')
79 # Check for first argument specifying types - in that case the
80 # decorator is not being passed a function
81 if fn_or_sig is None or isinstance(fn_or_sig, (str, tuple, Signature)):
82 def jitwrapper(fn):
83 return FakeCUDAKernel(fn,
84 device=device,
85 fastmath=fastmath)
86 return jitwrapper
87 return FakeCUDAKernel(fn_or_sig, device=device)
88
89 autojit = jit
90
91
92 @contextmanager
93 def defer_cleanup():
94 # No effect for simulator
95 yield
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/numba/cuda/simulator/api.py b/numba/cuda/simulator/api.py
--- a/numba/cuda/simulator/api.py
+++ b/numba/cuda/simulator/api.py
@@ -72,19 +72,19 @@
event = Event
-def jit(fn_or_sig=None, device=False, debug=False, argtypes=None, inline=False, restype=None,
- fastmath=False, link=None):
+def jit(func_or_sig=None, device=False, debug=False, argtypes=None,
+ inline=False, restype=None, fastmath=False, link=None):
if link is not None:
raise NotImplementedError('Cannot link PTX in the simulator')
# Check for first argument specifying types - in that case the
# decorator is not being passed a function
- if fn_or_sig is None or isinstance(fn_or_sig, (str, tuple, Signature)):
+ if func_or_sig is None or isinstance(func_or_sig, (str, tuple, Signature)):
def jitwrapper(fn):
return FakeCUDAKernel(fn,
device=device,
fastmath=fastmath)
return jitwrapper
- return FakeCUDAKernel(fn_or_sig, device=device)
+ return FakeCUDAKernel(func_or_sig, device=device)
autojit = jit
|
{"golden_diff": "diff --git a/numba/cuda/simulator/api.py b/numba/cuda/simulator/api.py\n--- a/numba/cuda/simulator/api.py\n+++ b/numba/cuda/simulator/api.py\n@@ -72,19 +72,19 @@\n event = Event\n \n \n-def jit(fn_or_sig=None, device=False, debug=False, argtypes=None, inline=False, restype=None,\n- fastmath=False, link=None):\n+def jit(func_or_sig=None, device=False, debug=False, argtypes=None,\n+ inline=False, restype=None, fastmath=False, link=None):\n if link is not None:\n raise NotImplementedError('Cannot link PTX in the simulator')\n # Check for first argument specifying types - in that case the\n # decorator is not being passed a function\n- if fn_or_sig is None or isinstance(fn_or_sig, (str, tuple, Signature)):\n+ if func_or_sig is None or isinstance(func_or_sig, (str, tuple, Signature)):\n def jitwrapper(fn):\n return FakeCUDAKernel(fn,\n device=device,\n fastmath=fastmath)\n return jitwrapper\n- return FakeCUDAKernel(fn_or_sig, device=device)\n+ return FakeCUDAKernel(func_or_sig, device=device)\n \n autojit = jit\n", "issue": "simulate bug func_or_sig vs fn_or_sig named parameter\nThere seems to be a difference in the named parameter func_or_sig/fn_or_sig between the cuda.jit() in the simulator vs gpu code. \n", "before_files": [{"content": "'''\nContains CUDA API functions\n'''\nfrom __future__ import absolute_import\n\nfrom contextlib import contextmanager\nfrom .cudadrv.devices import require_context, reset, gpus\nfrom .kernel import FakeCUDAKernel\nfrom numba.typing import Signature\nfrom warnings import warn\nfrom ..args import In, Out, InOut\n\n\ndef select_device(dev=0):\n assert dev == 0, 'Only a single device supported by the simulator'\n\n\nclass stream(object):\n '''\n The stream API is supported in the simulator - however, all execution\n occurs synchronously, so synchronization requires no operation.\n '''\n @contextmanager\n def auto_synchronize(self):\n yield\n\n def synchronize(self):\n pass\n\n\ndef synchronize():\n pass\n\ndef close():\n gpus.closed = True\n\n\ndef declare_device(*args, **kwargs):\n pass\n\n\ndef detect():\n print('Found 1 CUDA devices')\n print('id %d %20s %40s' % (0, 'SIMULATOR', '[SUPPORTED]'))\n print('%40s: 5.2' % 'compute capability')\n\n\ndef list_devices():\n return gpus\n\n\n# Events\n\nclass Event(object):\n '''\n The simulator supports the event API, but they do not record timing info,\n and all simulation is synchronous. 
Execution time is not recorded.\n '''\n def record(self, stream=0):\n pass\n\n def wait(self, stream=0):\n pass\n\n def synchronize(self):\n pass\n\n def elapsed_time(self, event):\n warn('Simulator timings are bogus')\n return 0.0\n\nevent = Event\n\n\ndef jit(fn_or_sig=None, device=False, debug=False, argtypes=None, inline=False, restype=None,\n fastmath=False, link=None):\n if link is not None:\n raise NotImplementedError('Cannot link PTX in the simulator')\n # Check for first argument specifying types - in that case the\n # decorator is not being passed a function\n if fn_or_sig is None or isinstance(fn_or_sig, (str, tuple, Signature)):\n def jitwrapper(fn):\n return FakeCUDAKernel(fn,\n device=device,\n fastmath=fastmath)\n return jitwrapper\n return FakeCUDAKernel(fn_or_sig, device=device)\n\nautojit = jit\n\n\n@contextmanager\ndef defer_cleanup():\n # No effect for simulator\n yield\n", "path": "numba/cuda/simulator/api.py"}], "after_files": [{"content": "'''\nContains CUDA API functions\n'''\nfrom __future__ import absolute_import\n\nfrom contextlib import contextmanager\nfrom .cudadrv.devices import require_context, reset, gpus\nfrom .kernel import FakeCUDAKernel\nfrom numba.typing import Signature\nfrom warnings import warn\nfrom ..args import In, Out, InOut\n\n\ndef select_device(dev=0):\n assert dev == 0, 'Only a single device supported by the simulator'\n\n\nclass stream(object):\n '''\n The stream API is supported in the simulator - however, all execution\n occurs synchronously, so synchronization requires no operation.\n '''\n @contextmanager\n def auto_synchronize(self):\n yield\n\n def synchronize(self):\n pass\n\n\ndef synchronize():\n pass\n\ndef close():\n gpus.closed = True\n\n\ndef declare_device(*args, **kwargs):\n pass\n\n\ndef detect():\n print('Found 1 CUDA devices')\n print('id %d %20s %40s' % (0, 'SIMULATOR', '[SUPPORTED]'))\n print('%40s: 5.2' % 'compute capability')\n\n\ndef list_devices():\n return gpus\n\n\n# Events\n\nclass Event(object):\n '''\n The simulator supports the event API, but they do not record timing info,\n and all simulation is synchronous. Execution time is not recorded.\n '''\n def record(self, stream=0):\n pass\n\n def wait(self, stream=0):\n pass\n\n def synchronize(self):\n pass\n\n def elapsed_time(self, event):\n warn('Simulator timings are bogus')\n return 0.0\n\nevent = Event\n\n\ndef jit(func_or_sig=None, device=False, debug=False, argtypes=None,\n inline=False, restype=None, fastmath=False, link=None):\n if link is not None:\n raise NotImplementedError('Cannot link PTX in the simulator')\n # Check for first argument specifying types - in that case the\n # decorator is not being passed a function\n if func_or_sig is None or isinstance(func_or_sig, (str, tuple, Signature)):\n def jitwrapper(fn):\n return FakeCUDAKernel(fn,\n device=device,\n fastmath=fastmath)\n return jitwrapper\n return FakeCUDAKernel(func_or_sig, device=device)\n\nautojit = jit\n\n\n@contextmanager\ndef defer_cleanup():\n # No effect for simulator\n yield\n", "path": "numba/cuda/simulator/api.py"}]}
| 1,025 | 286 |
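For illustration of the row above: a minimal sketch of how the renamed keyword surfaces for callers once the simulator accepts `func_or_sig`. It assumes numba with the CUDA simulator enabled via `NUMBA_ENABLE_CUDASIM=1`; the kernel name, signature, and launch configuration are illustrative, not taken from the dataset.

```python
# Sketch only: run with NUMBA_ENABLE_CUDASIM=1 so numba.cuda targets the simulator.
import numpy as np
from numba import cuda


# The GPU target has always called this argument `func_or_sig`; before the patch
# the simulator's jit() only knew it as `fn_or_sig`, so this keyword call raised
# a TypeError there. After the patch the same call works on both targets.
@cuda.jit(func_or_sig="void(float32[:])")
def double(arr):
    i = cuda.grid(1)
    if i < arr.size:
        arr[i] *= 2.0


data = np.ones(16, dtype=np.float32)
double[1, 16](data)   # one block of 16 threads
print(data)           # all elements doubled
```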
gh_patches_debug_31503
|
rasdani/github-patches
|
git_diff
|
translate__pootle-3631
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add default project descriptions
In order to get `initdb` running on `core` we had to drop the project [descriptions] since these move to staticpages on core. This bug is to add those back.
To achieve that initdb should create static pages for these default projects that contain the content previously held in the [descriptions](https://github.com/translate/pootle/blob/96edf539/pootle/core/initdb.py#L300-L306).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pootle/core/initdb.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright 2013 Zuza Software Foundation
5 # Copyright 2014-2015 Evernote Corporation
6 #
7 # This file is part of Pootle.
8 #
9 # Pootle is free software; you can redistribute it and/or modify it under the
10 # terms of the GNU General Public License as published by the Free Software
11 # Foundation; either version 2 of the License, or (at your option) any later
12 # version.
13 #
14 # Pootle is distributed in the hope that it will be useful, but WITHOUT ANY
15 # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
16 # A PARTICULAR PURPOSE. See the GNU General Public License for more details.
17 #
18 # You should have received a copy of the GNU General Public License along with
19 # Pootle; if not, see <http://www.gnu.org/licenses/>.
20
21
22 from django.contrib.auth import get_user_model
23 from django.contrib.auth.models import Permission
24 from django.contrib.contenttypes.models import ContentType
25 from django.utils.translation import ugettext_noop as _
26
27 from pootle.core.models import Revision
28 from pootle_app.models import Directory
29 from pootle_app.models.permissions import PermissionSet, get_pootle_permission
30 from pootle_language.models import Language
31 from pootle_project.models import Project
32
33
34 def initdb():
35 """Populate the database with default initial data.
36
37 This creates the default database to get a working Pootle installation.
38 """
39 create_revision()
40 create_essential_users()
41 create_root_directories()
42 create_template_languages()
43 create_terminology_project()
44 create_pootle_permissions()
45 create_pootle_permission_sets()
46
47 create_default_projects()
48 create_default_languages()
49 create_default_admin()
50
51
52 def create_revision():
53 Revision.initialize()
54
55
56 def create_essential_users():
57 """Create the 'default' and 'nobody' User instances.
58
59 These users are required for Pootle's permission system.
60 """
61 User = get_user_model()
62
63 # The nobody user is used to represent an anonymous user in cases where
64 # we need to associate model information with such a user. An example is
65 # in the permission system: we need a way to store rights for anonymous
66 # users; thus we use the nobody user.
67 criteria = {
68 'username': u"nobody",
69 'full_name': u"any anonymous user",
70 'is_active': True,
71 }
72 nobody, created = User.objects.get_or_create(**criteria)
73 if created:
74 nobody.set_unusable_password()
75 nobody.save()
76
77 # The 'default' user represents any valid, non-anonymous user and is used
78 # to associate information any such user. An example is in the permission
79 # system: we need a way to store default rights for users. We use the
80 # 'default' user for this.
81 #
82 # In a future version of Pootle we should think about using Django's
83 # groups to do better permissions handling.
84 criteria = {
85 'username': u"default",
86 'full_name': u"any authenticated user",
87 'is_active': True,
88 }
89 default, created = User.objects.get_or_create(**criteria)
90 if created:
91 default.set_unusable_password()
92 default.save()
93
94 # The system user represents a system, and is used to
95 # associate updates done by bulk commands as update_stores.
96 criteria = {
97 'username': u"system",
98 'full_name': u"system user",
99 'is_active': True,
100 }
101 system, created = User.objects.get_or_create(**criteria)
102 if created:
103 system.set_unusable_password()
104 system.save()
105
106
107 def create_pootle_permissions():
108 """Create Pootle's directory level permissions."""
109
110 args = {
111 'app_label': "pootle_app",
112 'model': "directory",
113 }
114 pootle_content_type, created = ContentType.objects.get_or_create(**args)
115 pootle_content_type.name = 'pootle'
116 pootle_content_type.save()
117
118 # Create the permissions.
119 permissions = [
120 {
121 'name': _("Can access a project"),
122 'codename': "view",
123 },
124 {
125 'name': _("Cannot access a project"),
126 'codename': "hide",
127 },
128 {
129 'name': _("Can make a suggestion for a translation"),
130 'codename': "suggest",
131 },
132 {
133 'name': _("Can submit a translation"),
134 'codename': "translate",
135 },
136 {
137 'name': _("Can review translations"),
138 'codename': "review",
139 },
140 {
141 'name': _("Can administrate a translation project"),
142 'codename': "administrate",
143 },
144 ]
145
146 criteria = {
147 'content_type': pootle_content_type,
148 }
149
150 for permission in permissions:
151 criteria.update(permission)
152 obj, created = Permission.objects.get_or_create(**criteria)
153
154
155 def create_pootle_permission_sets():
156 """Create the default permission set for the 'nobody' and 'default' users.
157
158 'nobody' is the anonymous (non-logged in) user, and 'default' is the logged
159 in user.
160 """
161 User = get_user_model()
162
163 nobody = User.objects.get(username='nobody')
164 default = User.objects.get(username='default')
165
166 view = get_pootle_permission('view')
167 suggest = get_pootle_permission('suggest')
168 translate = get_pootle_permission('translate')
169
170 # Default permissions for tree root.
171 criteria = {
172 'user': nobody,
173 'directory': Directory.objects.root,
174 }
175 permission_set, created = PermissionSet.objects.get_or_create(**criteria)
176 if created:
177 permission_set.positive_permissions = [view, suggest]
178 permission_set.save()
179
180 criteria['user'] = default
181 permission_set, created = PermissionSet.objects.get_or_create(**criteria)
182 if created:
183 permission_set.positive_permissions = [view, suggest, translate]
184 permission_set.save()
185
186 # Default permissions for templates language.
187 # Override with no permissions for templates language.
188 criteria = {
189 'user': nobody,
190 'directory': Directory.objects.get(pootle_path="/templates/"),
191 }
192 permission_set, created = PermissionSet.objects.get_or_create(**criteria)
193 if created:
194 permission_set.positive_permissions = []
195 permission_set.save()
196
197 criteria['user'] = default
198 permission_set, created = PermissionSet.objects.get_or_create(**criteria)
199 if created:
200 permission_set.positive_permissions = []
201 permission_set.save()
202
203
204 def require_english():
205 """Create the English Language item."""
206 criteria = {
207 'code': "en",
208 'fullname': u"English",
209 'nplurals': 2,
210 'pluralequation': "(n != 1)",
211 }
212 en, created = Language.objects.get_or_create(**criteria)
213 return en
214
215
216 def create_root_directories():
217 """Create the root Directory items."""
218 root, created = Directory.objects.get_or_create(name='')
219 projects, created = Directory.objects.get_or_create(name='projects',
220 parent=root)
221
222
223 def create_template_languages():
224 """Create the 'templates' and English languages.
225
226 The 'templates' language is used to give users access to the untranslated
227 template files.
228 """
229 templates, created = Language.objects.get_or_create(code="templates",
230 fullname=u'Templates')
231 require_english()
232
233
234 def create_terminology_project():
235 """Create the terminology project.
236
237 The terminology project is used to display terminology suggestions while
238 translating.
239 """
240 criteria = {
241 'code': "terminology",
242 'fullname': u"Terminology",
243 'source_language': require_english(),
244 'checkstyle': "terminology",
245 }
246 terminology, created = Project.objects.get_or_create(**criteria)
247
248
249 def create_default_projects():
250 """Create the default projects that we host.
251
252 You might want to add your projects here, although you can also add things
253 through the web interface later.
254 """
255 from pootle_project.models import Project
256
257 en = require_english()
258
259 #criteria = {
260 # 'code': u"pootle",
261 # 'source_language': en,
262 # 'fullname': u"Pootle",
263 # 'description': ('<div dir="ltr" lang="en">Interface translations for '
264 # 'Pootle.<br />See the <a href="http://'
265 # 'pootle.locamotion.org">official Pootle server</a> '
266 # 'for the translations of Pootle.</div>')
267 # 'checkstyle': "standard",
268 # 'localfiletype': "po",
269 # 'treestyle': "auto",
270 #}
271 #pootle = Project(**criteria)
272 #pootle.save()
273
274 criteria = {
275 'code': u"tutorial",
276 'source_language': en,
277 'fullname': u"Tutorial",
278 'checkstyle': "standard",
279 'localfiletype': "po",
280 'treestyle': "auto",
281 }
282 tutorial = Project(**criteria)
283 tutorial.save()
284
285
286 def create_default_languages():
287 """Create the default languages."""
288 from translate.lang import data, factory
289
290 from pootle_language.models import Language
291
292 # import languages from toolkit
293 for code in data.languages.keys():
294 try:
295 tk_lang = factory.getlanguage(code)
296 criteria = {
297 'code': code,
298 'fullname': tk_lang.fullname,
299 'nplurals': tk_lang.nplurals,
300 'pluralequation': tk_lang.pluralequation,
301 }
302 try:
303 criteria['specialchars'] = tk_lang.specialchars
304 except AttributeError:
305 pass
306 lang, created = Language.objects.get_or_create(**criteria)
307 except:
308 pass
309
310
311 def create_default_admin():
312 """Create the default admin user for Pootle.
313
314 You definitely want to change the admin account so that your default
315 install is not accessible with the default credentials. The users 'noboby'
316 and 'default' should be left as is.
317 """
318 User = get_user_model()
319
320 criteria = {
321 'username': u"admin",
322 'full_name': u"Administrator",
323 'is_active': True,
324 'is_superuser': True,
325 }
326 admin = User(**criteria)
327 admin.set_password("admin")
328 admin.save()
329
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pootle/core/initdb.py b/pootle/core/initdb.py
--- a/pootle/core/initdb.py
+++ b/pootle/core/initdb.py
@@ -29,6 +29,7 @@
from pootle_app.models.permissions import PermissionSet, get_pootle_permission
from pootle_language.models import Language
from pootle_project.models import Project
+from staticpages.models import StaticPage as Announcement
def initdb():
@@ -256,21 +257,6 @@
en = require_english()
- #criteria = {
- # 'code': u"pootle",
- # 'source_language': en,
- # 'fullname': u"Pootle",
- # 'description': ('<div dir="ltr" lang="en">Interface translations for '
- # 'Pootle.<br />See the <a href="http://'
- # 'pootle.locamotion.org">official Pootle server</a> '
- # 'for the translations of Pootle.</div>')
- # 'checkstyle': "standard",
- # 'localfiletype': "po",
- # 'treestyle': "auto",
- #}
- #pootle = Project(**criteria)
- #pootle.save()
-
criteria = {
'code': u"tutorial",
'source_language': en,
@@ -282,6 +268,20 @@
tutorial = Project(**criteria)
tutorial.save()
+ criteria = {
+ 'active': True,
+ 'title': "Project instructions",
+ 'body': ('<div dir="ltr" lang="en">Tutorial project where users can '
+ 'play with Pootle and learn more about translation and '
+ 'localisation.<br />For more help on localisation, visit the '
+ '<a href="http://docs.translatehouse.org/projects/'
+ 'localization-guide/en/latest/guide/start.html">localisation '
+ 'guide</a>.</div>'),
+ 'virtual_path': "announcements/projects/"+tutorial.code,
+ }
+ ann = Announcement(**criteria)
+ ann.save()
+
def create_default_languages():
"""Create the default languages."""
|
{"golden_diff": "diff --git a/pootle/core/initdb.py b/pootle/core/initdb.py\n--- a/pootle/core/initdb.py\n+++ b/pootle/core/initdb.py\n@@ -29,6 +29,7 @@\n from pootle_app.models.permissions import PermissionSet, get_pootle_permission\n from pootle_language.models import Language\n from pootle_project.models import Project\n+from staticpages.models import StaticPage as Announcement\n \n \n def initdb():\n@@ -256,21 +257,6 @@\n \n en = require_english()\n \n- #criteria = {\n- # 'code': u\"pootle\",\n- # 'source_language': en,\n- # 'fullname': u\"Pootle\",\n- # 'description': ('<div dir=\"ltr\" lang=\"en\">Interface translations for '\n- # 'Pootle.<br />See the <a href=\"http://'\n- # 'pootle.locamotion.org\">official Pootle server</a> '\n- # 'for the translations of Pootle.</div>')\n- # 'checkstyle': \"standard\",\n- # 'localfiletype': \"po\",\n- # 'treestyle': \"auto\",\n- #}\n- #pootle = Project(**criteria)\n- #pootle.save()\n-\n criteria = {\n 'code': u\"tutorial\",\n 'source_language': en,\n@@ -282,6 +268,20 @@\n tutorial = Project(**criteria)\n tutorial.save()\n \n+ criteria = {\n+ 'active': True,\n+ 'title': \"Project instructions\",\n+ 'body': ('<div dir=\"ltr\" lang=\"en\">Tutorial project where users can '\n+ 'play with Pootle and learn more about translation and '\n+ 'localisation.<br />For more help on localisation, visit the '\n+ '<a href=\"http://docs.translatehouse.org/projects/'\n+ 'localization-guide/en/latest/guide/start.html\">localisation '\n+ 'guide</a>.</div>'),\n+ 'virtual_path': \"announcements/projects/\"+tutorial.code,\n+ }\n+ ann = Announcement(**criteria)\n+ ann.save()\n+\n \n def create_default_languages():\n \"\"\"Create the default languages.\"\"\"\n", "issue": "Add default project descriptions\nIn order to get `initdb` running on `core` we had to drop the project [descriptions] since these move to staticpages on core. This bug is to add those back.\n\nTo achieve that initdb should create static pages for these default projects that contain the content previously held in the [descriptions](https://github.com/translate/pootle/blob/96edf539/pootle/core/initdb.py#L300-L306).\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2013 Zuza Software Foundation\n# Copyright 2014-2015 Evernote Corporation\n#\n# This file is part of Pootle.\n#\n# Pootle is free software; you can redistribute it and/or modify it under the\n# terms of the GNU General Public License as published by the Free Software\n# Foundation; either version 2 of the License, or (at your option) any later\n# version.\n#\n# Pootle is distributed in the hope that it will be useful, but WITHOUT ANY\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR\n# A PARTICULAR PURPOSE. 
See the GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# Pootle; if not, see <http://www.gnu.org/licenses/>.\n\n\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Permission\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.translation import ugettext_noop as _\n\nfrom pootle.core.models import Revision\nfrom pootle_app.models import Directory\nfrom pootle_app.models.permissions import PermissionSet, get_pootle_permission\nfrom pootle_language.models import Language\nfrom pootle_project.models import Project\n\n\ndef initdb():\n \"\"\"Populate the database with default initial data.\n\n This creates the default database to get a working Pootle installation.\n \"\"\"\n create_revision()\n create_essential_users()\n create_root_directories()\n create_template_languages()\n create_terminology_project()\n create_pootle_permissions()\n create_pootle_permission_sets()\n\n create_default_projects()\n create_default_languages()\n create_default_admin()\n\n\ndef create_revision():\n Revision.initialize()\n\n\ndef create_essential_users():\n \"\"\"Create the 'default' and 'nobody' User instances.\n\n These users are required for Pootle's permission system.\n \"\"\"\n User = get_user_model()\n\n # The nobody user is used to represent an anonymous user in cases where\n # we need to associate model information with such a user. An example is\n # in the permission system: we need a way to store rights for anonymous\n # users; thus we use the nobody user.\n criteria = {\n 'username': u\"nobody\",\n 'full_name': u\"any anonymous user\",\n 'is_active': True,\n }\n nobody, created = User.objects.get_or_create(**criteria)\n if created:\n nobody.set_unusable_password()\n nobody.save()\n\n # The 'default' user represents any valid, non-anonymous user and is used\n # to associate information any such user. An example is in the permission\n # system: we need a way to store default rights for users. 
We use the\n # 'default' user for this.\n #\n # In a future version of Pootle we should think about using Django's\n # groups to do better permissions handling.\n criteria = {\n 'username': u\"default\",\n 'full_name': u\"any authenticated user\",\n 'is_active': True,\n }\n default, created = User.objects.get_or_create(**criteria)\n if created:\n default.set_unusable_password()\n default.save()\n\n # The system user represents a system, and is used to\n # associate updates done by bulk commands as update_stores.\n criteria = {\n 'username': u\"system\",\n 'full_name': u\"system user\",\n 'is_active': True,\n }\n system, created = User.objects.get_or_create(**criteria)\n if created:\n system.set_unusable_password()\n system.save()\n\n\ndef create_pootle_permissions():\n \"\"\"Create Pootle's directory level permissions.\"\"\"\n\n args = {\n 'app_label': \"pootle_app\",\n 'model': \"directory\",\n }\n pootle_content_type, created = ContentType.objects.get_or_create(**args)\n pootle_content_type.name = 'pootle'\n pootle_content_type.save()\n\n # Create the permissions.\n permissions = [\n {\n 'name': _(\"Can access a project\"),\n 'codename': \"view\",\n },\n {\n 'name': _(\"Cannot access a project\"),\n 'codename': \"hide\",\n },\n {\n 'name': _(\"Can make a suggestion for a translation\"),\n 'codename': \"suggest\",\n },\n {\n 'name': _(\"Can submit a translation\"),\n 'codename': \"translate\",\n },\n {\n 'name': _(\"Can review translations\"),\n 'codename': \"review\",\n },\n {\n 'name': _(\"Can administrate a translation project\"),\n 'codename': \"administrate\",\n },\n ]\n\n criteria = {\n 'content_type': pootle_content_type,\n }\n\n for permission in permissions:\n criteria.update(permission)\n obj, created = Permission.objects.get_or_create(**criteria)\n\n\ndef create_pootle_permission_sets():\n \"\"\"Create the default permission set for the 'nobody' and 'default' users.\n\n 'nobody' is the anonymous (non-logged in) user, and 'default' is the logged\n in user.\n \"\"\"\n User = get_user_model()\n\n nobody = User.objects.get(username='nobody')\n default = User.objects.get(username='default')\n\n view = get_pootle_permission('view')\n suggest = get_pootle_permission('suggest')\n translate = get_pootle_permission('translate')\n\n # Default permissions for tree root.\n criteria = {\n 'user': nobody,\n 'directory': Directory.objects.root,\n }\n permission_set, created = PermissionSet.objects.get_or_create(**criteria)\n if created:\n permission_set.positive_permissions = [view, suggest]\n permission_set.save()\n\n criteria['user'] = default\n permission_set, created = PermissionSet.objects.get_or_create(**criteria)\n if created:\n permission_set.positive_permissions = [view, suggest, translate]\n permission_set.save()\n\n # Default permissions for templates language.\n # Override with no permissions for templates language.\n criteria = {\n 'user': nobody,\n 'directory': Directory.objects.get(pootle_path=\"/templates/\"),\n }\n permission_set, created = PermissionSet.objects.get_or_create(**criteria)\n if created:\n permission_set.positive_permissions = []\n permission_set.save()\n\n criteria['user'] = default\n permission_set, created = PermissionSet.objects.get_or_create(**criteria)\n if created:\n permission_set.positive_permissions = []\n permission_set.save()\n\n\ndef require_english():\n \"\"\"Create the English Language item.\"\"\"\n criteria = {\n 'code': \"en\",\n 'fullname': u\"English\",\n 'nplurals': 2,\n 'pluralequation': \"(n != 1)\",\n }\n en, created = 
Language.objects.get_or_create(**criteria)\n return en\n\n\ndef create_root_directories():\n \"\"\"Create the root Directory items.\"\"\"\n root, created = Directory.objects.get_or_create(name='')\n projects, created = Directory.objects.get_or_create(name='projects',\n parent=root)\n\n\ndef create_template_languages():\n \"\"\"Create the 'templates' and English languages.\n\n The 'templates' language is used to give users access to the untranslated\n template files.\n \"\"\"\n templates, created = Language.objects.get_or_create(code=\"templates\",\n fullname=u'Templates')\n require_english()\n\n\ndef create_terminology_project():\n \"\"\"Create the terminology project.\n\n The terminology project is used to display terminology suggestions while\n translating.\n \"\"\"\n criteria = {\n 'code': \"terminology\",\n 'fullname': u\"Terminology\",\n 'source_language': require_english(),\n 'checkstyle': \"terminology\",\n }\n terminology, created = Project.objects.get_or_create(**criteria)\n\n\ndef create_default_projects():\n \"\"\"Create the default projects that we host.\n\n You might want to add your projects here, although you can also add things\n through the web interface later.\n \"\"\"\n from pootle_project.models import Project\n\n en = require_english()\n\n #criteria = {\n # 'code': u\"pootle\",\n # 'source_language': en,\n # 'fullname': u\"Pootle\",\n # 'description': ('<div dir=\"ltr\" lang=\"en\">Interface translations for '\n # 'Pootle.<br />See the <a href=\"http://'\n # 'pootle.locamotion.org\">official Pootle server</a> '\n # 'for the translations of Pootle.</div>')\n # 'checkstyle': \"standard\",\n # 'localfiletype': \"po\",\n # 'treestyle': \"auto\",\n #}\n #pootle = Project(**criteria)\n #pootle.save()\n\n criteria = {\n 'code': u\"tutorial\",\n 'source_language': en,\n 'fullname': u\"Tutorial\",\n 'checkstyle': \"standard\",\n 'localfiletype': \"po\",\n 'treestyle': \"auto\",\n }\n tutorial = Project(**criteria)\n tutorial.save()\n\n\ndef create_default_languages():\n \"\"\"Create the default languages.\"\"\"\n from translate.lang import data, factory\n\n from pootle_language.models import Language\n\n # import languages from toolkit\n for code in data.languages.keys():\n try:\n tk_lang = factory.getlanguage(code)\n criteria = {\n 'code': code,\n 'fullname': tk_lang.fullname,\n 'nplurals': tk_lang.nplurals,\n 'pluralequation': tk_lang.pluralequation,\n }\n try:\n criteria['specialchars'] = tk_lang.specialchars\n except AttributeError:\n pass\n lang, created = Language.objects.get_or_create(**criteria)\n except:\n pass\n\n\ndef create_default_admin():\n \"\"\"Create the default admin user for Pootle.\n\n You definitely want to change the admin account so that your default\n install is not accessible with the default credentials. 
The users 'noboby'\n and 'default' should be left as is.\n \"\"\"\n User = get_user_model()\n\n criteria = {\n 'username': u\"admin\",\n 'full_name': u\"Administrator\",\n 'is_active': True,\n 'is_superuser': True,\n }\n admin = User(**criteria)\n admin.set_password(\"admin\")\n admin.save()\n", "path": "pootle/core/initdb.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2013 Zuza Software Foundation\n# Copyright 2014-2015 Evernote Corporation\n#\n# This file is part of Pootle.\n#\n# Pootle is free software; you can redistribute it and/or modify it under the\n# terms of the GNU General Public License as published by the Free Software\n# Foundation; either version 2 of the License, or (at your option) any later\n# version.\n#\n# Pootle is distributed in the hope that it will be useful, but WITHOUT ANY\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR\n# A PARTICULAR PURPOSE. See the GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# Pootle; if not, see <http://www.gnu.org/licenses/>.\n\n\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Permission\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.translation import ugettext_noop as _\n\nfrom pootle.core.models import Revision\nfrom pootle_app.models import Directory\nfrom pootle_app.models.permissions import PermissionSet, get_pootle_permission\nfrom pootle_language.models import Language\nfrom pootle_project.models import Project\nfrom staticpages.models import StaticPage as Announcement\n\n\ndef initdb():\n \"\"\"Populate the database with default initial data.\n\n This creates the default database to get a working Pootle installation.\n \"\"\"\n create_revision()\n create_essential_users()\n create_root_directories()\n create_template_languages()\n create_terminology_project()\n create_pootle_permissions()\n create_pootle_permission_sets()\n\n create_default_projects()\n create_default_languages()\n create_default_admin()\n\n\ndef create_revision():\n Revision.initialize()\n\n\ndef create_essential_users():\n \"\"\"Create the 'default' and 'nobody' User instances.\n\n These users are required for Pootle's permission system.\n \"\"\"\n User = get_user_model()\n\n # The nobody user is used to represent an anonymous user in cases where\n # we need to associate model information with such a user. An example is\n # in the permission system: we need a way to store rights for anonymous\n # users; thus we use the nobody user.\n criteria = {\n 'username': u\"nobody\",\n 'full_name': u\"any anonymous user\",\n 'is_active': True,\n }\n nobody, created = User.objects.get_or_create(**criteria)\n if created:\n nobody.set_unusable_password()\n nobody.save()\n\n # The 'default' user represents any valid, non-anonymous user and is used\n # to associate information any such user. An example is in the permission\n # system: we need a way to store default rights for users. 
We use the\n # 'default' user for this.\n #\n # In a future version of Pootle we should think about using Django's\n # groups to do better permissions handling.\n criteria = {\n 'username': u\"default\",\n 'full_name': u\"any authenticated user\",\n 'is_active': True,\n }\n default, created = User.objects.get_or_create(**criteria)\n if created:\n default.set_unusable_password()\n default.save()\n\n # The system user represents a system, and is used to\n # associate updates done by bulk commands as update_stores.\n criteria = {\n 'username': u\"system\",\n 'full_name': u\"system user\",\n 'is_active': True,\n }\n system, created = User.objects.get_or_create(**criteria)\n if created:\n system.set_unusable_password()\n system.save()\n\n\ndef create_pootle_permissions():\n \"\"\"Create Pootle's directory level permissions.\"\"\"\n\n args = {\n 'app_label': \"pootle_app\",\n 'model': \"directory\",\n }\n pootle_content_type, created = ContentType.objects.get_or_create(**args)\n pootle_content_type.name = 'pootle'\n pootle_content_type.save()\n\n # Create the permissions.\n permissions = [\n {\n 'name': _(\"Can access a project\"),\n 'codename': \"view\",\n },\n {\n 'name': _(\"Cannot access a project\"),\n 'codename': \"hide\",\n },\n {\n 'name': _(\"Can make a suggestion for a translation\"),\n 'codename': \"suggest\",\n },\n {\n 'name': _(\"Can submit a translation\"),\n 'codename': \"translate\",\n },\n {\n 'name': _(\"Can review translations\"),\n 'codename': \"review\",\n },\n {\n 'name': _(\"Can administrate a translation project\"),\n 'codename': \"administrate\",\n },\n ]\n\n criteria = {\n 'content_type': pootle_content_type,\n }\n\n for permission in permissions:\n criteria.update(permission)\n obj, created = Permission.objects.get_or_create(**criteria)\n\n\ndef create_pootle_permission_sets():\n \"\"\"Create the default permission set for the 'nobody' and 'default' users.\n\n 'nobody' is the anonymous (non-logged in) user, and 'default' is the logged\n in user.\n \"\"\"\n User = get_user_model()\n\n nobody = User.objects.get(username='nobody')\n default = User.objects.get(username='default')\n\n view = get_pootle_permission('view')\n suggest = get_pootle_permission('suggest')\n translate = get_pootle_permission('translate')\n\n # Default permissions for tree root.\n criteria = {\n 'user': nobody,\n 'directory': Directory.objects.root,\n }\n permission_set, created = PermissionSet.objects.get_or_create(**criteria)\n if created:\n permission_set.positive_permissions = [view, suggest]\n permission_set.save()\n\n criteria['user'] = default\n permission_set, created = PermissionSet.objects.get_or_create(**criteria)\n if created:\n permission_set.positive_permissions = [view, suggest, translate]\n permission_set.save()\n\n # Default permissions for templates language.\n # Override with no permissions for templates language.\n criteria = {\n 'user': nobody,\n 'directory': Directory.objects.get(pootle_path=\"/templates/\"),\n }\n permission_set, created = PermissionSet.objects.get_or_create(**criteria)\n if created:\n permission_set.positive_permissions = []\n permission_set.save()\n\n criteria['user'] = default\n permission_set, created = PermissionSet.objects.get_or_create(**criteria)\n if created:\n permission_set.positive_permissions = []\n permission_set.save()\n\n\ndef require_english():\n \"\"\"Create the English Language item.\"\"\"\n criteria = {\n 'code': \"en\",\n 'fullname': u\"English\",\n 'nplurals': 2,\n 'pluralequation': \"(n != 1)\",\n }\n en, created = 
Language.objects.get_or_create(**criteria)\n return en\n\n\ndef create_root_directories():\n \"\"\"Create the root Directory items.\"\"\"\n root, created = Directory.objects.get_or_create(name='')\n projects, created = Directory.objects.get_or_create(name='projects',\n parent=root)\n\n\ndef create_template_languages():\n \"\"\"Create the 'templates' and English languages.\n\n The 'templates' language is used to give users access to the untranslated\n template files.\n \"\"\"\n templates, created = Language.objects.get_or_create(code=\"templates\",\n fullname=u'Templates')\n require_english()\n\n\ndef create_terminology_project():\n \"\"\"Create the terminology project.\n\n The terminology project is used to display terminology suggestions while\n translating.\n \"\"\"\n criteria = {\n 'code': \"terminology\",\n 'fullname': u\"Terminology\",\n 'source_language': require_english(),\n 'checkstyle': \"terminology\",\n }\n terminology, created = Project.objects.get_or_create(**criteria)\n\n\ndef create_default_projects():\n \"\"\"Create the default projects that we host.\n\n You might want to add your projects here, although you can also add things\n through the web interface later.\n \"\"\"\n from pootle_project.models import Project\n\n en = require_english()\n\n criteria = {\n 'code': u\"tutorial\",\n 'source_language': en,\n 'fullname': u\"Tutorial\",\n 'checkstyle': \"standard\",\n 'localfiletype': \"po\",\n 'treestyle': \"auto\",\n }\n tutorial = Project(**criteria)\n tutorial.save()\n\n criteria = {\n 'active': True,\n 'title': \"Project instructions\",\n 'body': ('<div dir=\"ltr\" lang=\"en\">Tutorial project where users can '\n 'play with Pootle and learn more about translation and '\n 'localisation.<br />For more help on localisation, visit the '\n '<a href=\"http://docs.translatehouse.org/projects/'\n 'localization-guide/en/latest/guide/start.html\">localisation '\n 'guide</a>.</div>'),\n 'virtual_path': \"announcements/projects/\"+tutorial.code,\n }\n ann = Announcement(**criteria)\n ann.save()\n\n\ndef create_default_languages():\n \"\"\"Create the default languages.\"\"\"\n from translate.lang import data, factory\n\n from pootle_language.models import Language\n\n # import languages from toolkit\n for code in data.languages.keys():\n try:\n tk_lang = factory.getlanguage(code)\n criteria = {\n 'code': code,\n 'fullname': tk_lang.fullname,\n 'nplurals': tk_lang.nplurals,\n 'pluralequation': tk_lang.pluralequation,\n }\n try:\n criteria['specialchars'] = tk_lang.specialchars\n except AttributeError:\n pass\n lang, created = Language.objects.get_or_create(**criteria)\n except:\n pass\n\n\ndef create_default_admin():\n \"\"\"Create the default admin user for Pootle.\n\n You definitely want to change the admin account so that your default\n install is not accessible with the default credentials. The users 'noboby'\n and 'default' should be left as is.\n \"\"\"\n User = get_user_model()\n\n criteria = {\n 'username': u\"admin\",\n 'full_name': u\"Administrator\",\n 'is_active': True,\n 'is_superuser': True,\n }\n admin = User(**criteria)\n admin.set_password(\"admin\")\n admin.save()\n", "path": "pootle/core/initdb.py"}]}
| 3,526 | 511 |
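A side note on the pootle fix in the row above: the surrounding `initdb` helpers use `get_or_create` so the command can be re-run safely, while the announcement added by the patch is saved unconditionally. Below is a hedged sketch of an idempotent variant; field names mirror the golden diff, but treating `virtual_path` as the natural key is an assumption, not something verified against the pootle models.

```python
# Sketch only: idempotent variant of the announcement creation from the patch.
from staticpages.models import StaticPage as Announcement


def create_tutorial_announcement(tutorial):
    body = ('<div dir="ltr" lang="en">Tutorial project where users can '
            'play with Pootle and learn more about translation and '
            'localisation.</div>')  # body abbreviated from the patch
    # Keying get_or_create on the virtual path keeps repeated initdb runs from
    # stacking duplicate announcement pages for the tutorial project.
    ann, created = Announcement.objects.get_or_create(
        virtual_path="announcements/projects/" + tutorial.code,
        defaults={'active': True, 'title': "Project instructions", 'body': body},
    )
    return ann
```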
gh_patches_debug_26820
|
rasdani/github-patches
|
git_diff
|
googleapis__python-bigquery-571
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ConnectionError in Client.insert_rows_json()
We have a http Cloud Function that does some data processing and then streams to BQ. The function errors out sometimes because of either the bq client losing connection or it is the insert_rows that can't connect.
See below an example of a stack trace captured in the GCP logs.
```
File "/layers/google.python.pip/pip/lib/python3.8/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/layers/google.python.pip/pip/lib/python3.8/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/layers/google.python.pip/pip/lib/python3.8/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/layers/google.python.pip/pip/lib/python3.8/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/layers/google.python.pip/pip/lib/python3.8/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/layers/google.python.pip/pip/lib/python3.8/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/layers/google.python.functions-framework/functions-framework/lib/python3.8/site-packages/functions_framework/__init__.py", line 66, in view_func
return function(request._get_current_object())
File "/workspace/main.py", line 162, in stream_tax
errors = bq.insert_rows_json(table=dataset_table,
File "/layers/google.python.pip/pip/lib/python3.8/site-packages/google/cloud/bigquery/client.py", line 3013, in insert_rows_json
response = self._call_api(
File "/layers/google.python.pip/pip/lib/python3.8/site-packages/google/cloud/bigquery/client.py", line 636, in _call_api
return call()
File "/layers/google.python.pip/pip/lib/python3.8/site-packages/google/api_core/retry.py", line 281, in retry_wrapped_func
return retry_target(
File "/layers/google.python.pip/pip/lib/python3.8/site-packages/google/api_core/retry.py", line 184, in retry_target
return target()
File "/layers/google.python.pip/pip/lib/python3.8/site-packages/google/cloud/_http.py", line 427, in api_request
response = self._make_request(
File "/layers/google.python.pip/pip/lib/python3.8/site-packages/google/cloud/_http.py", line 291, in _make_request
return self._do_request(
File "/layers/google.python.pip/pip/lib/python3.8/site-packages/google/cloud/_http.py", line 329, in _do_request
return self.http.request(
File "/layers/google.python.pip/pip/lib/python3.8/site-packages/google/auth/transport/requests.py", line 464, in request
response = super(AuthorizedSession, self).request(
File "/layers/google.python.pip/pip/lib/python3.8/site-packages/requests/sessions.py", line 542, in request
resp = self.send(prep, **send_kwargs)
File "/layers/google.python.pip/pip/lib/python3.8/site-packages/requests/sessions.py", line 655, in send
r = adapter.send(request, **kwargs)
File "/layers/google.python.pip/pip/lib/python3.8/site-packages/requests/adapters.py", line 498, in send
raise ConnectionError(err, request=request)
requests.exceptions.ConnectionError: ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer'))
```
- `bq` (=`bigquery.Client()`) in the trace is instantiated as a global variable as recommended here: https://cloud.google.com/functions/docs/bestpractices/networking#accessing_google_apis
- error is logged 30 secs after function is invoked - so can't be the 60s default timeout in `-http`
Thoughts ?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import io
16 import os
17
18 import setuptools
19
20
21 # Package metadata.
22
23 name = "google-cloud-bigquery"
24 description = "Google BigQuery API client library"
25
26 # Should be one of:
27 # 'Development Status :: 3 - Alpha'
28 # 'Development Status :: 4 - Beta'
29 # 'Development Status :: 5 - Production/Stable'
30 release_status = "Development Status :: 5 - Production/Stable"
31 dependencies = [
32 "google-api-core[grpc] >= 1.23.0, < 2.0.0dev",
33 "proto-plus >= 1.10.0",
34 "google-cloud-core >= 1.4.1, < 2.0dev",
35 "google-resumable-media >= 0.6.0, < 2.0dev",
36 "packaging >= 14.3",
37 "protobuf >= 3.12.0",
38 ]
39 extras = {
40 "bqstorage": [
41 "google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev",
42 # Due to an issue in pip's dependency resolver, the `grpc` extra is not
43 # installed, even though `google-cloud-bigquery-storage` specifies it
44 # as `google-api-core[grpc]`. We thus need to explicitly specify it here.
45 # See: https://github.com/googleapis/python-bigquery/issues/83 The
46 # grpc.Channel.close() method isn't added until 1.32.0.
47 # https://github.com/grpc/grpc/pull/15254
48 "grpcio >= 1.32.0, < 2.0dev",
49 "pyarrow >= 1.0.0, < 4.0dev",
50 ],
51 "pandas": ["pandas>=0.23.0", "pyarrow >= 1.0.0, < 4.0dev"],
52 "bignumeric_type": ["pyarrow >= 3.0.0, < 4.0dev"],
53 "tqdm": ["tqdm >= 4.7.4, <5.0.0dev"],
54 "opentelemetry": [
55 "opentelemetry-api==0.11b0",
56 "opentelemetry-sdk==0.11b0",
57 "opentelemetry-instrumentation==0.11b0",
58 ],
59 }
60
61 all_extras = []
62
63 for extra in extras:
64 # Exclude this extra from all to avoid overly strict dependencies on core
65 # libraries such as pyarrow.
66 # https://github.com/googleapis/python-bigquery/issues/563
67 if extra in {"bignumeric_type"}:
68 continue
69 all_extras.extend(extras[extra])
70
71 extras["all"] = all_extras
72
73 # Setup boilerplate below this line.
74
75 package_root = os.path.abspath(os.path.dirname(__file__))
76
77 readme_filename = os.path.join(package_root, "README.rst")
78 with io.open(readme_filename, encoding="utf-8") as readme_file:
79 readme = readme_file.read()
80
81 version = {}
82 with open(os.path.join(package_root, "google/cloud/bigquery/version.py")) as fp:
83 exec(fp.read(), version)
84 version = version["__version__"]
85
86 # Only include packages under the 'google' namespace. Do not include tests,
87 # benchmarks, etc.
88 packages = [
89 package
90 for package in setuptools.PEP420PackageFinder.find()
91 if package.startswith("google")
92 ]
93
94 # Determine which namespaces are needed.
95 namespaces = ["google"]
96 if "google.cloud" in packages:
97 namespaces.append("google.cloud")
98
99
100 setuptools.setup(
101 name=name,
102 version=version,
103 description=description,
104 long_description=readme,
105 author="Google LLC",
106 author_email="[email protected]",
107 license="Apache 2.0",
108 url="https://github.com/googleapis/python-bigquery",
109 classifiers=[
110 release_status,
111 "Intended Audience :: Developers",
112 "License :: OSI Approved :: Apache Software License",
113 "Programming Language :: Python",
114 "Programming Language :: Python :: 3",
115 "Programming Language :: Python :: 3.6",
116 "Programming Language :: Python :: 3.7",
117 "Programming Language :: Python :: 3.8",
118 "Programming Language :: Python :: 3.9",
119 "Operating System :: OS Independent",
120 "Topic :: Internet",
121 ],
122 platforms="Posix; MacOS X; Windows",
123 packages=packages,
124 namespace_packages=namespaces,
125 install_requires=dependencies,
126 extras_require=extras,
127 python_requires=">=3.6, <3.10",
128 include_package_data=True,
129 zip_safe=False,
130 )
131
```
Path: `google/cloud/bigquery/retry.py`
Content:
```
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from google.api_core import exceptions
16 from google.api_core import retry
17
18
19 _RETRYABLE_REASONS = frozenset(
20 ["rateLimitExceeded", "backendError", "internalError", "badGateway"]
21 )
22
23 _UNSTRUCTURED_RETRYABLE_TYPES = (
24 exceptions.TooManyRequests,
25 exceptions.InternalServerError,
26 exceptions.BadGateway,
27 )
28
29
30 def _should_retry(exc):
31 """Predicate for determining when to retry.
32
33 We retry if and only if the 'reason' is 'backendError'
34 or 'rateLimitExceeded'.
35 """
36 if not hasattr(exc, "errors"):
37 return False
38
39 if len(exc.errors) == 0:
40 # Check for unstructured error returns, e.g. from GFE
41 return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES)
42
43 reason = exc.errors[0]["reason"]
44 return reason in _RETRYABLE_REASONS
45
46
47 DEFAULT_RETRY = retry.Retry(predicate=_should_retry)
48 """The default retry object.
49
50 Any method with a ``retry`` parameter will be retried automatically,
51 with reasonable defaults. To disable retry, pass ``retry=None``.
52 To modify the default retry behavior, call a ``with_XXX`` method
53 on ``DEFAULT_RETRY``. For example, to change the deadline to 30 seconds,
54 pass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``.
55 """
56
```
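A short usage sketch for the `DEFAULT_RETRY` docstring above; the client, table id, and row payload are made up for illustration and are not taken from the repository.

```python
# Usage sketch: tune or disable retries on a per-call basis.
from google.cloud import bigquery

client = bigquery.Client()  # assumes application-default credentials
rows = [{"name": "alice", "value": 1}]
table_id = "my-project.my_dataset.my_table"  # hypothetical table

# Tighter overall deadline, as the docstring describes.
errors = client.insert_rows_json(
    table_id, rows, retry=bigquery.DEFAULT_RETRY.with_deadline(30)
)

# Or disable retries entirely for this call.
errors = client.insert_rows_json(table_id, rows, retry=None)
```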
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/google/cloud/bigquery/retry.py b/google/cloud/bigquery/retry.py
--- a/google/cloud/bigquery/retry.py
+++ b/google/cloud/bigquery/retry.py
@@ -14,6 +14,7 @@
from google.api_core import exceptions
from google.api_core import retry
+import requests.exceptions
_RETRYABLE_REASONS = frozenset(
@@ -21,9 +22,11 @@
)
_UNSTRUCTURED_RETRYABLE_TYPES = (
+ ConnectionError,
exceptions.TooManyRequests,
exceptions.InternalServerError,
exceptions.BadGateway,
+ requests.exceptions.ConnectionError,
)
@@ -33,10 +36,7 @@
We retry if and only if the 'reason' is 'backendError'
or 'rateLimitExceeded'.
"""
- if not hasattr(exc, "errors"):
- return False
-
- if len(exc.errors) == 0:
+ if not hasattr(exc, "errors") or len(exc.errors) == 0:
# Check for unstructured error returns, e.g. from GFE
return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES)
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -35,6 +35,7 @@
"google-resumable-media >= 0.6.0, < 2.0dev",
"packaging >= 14.3",
"protobuf >= 3.12.0",
+ "requests >= 2.18.0, < 3.0.0dev",
]
extras = {
"bqstorage": [
|
{"golden_diff": "diff --git a/google/cloud/bigquery/retry.py b/google/cloud/bigquery/retry.py\n--- a/google/cloud/bigquery/retry.py\n+++ b/google/cloud/bigquery/retry.py\n@@ -14,6 +14,7 @@\n \n from google.api_core import exceptions\n from google.api_core import retry\n+import requests.exceptions\n \n \n _RETRYABLE_REASONS = frozenset(\n@@ -21,9 +22,11 @@\n )\n \n _UNSTRUCTURED_RETRYABLE_TYPES = (\n+ ConnectionError,\n exceptions.TooManyRequests,\n exceptions.InternalServerError,\n exceptions.BadGateway,\n+ requests.exceptions.ConnectionError,\n )\n \n \n@@ -33,10 +36,7 @@\n We retry if and only if the 'reason' is 'backendError'\n or 'rateLimitExceeded'.\n \"\"\"\n- if not hasattr(exc, \"errors\"):\n- return False\n-\n- if len(exc.errors) == 0:\n+ if not hasattr(exc, \"errors\") or len(exc.errors) == 0:\n # Check for unstructured error returns, e.g. from GFE\n return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES)\n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -35,6 +35,7 @@\n \"google-resumable-media >= 0.6.0, < 2.0dev\",\n \"packaging >= 14.3\",\n \"protobuf >= 3.12.0\",\n+ \"requests >= 2.18.0, < 3.0.0dev\",\n ]\n extras = {\n \"bqstorage\": [\n", "issue": "ConnectionError in Client.insert_rows_json()\nWe have a http Cloud Function that does some data processing and then streams to BQ. The function errors out sometimes because of either the bq client losing connection or it is the insert_rows that can't connect. \r\nSee below an example of a stack trace captured in the GCP logs.\r\n\r\n\r\n```\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/flask/app.py\", line 2447, in wsgi_app\r\n response = self.full_dispatch_request()\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/flask/app.py\", line 1952, in full_dispatch_request\r\n rv = self.handle_user_exception(e)\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/flask/app.py\", line 1821, in handle_user_exception\r\n reraise(exc_type, exc_value, tb)\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/flask/_compat.py\", line 39, in reraise\r\n raise value\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/flask/app.py\", line 1950, in full_dispatch_request\r\n rv = self.dispatch_request()\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/flask/app.py\", line 1936, in dispatch_request\r\n return self.view_functions[rule.endpoint](**req.view_args)\r\n File \"/layers/google.python.functions-framework/functions-framework/lib/python3.8/site-packages/functions_framework/__init__.py\", line 66, in view_func\r\n return function(request._get_current_object())\r\n File \"/workspace/main.py\", line 162, in stream_tax\r\n errors = bq.insert_rows_json(table=dataset_table,\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/google/cloud/bigquery/client.py\", line 3013, in insert_rows_json\r\n response = self._call_api(\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/google/cloud/bigquery/client.py\", line 636, in _call_api\r\n return call()\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/google/api_core/retry.py\", line 281, in retry_wrapped_func\r\n return retry_target(\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/google/api_core/retry.py\", line 184, in retry_target\r\n return target()\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/google/cloud/_http.py\", line 427, in api_request\r\n response = 
self._make_request(\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/google/cloud/_http.py\", line 291, in _make_request\r\n return self._do_request(\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/google/cloud/_http.py\", line 329, in _do_request\r\n return self.http.request(\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/google/auth/transport/requests.py\", line 464, in request\r\n response = super(AuthorizedSession, self).request(\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/requests/sessions.py\", line 542, in request\r\n resp = self.send(prep, **send_kwargs)\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/requests/sessions.py\", line 655, in send\r\n r = adapter.send(request, **kwargs)\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/requests/adapters.py\", line 498, in send\r\n raise ConnectionError(err, request=request)\r\nrequests.exceptions.ConnectionError: ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer'))\r\n```\r\n\r\n\r\n- `bq` (=`bigquery.Client()`) in the trace is instantiated as a global variable as recommended here: https://cloud.google.com/functions/docs/bestpractices/networking#accessing_google_apis\r\n\r\n- error is logged 30 secs after function is invoked - so can't be the 60s default timeout in `-http`\r\n\r\nThoughts ?\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\n\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-api-core[grpc] >= 1.23.0, < 2.0.0dev\",\n \"proto-plus >= 1.10.0\",\n \"google-cloud-core >= 1.4.1, < 2.0dev\",\n \"google-resumable-media >= 0.6.0, < 2.0dev\",\n \"packaging >= 14.3\",\n \"protobuf >= 3.12.0\",\n]\nextras = {\n \"bqstorage\": [\n \"google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev\",\n # Due to an issue in pip's dependency resolver, the `grpc` extra is not\n # installed, even though `google-cloud-bigquery-storage` specifies it\n # as `google-api-core[grpc]`. 
We thus need to explicitly specify it here.\n # See: https://github.com/googleapis/python-bigquery/issues/83 The\n # grpc.Channel.close() method isn't added until 1.32.0.\n # https://github.com/grpc/grpc/pull/15254\n \"grpcio >= 1.32.0, < 2.0dev\",\n \"pyarrow >= 1.0.0, < 4.0dev\",\n ],\n \"pandas\": [\"pandas>=0.23.0\", \"pyarrow >= 1.0.0, < 4.0dev\"],\n \"bignumeric_type\": [\"pyarrow >= 3.0.0, < 4.0dev\"],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n \"opentelemetry-api==0.11b0\",\n \"opentelemetry-sdk==0.11b0\",\n \"opentelemetry-instrumentation==0.11b0\",\n ],\n}\n\nall_extras = []\n\nfor extra in extras:\n # Exclude this extra from all to avoid overly strict dependencies on core\n # libraries such as pyarrow.\n # https://github.com/googleapis/python-bigquery/issues/563\n if extra in {\"bignumeric_type\"}:\n continue\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = {}\nwith open(os.path.join(package_root, \"google/cloud/bigquery/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package\n for package in setuptools.PEP420PackageFinder.find()\n if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=3.6, <3.10\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py"}, {"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom google.api_core import exceptions\nfrom google.api_core import retry\n\n\n_RETRYABLE_REASONS = frozenset(\n [\"rateLimitExceeded\", \"backendError\", \"internalError\", \"badGateway\"]\n)\n\n_UNSTRUCTURED_RETRYABLE_TYPES = (\n 
exceptions.TooManyRequests,\n exceptions.InternalServerError,\n exceptions.BadGateway,\n)\n\n\ndef _should_retry(exc):\n \"\"\"Predicate for determining when to retry.\n\n We retry if and only if the 'reason' is 'backendError'\n or 'rateLimitExceeded'.\n \"\"\"\n if not hasattr(exc, \"errors\"):\n return False\n\n if len(exc.errors) == 0:\n # Check for unstructured error returns, e.g. from GFE\n return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES)\n\n reason = exc.errors[0][\"reason\"]\n return reason in _RETRYABLE_REASONS\n\n\nDEFAULT_RETRY = retry.Retry(predicate=_should_retry)\n\"\"\"The default retry object.\n\nAny method with a ``retry`` parameter will be retried automatically,\nwith reasonable defaults. To disable retry, pass ``retry=None``.\nTo modify the default retry behavior, call a ``with_XXX`` method\non ``DEFAULT_RETRY``. For example, to change the deadline to 30 seconds,\npass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``.\n\"\"\"\n", "path": "google/cloud/bigquery/retry.py"}], "after_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\n\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-api-core[grpc] >= 1.23.0, < 2.0.0dev\",\n \"proto-plus >= 1.10.0\",\n \"google-cloud-core >= 1.4.1, < 2.0dev\",\n \"google-resumable-media >= 0.6.0, < 2.0dev\",\n \"packaging >= 14.3\",\n \"protobuf >= 3.12.0\",\n \"requests >= 2.18.0, < 3.0.0dev\",\n]\nextras = {\n \"bqstorage\": [\n \"google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev\",\n # Due to an issue in pip's dependency resolver, the `grpc` extra is not\n # installed, even though `google-cloud-bigquery-storage` specifies it\n # as `google-api-core[grpc]`. 
We thus need to explicitly specify it here.\n # See: https://github.com/googleapis/python-bigquery/issues/83 The\n # grpc.Channel.close() method isn't added until 1.32.0.\n # https://github.com/grpc/grpc/pull/15254\n \"grpcio >= 1.32.0, < 2.0dev\",\n \"pyarrow >= 1.0.0, < 4.0dev\",\n ],\n \"pandas\": [\"pandas>=0.23.0\", \"pyarrow >= 1.0.0, < 4.0dev\"],\n \"bignumeric_type\": [\"pyarrow >= 3.0.0, < 4.0dev\"],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n \"opentelemetry-api==0.11b0\",\n \"opentelemetry-sdk==0.11b0\",\n \"opentelemetry-instrumentation==0.11b0\",\n ],\n}\n\nall_extras = []\n\nfor extra in extras:\n # Exclude this extra from all to avoid overly strict dependencies on core\n # libraries such as pyarrow.\n # https://github.com/googleapis/python-bigquery/issues/563\n if extra in {\"bignumeric_type\"}:\n continue\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = {}\nwith open(os.path.join(package_root, \"google/cloud/bigquery/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package\n for package in setuptools.PEP420PackageFinder.find()\n if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=3.6, <3.10\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py"}, {"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom google.api_core import exceptions\nfrom google.api_core import retry\nimport requests.exceptions\n\n\n_RETRYABLE_REASONS = frozenset(\n [\"rateLimitExceeded\", \"backendError\", \"internalError\", \"badGateway\"]\n)\n\n_UNSTRUCTURED_RETRYABLE_TYPES = (\n 
ConnectionError,\n exceptions.TooManyRequests,\n exceptions.InternalServerError,\n exceptions.BadGateway,\n requests.exceptions.ConnectionError,\n)\n\n\ndef _should_retry(exc):\n \"\"\"Predicate for determining when to retry.\n\n We retry if and only if the 'reason' is 'backendError'\n or 'rateLimitExceeded'.\n \"\"\"\n if not hasattr(exc, \"errors\") or len(exc.errors) == 0:\n # Check for unstructured error returns, e.g. from GFE\n return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES)\n\n reason = exc.errors[0][\"reason\"]\n return reason in _RETRYABLE_REASONS\n\n\nDEFAULT_RETRY = retry.Retry(predicate=_should_retry)\n\"\"\"The default retry object.\n\nAny method with a ``retry`` parameter will be retried automatically,\nwith reasonable defaults. To disable retry, pass ``retry=None``.\nTo modify the default retry behavior, call a ``with_XXX`` method\non ``DEFAULT_RETRY``. For example, to change the deadline to 30 seconds,\npass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``.\n\"\"\"\n", "path": "google/cloud/bigquery/retry.py"}]}
| 3,193 | 365 |
gh_patches_debug_17976
|
rasdani/github-patches
|
git_diff
|
projectmesa__mesa-1330
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
random.sample with a set is removed in Python 3.11
Using [`random.sample`](https://docs.python.org/3/library/random.html#random.sample) with a set was deprecated in Python 3.9 and removed in Python 3.11.
> Changed in version 3.11: The population must be a sequence. Automatic conversion of sets to lists is no longer supported.
Two cases of this function being used with a set have been detected by the CI in Mesa, both in example models:
1. https://github.com/projectmesa/mesa/blob/5135be8759d9fdac551c5c6ed13e9ae5205bce3e/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/model.py#L28
2. https://github.com/projectmesa/mesa/blob/5135be8759d9fdac551c5c6ed13e9ae5205bce3e/examples/virus_on_network/virus_on_network/model.py#L84
What would be the most efficient way to get `self.G.nodes()` in a sequence form (so probably list or tuple)? I feel like just putting it inside a `list()` isn't the proper way to do it.
--- END ISSUE ---
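A minimal sketch of the 3.11-safe pattern (the fix shown in the diff below boils down to passing `list(self.G)` instead of `self.G.nodes()`); the graph and sample size here are arbitrary.

```python
# Sketch: random.sample() requires a sequence on Python 3.11+.
import random

import networkx as nx

G = nx.erdos_renyi_graph(n=10, p=0.5)

# random.sample(G.nodes(), 3) worked (with a DeprecationWarning) up to 3.10,
# but raises TypeError on 3.11 because a NodeView is not a sequence.
chosen = random.sample(list(G), 3)  # list(G) == list(G.nodes())
print(chosen)
```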
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/virus_on_network/virus_on_network/model.py`
Content:
```
1 import math
2 from enum import Enum
3 import networkx as nx
4
5 from mesa import Agent, Model
6 from mesa.time import RandomActivation
7 from mesa.datacollection import DataCollector
8 from mesa.space import NetworkGrid
9
10
11 class State(Enum):
12 SUSCEPTIBLE = 0
13 INFECTED = 1
14 RESISTANT = 2
15
16
17 def number_state(model, state):
18 return sum(1 for a in model.grid.get_all_cell_contents() if a.state is state)
19
20
21 def number_infected(model):
22 return number_state(model, State.INFECTED)
23
24
25 def number_susceptible(model):
26 return number_state(model, State.SUSCEPTIBLE)
27
28
29 def number_resistant(model):
30 return number_state(model, State.RESISTANT)
31
32
33 class VirusOnNetwork(Model):
34 """A virus model with some number of agents"""
35
36 def __init__(
37 self,
38 num_nodes=10,
39 avg_node_degree=3,
40 initial_outbreak_size=1,
41 virus_spread_chance=0.4,
42 virus_check_frequency=0.4,
43 recovery_chance=0.3,
44 gain_resistance_chance=0.5,
45 ):
46
47 self.num_nodes = num_nodes
48 prob = avg_node_degree / self.num_nodes
49 self.G = nx.erdos_renyi_graph(n=self.num_nodes, p=prob)
50 self.grid = NetworkGrid(self.G)
51 self.schedule = RandomActivation(self)
52 self.initial_outbreak_size = (
53 initial_outbreak_size if initial_outbreak_size <= num_nodes else num_nodes
54 )
55 self.virus_spread_chance = virus_spread_chance
56 self.virus_check_frequency = virus_check_frequency
57 self.recovery_chance = recovery_chance
58 self.gain_resistance_chance = gain_resistance_chance
59
60 self.datacollector = DataCollector(
61 {
62 "Infected": number_infected,
63 "Susceptible": number_susceptible,
64 "Resistant": number_resistant,
65 }
66 )
67
68 # Create agents
69 for i, node in enumerate(self.G.nodes()):
70 a = VirusAgent(
71 i,
72 self,
73 State.SUSCEPTIBLE,
74 self.virus_spread_chance,
75 self.virus_check_frequency,
76 self.recovery_chance,
77 self.gain_resistance_chance,
78 )
79 self.schedule.add(a)
80 # Add the agent to the node
81 self.grid.place_agent(a, node)
82
83 # Infect some nodes
84 infected_nodes = self.random.sample(self.G.nodes(), self.initial_outbreak_size)
85 for a in self.grid.get_cell_list_contents(infected_nodes):
86 a.state = State.INFECTED
87
88 self.running = True
89 self.datacollector.collect(self)
90
91 def resistant_susceptible_ratio(self):
92 try:
93 return number_state(self, State.RESISTANT) / number_state(
94 self, State.SUSCEPTIBLE
95 )
96 except ZeroDivisionError:
97 return math.inf
98
99 def step(self):
100 self.schedule.step()
101 # collect data
102 self.datacollector.collect(self)
103
104 def run_model(self, n):
105 for i in range(n):
106 self.step()
107
108
109 class VirusAgent(Agent):
110 def __init__(
111 self,
112 unique_id,
113 model,
114 initial_state,
115 virus_spread_chance,
116 virus_check_frequency,
117 recovery_chance,
118 gain_resistance_chance,
119 ):
120 super().__init__(unique_id, model)
121
122 self.state = initial_state
123
124 self.virus_spread_chance = virus_spread_chance
125 self.virus_check_frequency = virus_check_frequency
126 self.recovery_chance = recovery_chance
127 self.gain_resistance_chance = gain_resistance_chance
128
129 def try_to_infect_neighbors(self):
130 neighbors_nodes = self.model.grid.get_neighbors(self.pos, include_center=False)
131 susceptible_neighbors = [
132 agent
133 for agent in self.model.grid.get_cell_list_contents(neighbors_nodes)
134 if agent.state is State.SUSCEPTIBLE
135 ]
136 for a in susceptible_neighbors:
137 if self.random.random() < self.virus_spread_chance:
138 a.state = State.INFECTED
139
140 def try_gain_resistance(self):
141 if self.random.random() < self.gain_resistance_chance:
142 self.state = State.RESISTANT
143
144 def try_remove_infection(self):
145 # Try to remove
146 if self.random.random() < self.recovery_chance:
147 # Success
148 self.state = State.SUSCEPTIBLE
149 self.try_gain_resistance()
150 else:
151 # Failed
152 self.state = State.INFECTED
153
154 def try_check_situation(self):
155 if self.random.random() < self.virus_check_frequency:
156 # Checking...
157 if self.state is State.INFECTED:
158 self.try_remove_infection()
159
160 def step(self):
161 if self.state is State.INFECTED:
162 self.try_to_infect_neighbors()
163 self.try_check_situation()
164
```
Path: `examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/model.py`
Content:
```
1 import mesa
2 import networkx as nx
3
4
5 def compute_gini(model):
6 agent_wealths = [agent.wealth for agent in model.schedule.agents]
7 x = sorted(agent_wealths)
8 N = model.num_agents
9 B = sum(xi * (N - i) for i, xi in enumerate(x)) / (N * sum(x))
10 return 1 + (1 / N) - 2 * B
11
12
13 class BoltzmannWealthModelNetwork(mesa.Model):
14 """A model with some number of agents."""
15
16 def __init__(self, num_agents=7, num_nodes=10):
17
18 self.num_agents = num_agents
19 self.num_nodes = num_nodes if num_nodes >= self.num_agents else self.num_agents
20 self.G = nx.erdos_renyi_graph(n=self.num_nodes, p=0.5)
21 self.grid = mesa.space.NetworkGrid(self.G)
22 self.schedule = mesa.time.RandomActivation(self)
23 self.datacollector = mesa.DataCollector(
24 model_reporters={"Gini": compute_gini},
25 agent_reporters={"Wealth": lambda _: _.wealth},
26 )
27
28 list_of_random_nodes = self.random.sample(self.G.nodes(), self.num_agents)
29
30 # Create agents
31 for i in range(self.num_agents):
32 a = MoneyAgent(i, self)
33 self.schedule.add(a)
34 # Add the agent to a random node
35 self.grid.place_agent(a, list_of_random_nodes[i])
36
37 self.running = True
38 self.datacollector.collect(self)
39
40 def step(self):
41 self.schedule.step()
42 # collect data
43 self.datacollector.collect(self)
44
45 def run_model(self, n):
46 for i in range(n):
47 self.step()
48
49
50 class MoneyAgent(mesa.Agent):
51 """An agent with fixed initial wealth."""
52
53 def __init__(self, unique_id, model):
54 super().__init__(unique_id, model)
55 self.wealth = 1
56
57 def move(self):
58 possible_steps = [
59 node
60 for node in self.model.grid.get_neighbors(self.pos, include_center=False)
61 if self.model.grid.is_cell_empty(node)
62 ]
63 if len(possible_steps) > 0:
64 new_position = self.random.choice(possible_steps)
65 self.model.grid.move_agent(self, new_position)
66
67 def give_money(self):
68
69 neighbors_nodes = self.model.grid.get_neighbors(self.pos, include_center=False)
70 neighbors = self.model.grid.get_cell_list_contents(neighbors_nodes)
71 if len(neighbors) > 0:
72 other = self.random.choice(neighbors)
73 other.wealth += 1
74 self.wealth -= 1
75
76 def step(self):
77 self.move()
78 if self.wealth > 0:
79 self.give_money()
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/model.py b/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/model.py
--- a/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/model.py
+++ b/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/model.py
@@ -25,7 +25,7 @@
agent_reporters={"Wealth": lambda _: _.wealth},
)
- list_of_random_nodes = self.random.sample(self.G.nodes(), self.num_agents)
+ list_of_random_nodes = self.random.sample(list(self.G), self.num_agents)
# Create agents
for i in range(self.num_agents):
diff --git a/examples/virus_on_network/virus_on_network/model.py b/examples/virus_on_network/virus_on_network/model.py
--- a/examples/virus_on_network/virus_on_network/model.py
+++ b/examples/virus_on_network/virus_on_network/model.py
@@ -81,7 +81,7 @@
self.grid.place_agent(a, node)
# Infect some nodes
- infected_nodes = self.random.sample(self.G.nodes(), self.initial_outbreak_size)
+ infected_nodes = self.random.sample(list(self.G), self.initial_outbreak_size)
for a in self.grid.get_cell_list_contents(infected_nodes):
a.state = State.INFECTED
|
{"golden_diff": "diff --git a/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/model.py b/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/model.py\n--- a/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/model.py\n+++ b/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/model.py\n@@ -25,7 +25,7 @@\n agent_reporters={\"Wealth\": lambda _: _.wealth},\n )\n \n- list_of_random_nodes = self.random.sample(self.G.nodes(), self.num_agents)\n+ list_of_random_nodes = self.random.sample(list(self.G), self.num_agents)\n \n # Create agents\n for i in range(self.num_agents):\ndiff --git a/examples/virus_on_network/virus_on_network/model.py b/examples/virus_on_network/virus_on_network/model.py\n--- a/examples/virus_on_network/virus_on_network/model.py\n+++ b/examples/virus_on_network/virus_on_network/model.py\n@@ -81,7 +81,7 @@\n self.grid.place_agent(a, node)\n \n # Infect some nodes\n- infected_nodes = self.random.sample(self.G.nodes(), self.initial_outbreak_size)\n+ infected_nodes = self.random.sample(list(self.G), self.initial_outbreak_size)\n for a in self.grid.get_cell_list_contents(infected_nodes):\n a.state = State.INFECTED\n", "issue": "random.sample with a set is removed in Python 3.11\nUsing [`random.sample`](https://docs.python.org/3/library/random.html#random.sample) with a set has been depreciated in Python 3.9 and was removed in Python 3.11.\r\n\r\n> Changed in version 3.11: The population must be a sequence. Automatic conversion of sets to lists is no longer supported.\r\n\r\nTwo cases of this function being used with a set have been detected by the CI in Mesa, both in example models:\r\n\r\n1. https://github.com/projectmesa/mesa/blob/5135be8759d9fdac551c5c6ed13e9ae5205bce3e/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/model.py#L28\r\n2. https://github.com/projectmesa/mesa/blob/5135be8759d9fdac551c5c6ed13e9ae5205bce3e/examples/virus_on_network/virus_on_network/model.py#L84\r\n\r\nWhat would be the most efficient way to get `self.G.nodes()` in a sequence form (so probably list or tuple)? 
I feel like just putting it inside a `list()` isn't the proper way to do it.\n", "before_files": [{"content": "import math\nfrom enum import Enum\nimport networkx as nx\n\nfrom mesa import Agent, Model\nfrom mesa.time import RandomActivation\nfrom mesa.datacollection import DataCollector\nfrom mesa.space import NetworkGrid\n\n\nclass State(Enum):\n SUSCEPTIBLE = 0\n INFECTED = 1\n RESISTANT = 2\n\n\ndef number_state(model, state):\n return sum(1 for a in model.grid.get_all_cell_contents() if a.state is state)\n\n\ndef number_infected(model):\n return number_state(model, State.INFECTED)\n\n\ndef number_susceptible(model):\n return number_state(model, State.SUSCEPTIBLE)\n\n\ndef number_resistant(model):\n return number_state(model, State.RESISTANT)\n\n\nclass VirusOnNetwork(Model):\n \"\"\"A virus model with some number of agents\"\"\"\n\n def __init__(\n self,\n num_nodes=10,\n avg_node_degree=3,\n initial_outbreak_size=1,\n virus_spread_chance=0.4,\n virus_check_frequency=0.4,\n recovery_chance=0.3,\n gain_resistance_chance=0.5,\n ):\n\n self.num_nodes = num_nodes\n prob = avg_node_degree / self.num_nodes\n self.G = nx.erdos_renyi_graph(n=self.num_nodes, p=prob)\n self.grid = NetworkGrid(self.G)\n self.schedule = RandomActivation(self)\n self.initial_outbreak_size = (\n initial_outbreak_size if initial_outbreak_size <= num_nodes else num_nodes\n )\n self.virus_spread_chance = virus_spread_chance\n self.virus_check_frequency = virus_check_frequency\n self.recovery_chance = recovery_chance\n self.gain_resistance_chance = gain_resistance_chance\n\n self.datacollector = DataCollector(\n {\n \"Infected\": number_infected,\n \"Susceptible\": number_susceptible,\n \"Resistant\": number_resistant,\n }\n )\n\n # Create agents\n for i, node in enumerate(self.G.nodes()):\n a = VirusAgent(\n i,\n self,\n State.SUSCEPTIBLE,\n self.virus_spread_chance,\n self.virus_check_frequency,\n self.recovery_chance,\n self.gain_resistance_chance,\n )\n self.schedule.add(a)\n # Add the agent to the node\n self.grid.place_agent(a, node)\n\n # Infect some nodes\n infected_nodes = self.random.sample(self.G.nodes(), self.initial_outbreak_size)\n for a in self.grid.get_cell_list_contents(infected_nodes):\n a.state = State.INFECTED\n\n self.running = True\n self.datacollector.collect(self)\n\n def resistant_susceptible_ratio(self):\n try:\n return number_state(self, State.RESISTANT) / number_state(\n self, State.SUSCEPTIBLE\n )\n except ZeroDivisionError:\n return math.inf\n\n def step(self):\n self.schedule.step()\n # collect data\n self.datacollector.collect(self)\n\n def run_model(self, n):\n for i in range(n):\n self.step()\n\n\nclass VirusAgent(Agent):\n def __init__(\n self,\n unique_id,\n model,\n initial_state,\n virus_spread_chance,\n virus_check_frequency,\n recovery_chance,\n gain_resistance_chance,\n ):\n super().__init__(unique_id, model)\n\n self.state = initial_state\n\n self.virus_spread_chance = virus_spread_chance\n self.virus_check_frequency = virus_check_frequency\n self.recovery_chance = recovery_chance\n self.gain_resistance_chance = gain_resistance_chance\n\n def try_to_infect_neighbors(self):\n neighbors_nodes = self.model.grid.get_neighbors(self.pos, include_center=False)\n susceptible_neighbors = [\n agent\n for agent in self.model.grid.get_cell_list_contents(neighbors_nodes)\n if agent.state is State.SUSCEPTIBLE\n ]\n for a in susceptible_neighbors:\n if self.random.random() < self.virus_spread_chance:\n a.state = State.INFECTED\n\n def try_gain_resistance(self):\n if self.random.random() < 
self.gain_resistance_chance:\n self.state = State.RESISTANT\n\n def try_remove_infection(self):\n # Try to remove\n if self.random.random() < self.recovery_chance:\n # Success\n self.state = State.SUSCEPTIBLE\n self.try_gain_resistance()\n else:\n # Failed\n self.state = State.INFECTED\n\n def try_check_situation(self):\n if self.random.random() < self.virus_check_frequency:\n # Checking...\n if self.state is State.INFECTED:\n self.try_remove_infection()\n\n def step(self):\n if self.state is State.INFECTED:\n self.try_to_infect_neighbors()\n self.try_check_situation()\n", "path": "examples/virus_on_network/virus_on_network/model.py"}, {"content": "import mesa\nimport networkx as nx\n\n\ndef compute_gini(model):\n agent_wealths = [agent.wealth for agent in model.schedule.agents]\n x = sorted(agent_wealths)\n N = model.num_agents\n B = sum(xi * (N - i) for i, xi in enumerate(x)) / (N * sum(x))\n return 1 + (1 / N) - 2 * B\n\n\nclass BoltzmannWealthModelNetwork(mesa.Model):\n \"\"\"A model with some number of agents.\"\"\"\n\n def __init__(self, num_agents=7, num_nodes=10):\n\n self.num_agents = num_agents\n self.num_nodes = num_nodes if num_nodes >= self.num_agents else self.num_agents\n self.G = nx.erdos_renyi_graph(n=self.num_nodes, p=0.5)\n self.grid = mesa.space.NetworkGrid(self.G)\n self.schedule = mesa.time.RandomActivation(self)\n self.datacollector = mesa.DataCollector(\n model_reporters={\"Gini\": compute_gini},\n agent_reporters={\"Wealth\": lambda _: _.wealth},\n )\n\n list_of_random_nodes = self.random.sample(self.G.nodes(), self.num_agents)\n\n # Create agents\n for i in range(self.num_agents):\n a = MoneyAgent(i, self)\n self.schedule.add(a)\n # Add the agent to a random node\n self.grid.place_agent(a, list_of_random_nodes[i])\n\n self.running = True\n self.datacollector.collect(self)\n\n def step(self):\n self.schedule.step()\n # collect data\n self.datacollector.collect(self)\n\n def run_model(self, n):\n for i in range(n):\n self.step()\n\n\nclass MoneyAgent(mesa.Agent):\n \"\"\"An agent with fixed initial wealth.\"\"\"\n\n def __init__(self, unique_id, model):\n super().__init__(unique_id, model)\n self.wealth = 1\n\n def move(self):\n possible_steps = [\n node\n for node in self.model.grid.get_neighbors(self.pos, include_center=False)\n if self.model.grid.is_cell_empty(node)\n ]\n if len(possible_steps) > 0:\n new_position = self.random.choice(possible_steps)\n self.model.grid.move_agent(self, new_position)\n\n def give_money(self):\n\n neighbors_nodes = self.model.grid.get_neighbors(self.pos, include_center=False)\n neighbors = self.model.grid.get_cell_list_contents(neighbors_nodes)\n if len(neighbors) > 0:\n other = self.random.choice(neighbors)\n other.wealth += 1\n self.wealth -= 1\n\n def step(self):\n self.move()\n if self.wealth > 0:\n self.give_money()\n", "path": "examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/model.py"}], "after_files": [{"content": "import math\nfrom enum import Enum\nimport networkx as nx\n\nfrom mesa import Agent, Model\nfrom mesa.time import RandomActivation\nfrom mesa.datacollection import DataCollector\nfrom mesa.space import NetworkGrid\n\n\nclass State(Enum):\n SUSCEPTIBLE = 0\n INFECTED = 1\n RESISTANT = 2\n\n\ndef number_state(model, state):\n return sum(1 for a in model.grid.get_all_cell_contents() if a.state is state)\n\n\ndef number_infected(model):\n return number_state(model, State.INFECTED)\n\n\ndef number_susceptible(model):\n return number_state(model, State.SUSCEPTIBLE)\n\n\ndef 
number_resistant(model):\n return number_state(model, State.RESISTANT)\n\n\nclass VirusOnNetwork(Model):\n \"\"\"A virus model with some number of agents\"\"\"\n\n def __init__(\n self,\n num_nodes=10,\n avg_node_degree=3,\n initial_outbreak_size=1,\n virus_spread_chance=0.4,\n virus_check_frequency=0.4,\n recovery_chance=0.3,\n gain_resistance_chance=0.5,\n ):\n\n self.num_nodes = num_nodes\n prob = avg_node_degree / self.num_nodes\n self.G = nx.erdos_renyi_graph(n=self.num_nodes, p=prob)\n self.grid = NetworkGrid(self.G)\n self.schedule = RandomActivation(self)\n self.initial_outbreak_size = (\n initial_outbreak_size if initial_outbreak_size <= num_nodes else num_nodes\n )\n self.virus_spread_chance = virus_spread_chance\n self.virus_check_frequency = virus_check_frequency\n self.recovery_chance = recovery_chance\n self.gain_resistance_chance = gain_resistance_chance\n\n self.datacollector = DataCollector(\n {\n \"Infected\": number_infected,\n \"Susceptible\": number_susceptible,\n \"Resistant\": number_resistant,\n }\n )\n\n # Create agents\n for i, node in enumerate(self.G.nodes()):\n a = VirusAgent(\n i,\n self,\n State.SUSCEPTIBLE,\n self.virus_spread_chance,\n self.virus_check_frequency,\n self.recovery_chance,\n self.gain_resistance_chance,\n )\n self.schedule.add(a)\n # Add the agent to the node\n self.grid.place_agent(a, node)\n\n # Infect some nodes\n infected_nodes = self.random.sample(list(self.G), self.initial_outbreak_size)\n for a in self.grid.get_cell_list_contents(infected_nodes):\n a.state = State.INFECTED\n\n self.running = True\n self.datacollector.collect(self)\n\n def resistant_susceptible_ratio(self):\n try:\n return number_state(self, State.RESISTANT) / number_state(\n self, State.SUSCEPTIBLE\n )\n except ZeroDivisionError:\n return math.inf\n\n def step(self):\n self.schedule.step()\n # collect data\n self.datacollector.collect(self)\n\n def run_model(self, n):\n for i in range(n):\n self.step()\n\n\nclass VirusAgent(Agent):\n def __init__(\n self,\n unique_id,\n model,\n initial_state,\n virus_spread_chance,\n virus_check_frequency,\n recovery_chance,\n gain_resistance_chance,\n ):\n super().__init__(unique_id, model)\n\n self.state = initial_state\n\n self.virus_spread_chance = virus_spread_chance\n self.virus_check_frequency = virus_check_frequency\n self.recovery_chance = recovery_chance\n self.gain_resistance_chance = gain_resistance_chance\n\n def try_to_infect_neighbors(self):\n neighbors_nodes = self.model.grid.get_neighbors(self.pos, include_center=False)\n susceptible_neighbors = [\n agent\n for agent in self.model.grid.get_cell_list_contents(neighbors_nodes)\n if agent.state is State.SUSCEPTIBLE\n ]\n for a in susceptible_neighbors:\n if self.random.random() < self.virus_spread_chance:\n a.state = State.INFECTED\n\n def try_gain_resistance(self):\n if self.random.random() < self.gain_resistance_chance:\n self.state = State.RESISTANT\n\n def try_remove_infection(self):\n # Try to remove\n if self.random.random() < self.recovery_chance:\n # Success\n self.state = State.SUSCEPTIBLE\n self.try_gain_resistance()\n else:\n # Failed\n self.state = State.INFECTED\n\n def try_check_situation(self):\n if self.random.random() < self.virus_check_frequency:\n # Checking...\n if self.state is State.INFECTED:\n self.try_remove_infection()\n\n def step(self):\n if self.state is State.INFECTED:\n self.try_to_infect_neighbors()\n self.try_check_situation()\n", "path": "examples/virus_on_network/virus_on_network/model.py"}, {"content": "import mesa\nimport networkx as 
nx\n\n\ndef compute_gini(model):\n agent_wealths = [agent.wealth for agent in model.schedule.agents]\n x = sorted(agent_wealths)\n N = model.num_agents\n B = sum(xi * (N - i) for i, xi in enumerate(x)) / (N * sum(x))\n return 1 + (1 / N) - 2 * B\n\n\nclass BoltzmannWealthModelNetwork(mesa.Model):\n \"\"\"A model with some number of agents.\"\"\"\n\n def __init__(self, num_agents=7, num_nodes=10):\n\n self.num_agents = num_agents\n self.num_nodes = num_nodes if num_nodes >= self.num_agents else self.num_agents\n self.G = nx.erdos_renyi_graph(n=self.num_nodes, p=0.5)\n self.grid = mesa.space.NetworkGrid(self.G)\n self.schedule = mesa.time.RandomActivation(self)\n self.datacollector = mesa.DataCollector(\n model_reporters={\"Gini\": compute_gini},\n agent_reporters={\"Wealth\": lambda _: _.wealth},\n )\n\n list_of_random_nodes = self.random.sample(list(self.G), self.num_agents)\n\n # Create agents\n for i in range(self.num_agents):\n a = MoneyAgent(i, self)\n self.schedule.add(a)\n # Add the agent to a random node\n self.grid.place_agent(a, list_of_random_nodes[i])\n\n self.running = True\n self.datacollector.collect(self)\n\n def step(self):\n self.schedule.step()\n # collect data\n self.datacollector.collect(self)\n\n def run_model(self, n):\n for i in range(n):\n self.step()\n\n\nclass MoneyAgent(mesa.Agent):\n \"\"\"An agent with fixed initial wealth.\"\"\"\n\n def __init__(self, unique_id, model):\n super().__init__(unique_id, model)\n self.wealth = 1\n\n def move(self):\n possible_steps = [\n node\n for node in self.model.grid.get_neighbors(self.pos, include_center=False)\n if self.model.grid.is_cell_empty(node)\n ]\n if len(possible_steps) > 0:\n new_position = self.random.choice(possible_steps)\n self.model.grid.move_agent(self, new_position)\n\n def give_money(self):\n\n neighbors_nodes = self.model.grid.get_neighbors(self.pos, include_center=False)\n neighbors = self.model.grid.get_cell_list_contents(neighbors_nodes)\n if len(neighbors) > 0:\n other = self.random.choice(neighbors)\n other.wealth += 1\n self.wealth -= 1\n\n def step(self):\n self.move()\n if self.wealth > 0:\n self.give_money()\n", "path": "examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/model.py"}]}
| 2,774 | 312 |
gh_patches_debug_25919
|
rasdani/github-patches
|
git_diff
|
archlinux__archinstall-823
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
mkinitcpio.conf generated incorrectly for AMDGPU.
As the Arch Wiki page on AMDGPU states ([https://wiki.archlinux.org/title/AMDGPU#Specify_the_correct_module_order](https://wiki.archlinux.org/title/AMDGPU#Specify_the_correct_module_order)), you must ensure that the amdgpu module is loaded before the radeon one in mkinitcpio.conf: `MODULES=(amdgpu radeon)`
Otherwise the display manager (DM) will fail to start at boot.
--- END ISSUE ---
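A small standalone sketch of the ordering logic that the patch below applies to the installer's `MODULES` list; the plain Python list here stands in for `archinstall.storage['installation_session'].MODULES`.

```python
# Sketch: ensure 'amdgpu' ends up ahead of 'radeon' in the mkinitcpio MODULES list.
modules = ["ext4", "radeon"]  # illustrative starting state

for name in ("amdgpu", "radeon"):  # appending in this order fixes the load order
    if name in modules:
        modules.remove(name)
    modules.append(name)

assert modules.index("amdgpu") < modules.index("radeon")
print(modules)  # ['ext4', 'amdgpu', 'radeon']
```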
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `profiles/xorg.py`
Content:
```
1 # A system with "xorg" installed
2
3 import archinstall
4 import logging
5
6 is_top_level_profile = True
7
8 __description__ = 'Installs a minimal system as well as xorg and graphics drivers.'
9
10 __packages__ = [
11 'dkms',
12 'xorg-server',
13 'xorg-xinit',
14 'nvidia-dkms',
15 *archinstall.lib.hardware.__packages__,
16 ]
17
18
19 def _prep_function(*args, **kwargs):
20 """
21 Magic function called by the importing installer
22 before continuing any further. It also avoids executing any
23 other code in this stage. So it's a safe way to ask the user
24 for more input before any other installer steps start.
25 """
26
27 archinstall.storage["gfx_driver_packages"] = archinstall.select_driver()
28
29 # TODO: Add language section and/or merge it with the locale selected
30 # earlier in for instance guided.py installer.
31
32 return True
33
34
35 # Ensures that this code only gets executed if executed
36 # through importlib.util.spec_from_file_location("xorg", "/somewhere/xorg.py")
37 # or through conventional import xorg
38 if __name__ == 'xorg':
39 try:
40 if "nvidia" in archinstall.storage.get("gfx_driver_packages", []):
41 if "linux-zen" in archinstall.storage['installation_session'].base_packages or "linux-lts" in archinstall.storage['installation_session'].base_packages:
42 for kernel in archinstall.storage['installation_session'].kernels:
43 archinstall.storage['installation_session'].add_additional_packages(f"{kernel}-headers") # Fixes https://github.com/archlinux/archinstall/issues/585
44 archinstall.storage['installation_session'].add_additional_packages("dkms") # I've had kernel regen fail if it wasn't installed before nvidia-dkms
45 archinstall.storage['installation_session'].add_additional_packages("xorg-server xorg-xinit nvidia-dkms")
46 else:
47 archinstall.storage['installation_session'].add_additional_packages(f"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', []))}")
48 else:
49 archinstall.storage['installation_session'].add_additional_packages(f"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', []))}")
50 except Exception as err:
51 archinstall.log(f"Could not handle nvidia and linuz-zen specific situations during xorg installation: {err}", level=logging.WARNING, fg="yellow")
52 archinstall.storage['installation_session'].add_additional_packages("xorg-server xorg-xinit") # Prep didn't run, so there's no driver to install
53
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/profiles/xorg.py b/profiles/xorg.py
--- a/profiles/xorg.py
+++ b/profiles/xorg.py
@@ -45,6 +45,17 @@
archinstall.storage['installation_session'].add_additional_packages("xorg-server xorg-xinit nvidia-dkms")
else:
archinstall.storage['installation_session'].add_additional_packages(f"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', []))}")
+ elif 'amdgpu' in archinstall.storage.get("gfx_driver_packages", []):
+ # The order of these two are important if amdgpu is installed #808
+ if 'amdgpu' in archinstall.storage['installation_session'].MODULES:
+ archinstall.storage['installation_session'].MODULES.remove('amdgpu')
+ archinstall.storage['installation_session'].MODULES.append('amdgpu')
+
+ if 'radeon' in archinstall.storage['installation_session'].MODULES:
+ archinstall.storage['installation_session'].MODULES.remove('radeon')
+ archinstall.storage['installation_session'].MODULES.append('radeon')
+
+ archinstall.storage['installation_session'].add_additional_packages(f"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', []))}")
else:
archinstall.storage['installation_session'].add_additional_packages(f"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', []))}")
except Exception as err:
|
{"golden_diff": "diff --git a/profiles/xorg.py b/profiles/xorg.py\n--- a/profiles/xorg.py\n+++ b/profiles/xorg.py\n@@ -45,6 +45,17 @@\n \t\t\t\tarchinstall.storage['installation_session'].add_additional_packages(\"xorg-server xorg-xinit nvidia-dkms\")\n \t\t\telse:\n \t\t\t\tarchinstall.storage['installation_session'].add_additional_packages(f\"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', []))}\")\n+\t\telif 'amdgpu' in archinstall.storage.get(\"gfx_driver_packages\", []):\n+\t\t\t# The order of these two are important if amdgpu is installed #808\n+\t\t\tif 'amdgpu' in archinstall.storage['installation_session'].MODULES:\n+\t\t\t\tarchinstall.storage['installation_session'].MODULES.remove('amdgpu')\n+\t\t\tarchinstall.storage['installation_session'].MODULES.append('amdgpu')\n+\n+\t\t\tif 'radeon' in archinstall.storage['installation_session'].MODULES:\n+\t\t\t\tarchinstall.storage['installation_session'].MODULES.remove('radeon')\n+\t\t\tarchinstall.storage['installation_session'].MODULES.append('radeon')\n+\n+\t\t\tarchinstall.storage['installation_session'].add_additional_packages(f\"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', []))}\")\n \t\telse:\n \t\t\tarchinstall.storage['installation_session'].add_additional_packages(f\"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', []))}\")\n \texcept Exception as err:\n", "issue": "mkinitcpio.conf generated incorrectly for AMDGPU.\nAs the archwiki installation guide states [https://wiki.archlinux.org/title/AMDGPU#Specify_the_correct_module_order](https://wiki.archlinux.org/title/AMDGPU#Specify_the_correct_module_order), you must ensure that the amdgpu module is loaded before the radeon one: `MODULES=(amdgpu radeon)`\r\nOtherwise the DM will fail to start at boot.\n", "before_files": [{"content": "# A system with \"xorg\" installed\n\nimport archinstall\nimport logging\n\nis_top_level_profile = True\n\n__description__ = 'Installs a minimal system as well as xorg and graphics drivers.'\n\n__packages__ = [\n\t'dkms',\n\t'xorg-server',\n\t'xorg-xinit',\n\t'nvidia-dkms',\n\t*archinstall.lib.hardware.__packages__,\n]\n\n\ndef _prep_function(*args, **kwargs):\n\t\"\"\"\n\tMagic function called by the importing installer\n\tbefore continuing any further. It also avoids executing any\n\tother code in this stage. 
So it's a safe way to ask the user\n\tfor more input before any other installer steps start.\n\t\"\"\"\n\n\tarchinstall.storage[\"gfx_driver_packages\"] = archinstall.select_driver()\n\n\t# TODO: Add language section and/or merge it with the locale selected\n\t# earlier in for instance guided.py installer.\n\n\treturn True\n\n\n# Ensures that this code only gets executed if executed\n# through importlib.util.spec_from_file_location(\"xorg\", \"/somewhere/xorg.py\")\n# or through conventional import xorg\nif __name__ == 'xorg':\n\ttry:\n\t\tif \"nvidia\" in archinstall.storage.get(\"gfx_driver_packages\", []):\n\t\t\tif \"linux-zen\" in archinstall.storage['installation_session'].base_packages or \"linux-lts\" in archinstall.storage['installation_session'].base_packages:\n\t\t\t\tfor kernel in archinstall.storage['installation_session'].kernels:\n\t\t\t\t\tarchinstall.storage['installation_session'].add_additional_packages(f\"{kernel}-headers\") # Fixes https://github.com/archlinux/archinstall/issues/585\n\t\t\t\tarchinstall.storage['installation_session'].add_additional_packages(\"dkms\") # I've had kernel regen fail if it wasn't installed before nvidia-dkms\n\t\t\t\tarchinstall.storage['installation_session'].add_additional_packages(\"xorg-server xorg-xinit nvidia-dkms\")\n\t\t\telse:\n\t\t\t\tarchinstall.storage['installation_session'].add_additional_packages(f\"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', []))}\")\n\t\telse:\n\t\t\tarchinstall.storage['installation_session'].add_additional_packages(f\"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', []))}\")\n\texcept Exception as err:\n\t\tarchinstall.log(f\"Could not handle nvidia and linuz-zen specific situations during xorg installation: {err}\", level=logging.WARNING, fg=\"yellow\")\n\t\tarchinstall.storage['installation_session'].add_additional_packages(\"xorg-server xorg-xinit\") # Prep didn't run, so there's no driver to install\n", "path": "profiles/xorg.py"}], "after_files": [{"content": "# A system with \"xorg\" installed\n\nimport archinstall\nimport logging\n\nis_top_level_profile = True\n\n__description__ = 'Installs a minimal system as well as xorg and graphics drivers.'\n\n__packages__ = [\n\t'dkms',\n\t'xorg-server',\n\t'xorg-xinit',\n\t'nvidia-dkms',\n\t*archinstall.lib.hardware.__packages__,\n]\n\n\ndef _prep_function(*args, **kwargs):\n\t\"\"\"\n\tMagic function called by the importing installer\n\tbefore continuing any further. It also avoids executing any\n\tother code in this stage. 
So it's a safe way to ask the user\n\tfor more input before any other installer steps start.\n\t\"\"\"\n\n\tarchinstall.storage[\"gfx_driver_packages\"] = archinstall.select_driver()\n\n\t# TODO: Add language section and/or merge it with the locale selected\n\t# earlier in for instance guided.py installer.\n\n\treturn True\n\n\n# Ensures that this code only gets executed if executed\n# through importlib.util.spec_from_file_location(\"xorg\", \"/somewhere/xorg.py\")\n# or through conventional import xorg\nif __name__ == 'xorg':\n\ttry:\n\t\tif \"nvidia\" in archinstall.storage.get(\"gfx_driver_packages\", []):\n\t\t\tif \"linux-zen\" in archinstall.storage['installation_session'].base_packages or \"linux-lts\" in archinstall.storage['installation_session'].base_packages:\n\t\t\t\tfor kernel in archinstall.storage['installation_session'].kernels:\n\t\t\t\t\tarchinstall.storage['installation_session'].add_additional_packages(f\"{kernel}-headers\") # Fixes https://github.com/archlinux/archinstall/issues/585\n\t\t\t\tarchinstall.storage['installation_session'].add_additional_packages(\"dkms\") # I've had kernel regen fail if it wasn't installed before nvidia-dkms\n\t\t\t\tarchinstall.storage['installation_session'].add_additional_packages(\"xorg-server xorg-xinit nvidia-dkms\")\n\t\t\telse:\n\t\t\t\tarchinstall.storage['installation_session'].add_additional_packages(f\"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', []))}\")\n\t\telif 'amdgpu' in archinstall.storage.get(\"gfx_driver_packages\", []):\n\t\t\t# The order of these two are important if amdgpu is installed #808\n\t\t\tif 'amdgpu' in archinstall.storage['installation_session'].MODULES:\n\t\t\t\tarchinstall.storage['installation_session'].MODULES.remove('amdgpu')\n\t\t\tarchinstall.storage['installation_session'].MODULES.append('amdgpu')\n\n\t\t\tif 'radeon' in archinstall.storage['installation_session'].MODULES:\n\t\t\t\tarchinstall.storage['installation_session'].MODULES.remove('radeon')\n\t\t\tarchinstall.storage['installation_session'].MODULES.append('radeon')\n\n\t\t\tarchinstall.storage['installation_session'].add_additional_packages(f\"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', []))}\")\n\t\telse:\n\t\t\tarchinstall.storage['installation_session'].add_additional_packages(f\"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', []))}\")\n\texcept Exception as err:\n\t\tarchinstall.log(f\"Could not handle nvidia and linuz-zen specific situations during xorg installation: {err}\", level=logging.WARNING, fg=\"yellow\")\n\t\tarchinstall.storage['installation_session'].add_additional_packages(\"xorg-server xorg-xinit\") # Prep didn't run, so there's no driver to install\n", "path": "profiles/xorg.py"}]}
| 1,032 | 343 |
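The xorg profile patch in the record above hinges on ordering inside mkinitcpio's MODULES array: per the issue text, `amdgpu` has to be listed before `radeon`, otherwise the display manager fails to start at boot. The patch enforces that by removing each module if present and re-appending both in the required order. Below is a plain-Python sketch of that remove-then-append idiom; the function name and the standalone setting are mine, not archinstall code.

```python
def order_amdgpu_modules(modules):
    """Return a copy of `modules` with 'amdgpu' listed before 'radeon'."""
    # Drop any existing occurrences, then append both in the required order,
    # mirroring the remove-then-append steps in the patch above.
    ordered = [m for m in modules if m not in ("amdgpu", "radeon")]
    ordered.append("amdgpu")   # must come before 'radeon' (see the issue text)
    ordered.append("radeon")
    return ordered


print(order_amdgpu_modules(["radeon", "crc32c", "amdgpu"]))
# -> ['crc32c', 'amdgpu', 'radeon']
```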
gh_patches_debug_2112
|
rasdani/github-patches
|
git_diff
|
Qiskit__qiskit-1940
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
rzz gate
<!-- ⚠️ If you do not respect this template, your issue will be closed -->
<!-- ⚠️ Make sure to browse the opened and closed issues -->
### Information
- **Qiskit Terra version**: 0.7.2
- **Python version**: 3.6.6
- **Operating system**: Windows 10
### What is the current behavior?
rzz gate appears to give incorrect results
### Steps to reproduce the problem
rzz gate rule defined in https://github.com/Qiskit/qiskit-terra/blob/master/qiskit/extensions/standard/rzz.py
```
CnotGate(q[0], q[1]),
U1Gate(self.params[0], q[0]),
CnotGate(q[0], q[1])
```
### What is the expected behavior?
I think it should be
```
CnotGate(q[0], q[1]),
U1Gate(self.params[0], q[1]),
CnotGate(q[0], q[1])
```
the u1 phase should be on the target instead of control
### Suggested solutions
modify rzz gate definition to give the right behavior.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qiskit/extensions/standard/rzz.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright 2017, IBM.
4 #
5 # This source code is licensed under the Apache License, Version 2.0 found in
6 # the LICENSE.txt file in the root directory of this source tree.
7
8 """
9 two-qubit ZZ-rotation gate.
10 """
11 from qiskit.circuit import CompositeGate
12 from qiskit.circuit import Gate
13 from qiskit.circuit import QuantumCircuit
14 from qiskit.circuit import QuantumRegister
15 from qiskit.circuit.decorators import _op_expand
16 from qiskit.dagcircuit import DAGCircuit
17 from qiskit.extensions.standard.u1 import U1Gate
18 from qiskit.extensions.standard.cx import CnotGate
19
20
21 class RZZGate(Gate):
22 """Two-qubit ZZ-rotation gate."""
23
24 def __init__(self, theta, ctl, tgt, circ=None):
25 """Create new rzz gate."""
26 super().__init__("rzz", [theta], [ctl, tgt], circ)
27
28 def _define_decompositions(self):
29 """
30 gate rzz(theta) a, b { cx a, b; u1(theta) b; cx a, b; }
31 """
32 decomposition = DAGCircuit()
33 q = QuantumRegister(2, "q")
34 decomposition.add_qreg(q)
35 rule = [
36 CnotGate(q[0], q[1]),
37 U1Gate(self.params[0], q[0]),
38 CnotGate(q[0], q[1])
39 ]
40 for inst in rule:
41 decomposition.apply_operation_back(inst)
42 self._decompositions = [decomposition]
43
44 def inverse(self):
45 """Invert this gate."""
46 self.params[0] = -self.params[0]
47 self._decompositions = None
48 return self
49
50 def reapply(self, circ):
51 """Reapply this gate to corresponding qubits in circ."""
52 self._modifiers(circ.rzz(self.params[0], self.qargs[0], self.qargs[1]))
53
54
55 @_op_expand(2, broadcastable=[False, False])
56 def rzz(self, theta, qubit1, qubit2):
57 """Apply RZZ to circuit."""
58 self._check_qubit(qubit1)
59 self._check_qubit(qubit2)
60 self._check_dups([qubit1, qubit2])
61 return self._attach(RZZGate(theta, qubit1, qubit2, self))
62
63
64 # Add to QuantumCircuit and CompositeGate classes
65 QuantumCircuit.rzz = rzz
66 CompositeGate.rzz = rzz
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/qiskit/extensions/standard/rzz.py b/qiskit/extensions/standard/rzz.py
--- a/qiskit/extensions/standard/rzz.py
+++ b/qiskit/extensions/standard/rzz.py
@@ -34,7 +34,7 @@
decomposition.add_qreg(q)
rule = [
CnotGate(q[0], q[1]),
- U1Gate(self.params[0], q[0]),
+ U1Gate(self.params[0], q[1]),
CnotGate(q[0], q[1])
]
for inst in rule:
|
{"golden_diff": "diff --git a/qiskit/extensions/standard/rzz.py b/qiskit/extensions/standard/rzz.py\n--- a/qiskit/extensions/standard/rzz.py\n+++ b/qiskit/extensions/standard/rzz.py\n@@ -34,7 +34,7 @@\n decomposition.add_qreg(q)\n rule = [\n CnotGate(q[0], q[1]),\n- U1Gate(self.params[0], q[0]),\n+ U1Gate(self.params[0], q[1]),\n CnotGate(q[0], q[1])\n ]\n for inst in rule:\n", "issue": "rzz gate\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues -->\r\n\r\n### Information\r\n\r\n- **Qiskit Terra version**: 0.7.2\r\n- **Python version**: 3.6.6\r\n- **Operating system**: Windows 10\r\n\r\n### What is the current behavior?\r\n\r\nrzz gate appears to give incorrect results\r\n\r\n### Steps to reproduce the problem\r\n\r\nrzz gate rule defined in https://github.com/Qiskit/qiskit-terra/blob/master/qiskit/extensions/standard/rzz.py\r\n\r\n```\r\n CnotGate(q[0], q[1]),\r\n U1Gate(self.params[0], q[0]),\r\n CnotGate(q[0], q[1])\r\n```\r\n\r\n### What is the expected behavior?\r\n\r\nI think it should be\r\n```\r\n CnotGate(q[0], q[1]),\r\n U1Gate(self.params[0], q[1]),\r\n CnotGate(q[0], q[1])\r\n```\r\nthe u1 phase should be on the target instead of control\r\n\r\n### Suggested solutions\r\n\r\nmodify rzz gate definition to give the right behavior.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright 2017, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n\"\"\"\ntwo-qubit ZZ-rotation gate.\n\"\"\"\nfrom qiskit.circuit import CompositeGate\nfrom qiskit.circuit import Gate\nfrom qiskit.circuit import QuantumCircuit\nfrom qiskit.circuit import QuantumRegister\nfrom qiskit.circuit.decorators import _op_expand\nfrom qiskit.dagcircuit import DAGCircuit\nfrom qiskit.extensions.standard.u1 import U1Gate\nfrom qiskit.extensions.standard.cx import CnotGate\n\n\nclass RZZGate(Gate):\n \"\"\"Two-qubit ZZ-rotation gate.\"\"\"\n\n def __init__(self, theta, ctl, tgt, circ=None):\n \"\"\"Create new rzz gate.\"\"\"\n super().__init__(\"rzz\", [theta], [ctl, tgt], circ)\n\n def _define_decompositions(self):\n \"\"\"\n gate rzz(theta) a, b { cx a, b; u1(theta) b; cx a, b; }\n \"\"\"\n decomposition = DAGCircuit()\n q = QuantumRegister(2, \"q\")\n decomposition.add_qreg(q)\n rule = [\n CnotGate(q[0], q[1]),\n U1Gate(self.params[0], q[0]),\n CnotGate(q[0], q[1])\n ]\n for inst in rule:\n decomposition.apply_operation_back(inst)\n self._decompositions = [decomposition]\n\n def inverse(self):\n \"\"\"Invert this gate.\"\"\"\n self.params[0] = -self.params[0]\n self._decompositions = None\n return self\n\n def reapply(self, circ):\n \"\"\"Reapply this gate to corresponding qubits in circ.\"\"\"\n self._modifiers(circ.rzz(self.params[0], self.qargs[0], self.qargs[1]))\n\n\n@_op_expand(2, broadcastable=[False, False])\ndef rzz(self, theta, qubit1, qubit2):\n \"\"\"Apply RZZ to circuit.\"\"\"\n self._check_qubit(qubit1)\n self._check_qubit(qubit2)\n self._check_dups([qubit1, qubit2])\n return self._attach(RZZGate(theta, qubit1, qubit2, self))\n\n\n# Add to QuantumCircuit and CompositeGate classes\nQuantumCircuit.rzz = rzz\nCompositeGate.rzz = rzz\n", "path": "qiskit/extensions/standard/rzz.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright 2017, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root 
directory of this source tree.\n\n\"\"\"\ntwo-qubit ZZ-rotation gate.\n\"\"\"\nfrom qiskit.circuit import CompositeGate\nfrom qiskit.circuit import Gate\nfrom qiskit.circuit import QuantumCircuit\nfrom qiskit.circuit import QuantumRegister\nfrom qiskit.circuit.decorators import _op_expand\nfrom qiskit.dagcircuit import DAGCircuit\nfrom qiskit.extensions.standard.u1 import U1Gate\nfrom qiskit.extensions.standard.cx import CnotGate\n\n\nclass RZZGate(Gate):\n \"\"\"Two-qubit ZZ-rotation gate.\"\"\"\n\n def __init__(self, theta, ctl, tgt, circ=None):\n \"\"\"Create new rzz gate.\"\"\"\n super().__init__(\"rzz\", [theta], [ctl, tgt], circ)\n\n def _define_decompositions(self):\n \"\"\"\n gate rzz(theta) a, b { cx a, b; u1(theta) b; cx a, b; }\n \"\"\"\n decomposition = DAGCircuit()\n q = QuantumRegister(2, \"q\")\n decomposition.add_qreg(q)\n rule = [\n CnotGate(q[0], q[1]),\n U1Gate(self.params[0], q[1]),\n CnotGate(q[0], q[1])\n ]\n for inst in rule:\n decomposition.apply_operation_back(inst)\n self._decompositions = [decomposition]\n\n def inverse(self):\n \"\"\"Invert this gate.\"\"\"\n self.params[0] = -self.params[0]\n self._decompositions = None\n return self\n\n def reapply(self, circ):\n \"\"\"Reapply this gate to corresponding qubits in circ.\"\"\"\n self._modifiers(circ.rzz(self.params[0], self.qargs[0], self.qargs[1]))\n\n\n@_op_expand(2, broadcastable=[False, False])\ndef rzz(self, theta, qubit1, qubit2):\n \"\"\"Apply RZZ to circuit.\"\"\"\n self._check_qubit(qubit1)\n self._check_qubit(qubit2)\n self._check_dups([qubit1, qubit2])\n return self._attach(RZZGate(theta, qubit1, qubit2, self))\n\n\n# Add to QuantumCircuit and CompositeGate classes\nQuantumCircuit.rzz = rzz\nCompositeGate.rzz = rzz\n", "path": "qiskit/extensions/standard/rzz.py"}]}
| 1,209 | 131 |
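The rzz correction in the record above can be sanity-checked numerically. With `u1(theta)` on the target, `cx; u1; cx` is diagonal and puts the phase e^{i*theta} on the odd-parity states |01> and |10>, which is the ZZ interaction rzz is meant to implement (up to a global phase). With `u1(theta)` on the control, the u1 is diagonal on the control and commutes through both cx gates, so the circuit collapses to a single-qubit phase. The NumPy check below is independent of Qiskit and uses my own basis ordering |q0 q1> with q0 as control.

```python
import numpy as np

theta = 0.7
u1 = np.diag([1.0, np.exp(1j * theta)])   # u1(theta) = diag(1, e^{i*theta})
i2 = np.eye(2)

# CX with q0 as control and q1 as target, basis order |q0 q1>.
cx = np.array([[1, 0, 0, 0],
               [0, 1, 0, 0],
               [0, 0, 0, 1],
               [0, 0, 1, 0]], dtype=complex)

fixed = cx @ np.kron(i2, u1) @ cx    # phase on the target (corrected rule)
buggy = cx @ np.kron(u1, i2) @ cx    # phase on the control (original rule)

# Expected ZZ phase pattern: e^{i*theta} on |01> and |10> only.
zz_phase = np.diag([1, np.exp(1j * theta), np.exp(1j * theta), 1])

print(np.allclose(fixed, zz_phase))          # True
print(np.allclose(buggy, np.kron(u1, i2)))   # True: just a local u1 on q0
```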
gh_patches_debug_10367
|
rasdani/github-patches
|
git_diff
|
saleor__saleor-705
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
are product class labels switched?
Hello,
I'm looking at the new product class feature.
About the labels ... should not be the other way around?
https://github.com/mirumee/saleor/blob/master/saleor/dashboard/product/forms.py#L53-L59


--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/dashboard/product/forms.py`
Content:
```
1 from __future__ import unicode_literals
2
3 from django import forms
4 from django.db import transaction
5 from django.db.models import Count
6 from django.forms.models import ModelChoiceIterator, inlineformset_factory
7 from django.utils.encoding import smart_text
8 from django.utils.translation import pgettext_lazy
9
10 from ...product.models import (AttributeChoiceValue, Product, ProductAttribute,
11 ProductClass, ProductImage, ProductVariant,
12 Stock, StockLocation, VariantImage)
13 from .widgets import ImagePreviewWidget
14 from ...search import index as search_index
15
16
17 class ProductClassSelectorForm(forms.Form):
18 MAX_RADIO_SELECT_ITEMS = 5
19
20 def __init__(self, *args, **kwargs):
21 product_classes = kwargs.pop('product_classes', [])
22 super(ProductClassSelectorForm, self).__init__(*args, **kwargs)
23 choices = [(obj.pk, obj.name) for obj in product_classes]
24 if len(product_classes) > self.MAX_RADIO_SELECT_ITEMS:
25 widget = forms.Select
26 else:
27 widget = forms.RadioSelect
28 self.fields['product_cls'] = forms.ChoiceField(
29 label=pgettext_lazy('Product class form label', 'Product type'),
30 choices=choices, widget=widget)
31
32
33 class StockForm(forms.ModelForm):
34 class Meta:
35 model = Stock
36 exclude = ['quantity_allocated']
37
38 def __init__(self, *args, **kwargs):
39 product = kwargs.pop('product')
40 super(StockForm, self).__init__(*args, **kwargs)
41 if not product.product_class.has_variants:
42 initial = product.variants.first()
43 else:
44 initial = None
45 self.fields['variant'] = forms.ModelChoiceField(
46 queryset=product.variants, initial=initial)
47
48
49 class ProductClassForm(forms.ModelForm):
50 class Meta:
51 model = ProductClass
52 exclude = []
53 labels = {
54 'product_attributes': pgettext_lazy(
55 'Product class form label',
56 'Attributes specific to each variant'),
57 'variant_attributes': pgettext_lazy(
58 'Product class form label',
59 'Attributes common to all variants')}
60
61 def clean(self):
62 data = super(ProductClassForm, self).clean()
63 has_variants = self.cleaned_data['has_variants']
64 product_attr = set(self.cleaned_data['product_attributes'])
65 variant_attr = set(self.cleaned_data['variant_attributes'])
66 if not has_variants and len(variant_attr) > 0:
67 msg = pgettext_lazy(
68 'Product Class Errors',
69 'Product variants are disabled.')
70 self.add_error('variant_attributes', msg)
71 if len(product_attr & variant_attr) > 0:
72 msg = pgettext_lazy(
73 'Product Class Errors',
74 'A single attribute can\'t belong to both a product '
75 'and its variant.')
76 self.add_error('variant_attributes', msg)
77
78 if self.instance.pk:
79 variants_changed = not (self.fields['has_variants'].initial ==
80 has_variants)
81 if variants_changed:
82 query = self.instance.products.all()
83 query = query.annotate(variants_counter=Count('variants'))
84 query = query.filter(variants_counter__gt=1)
85 if query.exists():
86 msg = pgettext_lazy(
87 'Product Class Errors',
88 'Some products of this type have more than '
89 'one variant.')
90 self.add_error('has_variants', msg)
91 return data
92
93
94 class ProductForm(forms.ModelForm):
95
96 class Meta:
97 model = Product
98 exclude = ['attributes', 'product_class']
99
100 def __init__(self, *args, **kwargs):
101 self.product_attributes = []
102 super(ProductForm, self).__init__(*args, **kwargs)
103 field = self.fields['name']
104 field.widget.attrs['placeholder'] = pgettext_lazy(
105 'Product form labels', 'Give your awesome product a name')
106 field = self.fields['categories']
107 field.widget.attrs['data-placeholder'] = pgettext_lazy(
108 'Product form labels', 'Search')
109 product_class = self.instance.product_class
110 self.product_attributes = product_class.product_attributes.all()
111 self.product_attributes = self.product_attributes.prefetch_related(
112 'values')
113 self.prepare_fields_for_attributes()
114
115 def prepare_fields_for_attributes(self):
116 for attribute in self.product_attributes:
117 field_defaults = {
118 'label': attribute.display,
119 'required': False,
120 'initial': self.instance.get_attribute(attribute.pk)}
121 if attribute.has_values():
122 field = CachingModelChoiceField(
123 queryset=attribute.values.all(), **field_defaults)
124 else:
125 field = forms.CharField(**field_defaults)
126 self.fields[attribute.get_formfield_name()] = field
127
128 def iter_attribute_fields(self):
129 for attr in self.product_attributes:
130 yield self[attr.get_formfield_name()]
131
132 def save(self, commit=True):
133 attributes = {}
134 for attr in self.product_attributes:
135 value = self.cleaned_data.pop(attr.get_formfield_name())
136 if isinstance(value, AttributeChoiceValue):
137 attributes[smart_text(attr.pk)] = smart_text(value.pk)
138 else:
139 attributes[smart_text(attr.pk)] = value
140 self.instance.attributes = attributes
141 instance = super(ProductForm, self).save(commit=commit)
142 search_index.insert_or_update_object(instance)
143 return instance
144
145
146 class ProductVariantForm(forms.ModelForm):
147 class Meta:
148 model = ProductVariant
149 exclude = ['attributes', 'product', 'images']
150
151 def __init__(self, *args, **kwargs):
152 super(ProductVariantForm, self).__init__(*args, **kwargs)
153 if self.instance.product.pk:
154 self.fields['price_override'].widget.attrs[
155 'placeholder'] = self.instance.product.price.gross
156 self.fields['weight_override'].widget.attrs[
157 'placeholder'] = self.instance.product.weight
158
159
160 class CachingModelChoiceIterator(ModelChoiceIterator):
161 def __iter__(self):
162 if self.field.empty_label is not None:
163 yield ('', self.field.empty_label)
164 for obj in self.queryset:
165 yield self.choice(obj)
166
167
168 class CachingModelChoiceField(forms.ModelChoiceField):
169 def _get_choices(self):
170 if hasattr(self, '_choices'):
171 return self._choices
172 return CachingModelChoiceIterator(self)
173 choices = property(_get_choices, forms.ChoiceField._set_choices)
174
175
176 class VariantAttributeForm(forms.ModelForm):
177 class Meta:
178 model = ProductVariant
179 fields = []
180
181 def __init__(self, *args, **kwargs):
182 super(VariantAttributeForm, self).__init__(*args, **kwargs)
183 attrs = self.instance.product.product_class.variant_attributes.all()
184 self.available_attrs = attrs.prefetch_related('values')
185 for attr in self.available_attrs:
186 field_defaults = {'label': attr.display,
187 'required': True,
188 'initial': self.instance.get_attribute(attr.pk)}
189 if attr.has_values():
190 field = CachingModelChoiceField(
191 queryset=attr.values.all(), **field_defaults)
192 else:
193 field = forms.CharField(**field_defaults)
194 self.fields[attr.get_formfield_name()] = field
195
196 def save(self, commit=True):
197 attributes = {}
198 for attr in self.available_attrs:
199 value = self.cleaned_data.pop(attr.get_formfield_name())
200 if isinstance(value, AttributeChoiceValue):
201 attributes[smart_text(attr.pk)] = smart_text(value.pk)
202 else:
203 attributes[smart_text(attr.pk)] = value
204 self.instance.attributes = attributes
205 return super(VariantAttributeForm, self).save(commit=commit)
206
207
208 class VariantBulkDeleteForm(forms.Form):
209 items = forms.ModelMultipleChoiceField(queryset=ProductVariant.objects)
210
211 def delete(self):
212 items = ProductVariant.objects.filter(
213 pk__in=self.cleaned_data['items'])
214 items.delete()
215
216
217 class StockBulkDeleteForm(forms.Form):
218 items = forms.ModelMultipleChoiceField(queryset=Stock.objects)
219
220 def delete(self):
221 items = Stock.objects.filter(pk__in=self.cleaned_data['items'])
222 items.delete()
223
224
225 class ProductImageForm(forms.ModelForm):
226 variants = forms.ModelMultipleChoiceField(
227 queryset=ProductVariant.objects.none(),
228 widget=forms.CheckboxSelectMultiple, required=False)
229
230 class Meta:
231 model = ProductImage
232 exclude = ('product', 'order')
233
234 def __init__(self, *args, **kwargs):
235 super(ProductImageForm, self).__init__(*args, **kwargs)
236 show_variants = self.instance.product.product_class.has_variants
237 if self.instance.product and show_variants:
238 variants = self.fields['variants']
239 variants.queryset = self.instance.product.variants.all()
240 variants.initial = self.instance.variant_images.values_list(
241 'variant', flat=True)
242 if self.instance.image:
243 self.fields['image'].widget = ImagePreviewWidget()
244
245 @transaction.atomic
246 def save_variant_images(self, instance):
247 variant_images = []
248 # Clean up old mapping
249 instance.variant_images.all().delete()
250 for variant in self.cleaned_data['variants']:
251 variant_images.append(
252 VariantImage(variant=variant, image=instance))
253 VariantImage.objects.bulk_create(variant_images)
254
255 def save(self, commit=True):
256 instance = super(ProductImageForm, self).save(commit=commit)
257 self.save_variant_images(instance)
258 return instance
259
260
261 class ProductAttributeForm(forms.ModelForm):
262 class Meta:
263 model = ProductAttribute
264 exclude = []
265
266
267 class StockLocationForm(forms.ModelForm):
268 class Meta:
269 model = StockLocation
270 exclude = []
271
272
273 AttributeChoiceValueFormset = inlineformset_factory(
274 ProductAttribute, AttributeChoiceValue, exclude=(), extra=1)
275
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/saleor/dashboard/product/forms.py b/saleor/dashboard/product/forms.py
--- a/saleor/dashboard/product/forms.py
+++ b/saleor/dashboard/product/forms.py
@@ -51,10 +51,10 @@
model = ProductClass
exclude = []
labels = {
- 'product_attributes': pgettext_lazy(
+ 'variant_attributes': pgettext_lazy(
'Product class form label',
'Attributes specific to each variant'),
- 'variant_attributes': pgettext_lazy(
+ 'product_attributes': pgettext_lazy(
'Product class form label',
'Attributes common to all variants')}
|
{"golden_diff": "diff --git a/saleor/dashboard/product/forms.py b/saleor/dashboard/product/forms.py\n--- a/saleor/dashboard/product/forms.py\n+++ b/saleor/dashboard/product/forms.py\n@@ -51,10 +51,10 @@\n model = ProductClass\n exclude = []\n labels = {\n- 'product_attributes': pgettext_lazy(\n+ 'variant_attributes': pgettext_lazy(\n 'Product class form label',\n 'Attributes specific to each variant'),\n- 'variant_attributes': pgettext_lazy(\n+ 'product_attributes': pgettext_lazy(\n 'Product class form label',\n 'Attributes common to all variants')}\n", "issue": "are product class labels switched?\nHello,\r\nI'm looking at the new product class feature.\r\n\r\nAbout the labels ... should not be the other way around?\r\nhttps://github.com/mirumee/saleor/blob/master/saleor/dashboard/product/forms.py#L53-L59\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom django import forms\nfrom django.db import transaction\nfrom django.db.models import Count\nfrom django.forms.models import ModelChoiceIterator, inlineformset_factory\nfrom django.utils.encoding import smart_text\nfrom django.utils.translation import pgettext_lazy\n\nfrom ...product.models import (AttributeChoiceValue, Product, ProductAttribute,\n ProductClass, ProductImage, ProductVariant,\n Stock, StockLocation, VariantImage)\nfrom .widgets import ImagePreviewWidget\nfrom ...search import index as search_index\n\n\nclass ProductClassSelectorForm(forms.Form):\n MAX_RADIO_SELECT_ITEMS = 5\n\n def __init__(self, *args, **kwargs):\n product_classes = kwargs.pop('product_classes', [])\n super(ProductClassSelectorForm, self).__init__(*args, **kwargs)\n choices = [(obj.pk, obj.name) for obj in product_classes]\n if len(product_classes) > self.MAX_RADIO_SELECT_ITEMS:\n widget = forms.Select\n else:\n widget = forms.RadioSelect\n self.fields['product_cls'] = forms.ChoiceField(\n label=pgettext_lazy('Product class form label', 'Product type'),\n choices=choices, widget=widget)\n\n\nclass StockForm(forms.ModelForm):\n class Meta:\n model = Stock\n exclude = ['quantity_allocated']\n\n def __init__(self, *args, **kwargs):\n product = kwargs.pop('product')\n super(StockForm, self).__init__(*args, **kwargs)\n if not product.product_class.has_variants:\n initial = product.variants.first()\n else:\n initial = None\n self.fields['variant'] = forms.ModelChoiceField(\n queryset=product.variants, initial=initial)\n\n\nclass ProductClassForm(forms.ModelForm):\n class Meta:\n model = ProductClass\n exclude = []\n labels = {\n 'product_attributes': pgettext_lazy(\n 'Product class form label',\n 'Attributes specific to each variant'),\n 'variant_attributes': pgettext_lazy(\n 'Product class form label',\n 'Attributes common to all variants')}\n\n def clean(self):\n data = super(ProductClassForm, self).clean()\n has_variants = self.cleaned_data['has_variants']\n product_attr = set(self.cleaned_data['product_attributes'])\n variant_attr = set(self.cleaned_data['variant_attributes'])\n if not has_variants and len(variant_attr) > 0:\n msg = pgettext_lazy(\n 'Product Class Errors',\n 'Product variants are disabled.')\n self.add_error('variant_attributes', msg)\n if len(product_attr & variant_attr) > 0:\n msg = pgettext_lazy(\n 'Product Class Errors',\n 'A single attribute can\\'t belong to both a product '\n 'and its variant.')\n self.add_error('variant_attributes', msg)\n\n if self.instance.pk:\n variants_changed = not (self.fields['has_variants'].initial ==\n has_variants)\n if variants_changed:\n query 
= self.instance.products.all()\n query = query.annotate(variants_counter=Count('variants'))\n query = query.filter(variants_counter__gt=1)\n if query.exists():\n msg = pgettext_lazy(\n 'Product Class Errors',\n 'Some products of this type have more than '\n 'one variant.')\n self.add_error('has_variants', msg)\n return data\n\n\nclass ProductForm(forms.ModelForm):\n\n class Meta:\n model = Product\n exclude = ['attributes', 'product_class']\n\n def __init__(self, *args, **kwargs):\n self.product_attributes = []\n super(ProductForm, self).__init__(*args, **kwargs)\n field = self.fields['name']\n field.widget.attrs['placeholder'] = pgettext_lazy(\n 'Product form labels', 'Give your awesome product a name')\n field = self.fields['categories']\n field.widget.attrs['data-placeholder'] = pgettext_lazy(\n 'Product form labels', 'Search')\n product_class = self.instance.product_class\n self.product_attributes = product_class.product_attributes.all()\n self.product_attributes = self.product_attributes.prefetch_related(\n 'values')\n self.prepare_fields_for_attributes()\n\n def prepare_fields_for_attributes(self):\n for attribute in self.product_attributes:\n field_defaults = {\n 'label': attribute.display,\n 'required': False,\n 'initial': self.instance.get_attribute(attribute.pk)}\n if attribute.has_values():\n field = CachingModelChoiceField(\n queryset=attribute.values.all(), **field_defaults)\n else:\n field = forms.CharField(**field_defaults)\n self.fields[attribute.get_formfield_name()] = field\n\n def iter_attribute_fields(self):\n for attr in self.product_attributes:\n yield self[attr.get_formfield_name()]\n\n def save(self, commit=True):\n attributes = {}\n for attr in self.product_attributes:\n value = self.cleaned_data.pop(attr.get_formfield_name())\n if isinstance(value, AttributeChoiceValue):\n attributes[smart_text(attr.pk)] = smart_text(value.pk)\n else:\n attributes[smart_text(attr.pk)] = value\n self.instance.attributes = attributes\n instance = super(ProductForm, self).save(commit=commit)\n search_index.insert_or_update_object(instance)\n return instance\n\n\nclass ProductVariantForm(forms.ModelForm):\n class Meta:\n model = ProductVariant\n exclude = ['attributes', 'product', 'images']\n\n def __init__(self, *args, **kwargs):\n super(ProductVariantForm, self).__init__(*args, **kwargs)\n if self.instance.product.pk:\n self.fields['price_override'].widget.attrs[\n 'placeholder'] = self.instance.product.price.gross\n self.fields['weight_override'].widget.attrs[\n 'placeholder'] = self.instance.product.weight\n\n\nclass CachingModelChoiceIterator(ModelChoiceIterator):\n def __iter__(self):\n if self.field.empty_label is not None:\n yield ('', self.field.empty_label)\n for obj in self.queryset:\n yield self.choice(obj)\n\n\nclass CachingModelChoiceField(forms.ModelChoiceField):\n def _get_choices(self):\n if hasattr(self, '_choices'):\n return self._choices\n return CachingModelChoiceIterator(self)\n choices = property(_get_choices, forms.ChoiceField._set_choices)\n\n\nclass VariantAttributeForm(forms.ModelForm):\n class Meta:\n model = ProductVariant\n fields = []\n\n def __init__(self, *args, **kwargs):\n super(VariantAttributeForm, self).__init__(*args, **kwargs)\n attrs = self.instance.product.product_class.variant_attributes.all()\n self.available_attrs = attrs.prefetch_related('values')\n for attr in self.available_attrs:\n field_defaults = {'label': attr.display,\n 'required': True,\n 'initial': self.instance.get_attribute(attr.pk)}\n if attr.has_values():\n field = 
CachingModelChoiceField(\n queryset=attr.values.all(), **field_defaults)\n else:\n field = forms.CharField(**field_defaults)\n self.fields[attr.get_formfield_name()] = field\n\n def save(self, commit=True):\n attributes = {}\n for attr in self.available_attrs:\n value = self.cleaned_data.pop(attr.get_formfield_name())\n if isinstance(value, AttributeChoiceValue):\n attributes[smart_text(attr.pk)] = smart_text(value.pk)\n else:\n attributes[smart_text(attr.pk)] = value\n self.instance.attributes = attributes\n return super(VariantAttributeForm, self).save(commit=commit)\n\n\nclass VariantBulkDeleteForm(forms.Form):\n items = forms.ModelMultipleChoiceField(queryset=ProductVariant.objects)\n\n def delete(self):\n items = ProductVariant.objects.filter(\n pk__in=self.cleaned_data['items'])\n items.delete()\n\n\nclass StockBulkDeleteForm(forms.Form):\n items = forms.ModelMultipleChoiceField(queryset=Stock.objects)\n\n def delete(self):\n items = Stock.objects.filter(pk__in=self.cleaned_data['items'])\n items.delete()\n\n\nclass ProductImageForm(forms.ModelForm):\n variants = forms.ModelMultipleChoiceField(\n queryset=ProductVariant.objects.none(),\n widget=forms.CheckboxSelectMultiple, required=False)\n\n class Meta:\n model = ProductImage\n exclude = ('product', 'order')\n\n def __init__(self, *args, **kwargs):\n super(ProductImageForm, self).__init__(*args, **kwargs)\n show_variants = self.instance.product.product_class.has_variants\n if self.instance.product and show_variants:\n variants = self.fields['variants']\n variants.queryset = self.instance.product.variants.all()\n variants.initial = self.instance.variant_images.values_list(\n 'variant', flat=True)\n if self.instance.image:\n self.fields['image'].widget = ImagePreviewWidget()\n\n @transaction.atomic\n def save_variant_images(self, instance):\n variant_images = []\n # Clean up old mapping\n instance.variant_images.all().delete()\n for variant in self.cleaned_data['variants']:\n variant_images.append(\n VariantImage(variant=variant, image=instance))\n VariantImage.objects.bulk_create(variant_images)\n\n def save(self, commit=True):\n instance = super(ProductImageForm, self).save(commit=commit)\n self.save_variant_images(instance)\n return instance\n\n\nclass ProductAttributeForm(forms.ModelForm):\n class Meta:\n model = ProductAttribute\n exclude = []\n\n\nclass StockLocationForm(forms.ModelForm):\n class Meta:\n model = StockLocation\n exclude = []\n\n\nAttributeChoiceValueFormset = inlineformset_factory(\n ProductAttribute, AttributeChoiceValue, exclude=(), extra=1)\n", "path": "saleor/dashboard/product/forms.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nfrom django import forms\nfrom django.db import transaction\nfrom django.db.models import Count\nfrom django.forms.models import ModelChoiceIterator, inlineformset_factory\nfrom django.utils.encoding import smart_text\nfrom django.utils.translation import pgettext_lazy\n\nfrom ...product.models import (AttributeChoiceValue, Product, ProductAttribute,\n ProductClass, ProductImage, ProductVariant,\n Stock, StockLocation, VariantImage)\nfrom .widgets import ImagePreviewWidget\nfrom ...search import index as search_index\n\n\nclass ProductClassSelectorForm(forms.Form):\n MAX_RADIO_SELECT_ITEMS = 5\n\n def __init__(self, *args, **kwargs):\n product_classes = kwargs.pop('product_classes', [])\n super(ProductClassSelectorForm, self).__init__(*args, **kwargs)\n choices = [(obj.pk, obj.name) for obj in product_classes]\n if len(product_classes) > 
self.MAX_RADIO_SELECT_ITEMS:\n widget = forms.Select\n else:\n widget = forms.RadioSelect\n self.fields['product_cls'] = forms.ChoiceField(\n label=pgettext_lazy('Product class form label', 'Product type'),\n choices=choices, widget=widget)\n\n\nclass StockForm(forms.ModelForm):\n class Meta:\n model = Stock\n exclude = ['quantity_allocated']\n\n def __init__(self, *args, **kwargs):\n product = kwargs.pop('product')\n super(StockForm, self).__init__(*args, **kwargs)\n if not product.product_class.has_variants:\n initial = product.variants.first()\n else:\n initial = None\n self.fields['variant'] = forms.ModelChoiceField(\n queryset=product.variants, initial=initial)\n\n\nclass ProductClassForm(forms.ModelForm):\n class Meta:\n model = ProductClass\n exclude = []\n labels = {\n 'variant_attributes': pgettext_lazy(\n 'Product class form label',\n 'Attributes specific to each variant'),\n 'product_attributes': pgettext_lazy(\n 'Product class form label',\n 'Attributes common to all variants')}\n\n def clean(self):\n data = super(ProductClassForm, self).clean()\n has_variants = self.cleaned_data['has_variants']\n product_attr = set(self.cleaned_data['product_attributes'])\n variant_attr = set(self.cleaned_data['variant_attributes'])\n if not has_variants and len(variant_attr) > 0:\n msg = pgettext_lazy(\n 'Product Class Errors',\n 'Product variants are disabled.')\n self.add_error('variant_attributes', msg)\n if len(product_attr & variant_attr) > 0:\n msg = pgettext_lazy(\n 'Product Class Errors',\n 'A single attribute can\\'t belong to both a product '\n 'and its variant.')\n self.add_error('variant_attributes', msg)\n\n if self.instance.pk:\n variants_changed = not (self.fields['has_variants'].initial ==\n has_variants)\n if variants_changed:\n query = self.instance.products.all()\n query = query.annotate(variants_counter=Count('variants'))\n query = query.filter(variants_counter__gt=1)\n if query.exists():\n msg = pgettext_lazy(\n 'Product Class Errors',\n 'Some products of this type have more than '\n 'one variant.')\n self.add_error('has_variants', msg)\n return data\n\n\nclass ProductForm(forms.ModelForm):\n\n class Meta:\n model = Product\n exclude = ['attributes', 'product_class']\n\n def __init__(self, *args, **kwargs):\n self.product_attributes = []\n super(ProductForm, self).__init__(*args, **kwargs)\n field = self.fields['name']\n field.widget.attrs['placeholder'] = pgettext_lazy(\n 'Product form labels', 'Give your awesome product a name')\n field = self.fields['categories']\n field.widget.attrs['data-placeholder'] = pgettext_lazy(\n 'Product form labels', 'Search')\n product_class = self.instance.product_class\n self.product_attributes = product_class.product_attributes.all()\n self.product_attributes = self.product_attributes.prefetch_related(\n 'values')\n self.prepare_fields_for_attributes()\n\n def prepare_fields_for_attributes(self):\n for attribute in self.product_attributes:\n field_defaults = {\n 'label': attribute.display,\n 'required': False,\n 'initial': self.instance.get_attribute(attribute.pk)}\n if attribute.has_values():\n field = CachingModelChoiceField(\n queryset=attribute.values.all(), **field_defaults)\n else:\n field = forms.CharField(**field_defaults)\n self.fields[attribute.get_formfield_name()] = field\n\n def iter_attribute_fields(self):\n for attr in self.product_attributes:\n yield self[attr.get_formfield_name()]\n\n def save(self, commit=True):\n attributes = {}\n for attr in self.product_attributes:\n value = 
self.cleaned_data.pop(attr.get_formfield_name())\n if isinstance(value, AttributeChoiceValue):\n attributes[smart_text(attr.pk)] = smart_text(value.pk)\n else:\n attributes[smart_text(attr.pk)] = value\n self.instance.attributes = attributes\n instance = super(ProductForm, self).save(commit=commit)\n search_index.insert_or_update_object(instance)\n return instance\n\n\nclass ProductVariantForm(forms.ModelForm):\n class Meta:\n model = ProductVariant\n exclude = ['attributes', 'product', 'images']\n\n def __init__(self, *args, **kwargs):\n super(ProductVariantForm, self).__init__(*args, **kwargs)\n if self.instance.product.pk:\n self.fields['price_override'].widget.attrs[\n 'placeholder'] = self.instance.product.price.gross\n self.fields['weight_override'].widget.attrs[\n 'placeholder'] = self.instance.product.weight\n\n\nclass CachingModelChoiceIterator(ModelChoiceIterator):\n def __iter__(self):\n if self.field.empty_label is not None:\n yield ('', self.field.empty_label)\n for obj in self.queryset:\n yield self.choice(obj)\n\n\nclass CachingModelChoiceField(forms.ModelChoiceField):\n def _get_choices(self):\n if hasattr(self, '_choices'):\n return self._choices\n return CachingModelChoiceIterator(self)\n choices = property(_get_choices, forms.ChoiceField._set_choices)\n\n\nclass VariantAttributeForm(forms.ModelForm):\n class Meta:\n model = ProductVariant\n fields = []\n\n def __init__(self, *args, **kwargs):\n super(VariantAttributeForm, self).__init__(*args, **kwargs)\n attrs = self.instance.product.product_class.variant_attributes.all()\n self.available_attrs = attrs.prefetch_related('values')\n for attr in self.available_attrs:\n field_defaults = {'label': attr.display,\n 'required': True,\n 'initial': self.instance.get_attribute(attr.pk)}\n if attr.has_values():\n field = CachingModelChoiceField(\n queryset=attr.values.all(), **field_defaults)\n else:\n field = forms.CharField(**field_defaults)\n self.fields[attr.get_formfield_name()] = field\n\n def save(self, commit=True):\n attributes = {}\n for attr in self.available_attrs:\n value = self.cleaned_data.pop(attr.get_formfield_name())\n if isinstance(value, AttributeChoiceValue):\n attributes[smart_text(attr.pk)] = smart_text(value.pk)\n else:\n attributes[smart_text(attr.pk)] = value\n self.instance.attributes = attributes\n return super(VariantAttributeForm, self).save(commit=commit)\n\n\nclass VariantBulkDeleteForm(forms.Form):\n items = forms.ModelMultipleChoiceField(queryset=ProductVariant.objects)\n\n def delete(self):\n items = ProductVariant.objects.filter(\n pk__in=self.cleaned_data['items'])\n items.delete()\n\n\nclass StockBulkDeleteForm(forms.Form):\n items = forms.ModelMultipleChoiceField(queryset=Stock.objects)\n\n def delete(self):\n items = Stock.objects.filter(pk__in=self.cleaned_data['items'])\n items.delete()\n\n\nclass ProductImageForm(forms.ModelForm):\n variants = forms.ModelMultipleChoiceField(\n queryset=ProductVariant.objects.none(),\n widget=forms.CheckboxSelectMultiple, required=False)\n\n class Meta:\n model = ProductImage\n exclude = ('product', 'order')\n\n def __init__(self, *args, **kwargs):\n super(ProductImageForm, self).__init__(*args, **kwargs)\n show_variants = self.instance.product.product_class.has_variants\n if self.instance.product and show_variants:\n variants = self.fields['variants']\n variants.queryset = self.instance.product.variants.all()\n variants.initial = self.instance.variant_images.values_list(\n 'variant', flat=True)\n if self.instance.image:\n self.fields['image'].widget = 
ImagePreviewWidget()\n\n @transaction.atomic\n def save_variant_images(self, instance):\n variant_images = []\n # Clean up old mapping\n instance.variant_images.all().delete()\n for variant in self.cleaned_data['variants']:\n variant_images.append(\n VariantImage(variant=variant, image=instance))\n VariantImage.objects.bulk_create(variant_images)\n\n def save(self, commit=True):\n instance = super(ProductImageForm, self).save(commit=commit)\n self.save_variant_images(instance)\n return instance\n\n\nclass ProductAttributeForm(forms.ModelForm):\n class Meta:\n model = ProductAttribute\n exclude = []\n\n\nclass StockLocationForm(forms.ModelForm):\n class Meta:\n model = StockLocation\n exclude = []\n\n\nAttributeChoiceValueFormset = inlineformset_factory(\n ProductAttribute, AttributeChoiceValue, exclude=(), extra=1)\n", "path": "saleor/dashboard/product/forms.py"}]}
| 3,234 | 138 |
gh_patches_debug_18887
|
rasdani/github-patches
|
git_diff
|
quantumlib__Cirq-4443
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
eval(repr(measurement_gate)) fails for keys of the type `'a:b:c'`
**Description of the issue**
repr(measurement_gate) prints the wrong repr when using a nested measurement key of the form `a:b:c`. This is not catched in the json serialization tests because we explicitly call `MeasurementKey.parse_serialized(key)` in the `_from_json_dict_` function of `MeasurementGate` class.
**How to reproduce the issue**
```python
In [1]: import cirq
...: g = cirq.MeasurementGate(2, cirq.MeasurementKey.parse_serialized('a:b:c'))
...: print(g, repr(g))
...: eval(repr(g))
cirq.MeasurementGate(2, 'a:b:c', ()) cirq.MeasurementGate(2, 'a:b:c', ())
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-7-0da6052a742e> in <module>
2 g = cirq.MeasurementGate(2, cirq.MeasurementKey.parse_serialized('a:b:c'))
3 print(g, repr(g))
----> 4 eval(repr(g))
<string> in <module>
~/opt/anaconda3/envs/cirq/lib/python3.8/site-packages/cirq/ops/measurement_gate.py in __init__(self, num_qubits, key, invert_mask, qid_shape)
66 if len(self._qid_shape) != num_qubits:
67 raise ValueError('len(qid_shape) != num_qubits')
---> 68 self.key = key # type: ignore
69 self.invert_mask = invert_mask or ()
70 if self.invert_mask is not None and len(self.invert_mask) > self.num_qubits():
~/opt/anaconda3/envs/cirq/lib/python3.8/site-packages/cirq/ops/measurement_gate.py in key(self, key)
80 self.mkey = key
81 else:
---> 82 self.mkey = value.MeasurementKey(name=key)
83
84 def _qid_shape_(self) -> Tuple[int, ...]:
~/opt/anaconda3/envs/cirq/lib/python3.8/site-packages/cirq/value/measurement_key.py in __init__(self, name, path)
~/opt/anaconda3/envs/cirq/lib/python3.8/site-packages/cirq/value/measurement_key.py in __post_init__(self)
45 raise ValueError("Measurement key name cannot be empty")
46 if MEASUREMENT_KEY_SEPARATOR in self.name:
---> 47 raise ValueError(
48 f'Invalid key name: {self.name}\n{MEASUREMENT_KEY_SEPARATOR} is not allowed in '
49 'MeasurementKey. If this is a nested key string, use '
ValueError: Invalid key name: a:b:c
: is not allowed in MeasurementKey. If this is a nested key string, use `MeasurementKey.parse_serialized` for correct behavior.
```
**Cirq version**
0.12.0.dev
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cirq-core/cirq/ops/measurement_gate.py`
Content:
```
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Any, Dict, Iterable, Optional, Tuple, Sequence, TYPE_CHECKING, Union
16
17 import numpy as np
18
19 from cirq import protocols, value
20 from cirq.ops import raw_types
21
22 if TYPE_CHECKING:
23 import cirq
24
25
26 @value.value_equality
27 class MeasurementGate(raw_types.Gate):
28 """A gate that measures qubits in the computational basis.
29
30 The measurement gate contains a key that is used to identify results
31 of measurements.
32 """
33
34 def __init__(
35 self,
36 num_qubits: Optional[int] = None,
37 key: Union[str, value.MeasurementKey] = '',
38 invert_mask: Tuple[bool, ...] = (),
39 qid_shape: Tuple[int, ...] = None,
40 ) -> None:
41 """Inits MeasurementGate.
42
43 Args:
44 num_qubits: The number of qubits to act upon.
45 key: The string key of the measurement.
46 invert_mask: A list of values indicating whether the corresponding
47 qubits should be flipped. The list's length must not be longer
48 than the number of qubits, but it is permitted to be shorter.
49 Qubits with indices past the end of the mask are not flipped.
50 qid_shape: Specifies the dimension of each qid the measurement
51 applies to. The default is 2 for every qubit.
52
53 Raises:
54 ValueError: If the length of invert_mask is greater than num_qubits.
55 or if the length of qid_shape doesn't equal num_qubits.
56 """
57 if qid_shape is None:
58 if num_qubits is None:
59 raise ValueError('Specify either the num_qubits or qid_shape argument.')
60 qid_shape = (2,) * num_qubits
61 elif num_qubits is None:
62 num_qubits = len(qid_shape)
63 if num_qubits == 0:
64 raise ValueError('Measuring an empty set of qubits.')
65 self._qid_shape = qid_shape
66 if len(self._qid_shape) != num_qubits:
67 raise ValueError('len(qid_shape) != num_qubits')
68 self.key = key # type: ignore
69 self.invert_mask = invert_mask or ()
70 if self.invert_mask is not None and len(self.invert_mask) > self.num_qubits():
71 raise ValueError('len(invert_mask) > num_qubits')
72
73 @property
74 def key(self) -> str:
75 return str(self.mkey)
76
77 @key.setter
78 def key(self, key: Union[str, value.MeasurementKey]):
79 if isinstance(key, value.MeasurementKey):
80 self.mkey = key
81 else:
82 self.mkey = value.MeasurementKey(name=key)
83
84 def _qid_shape_(self) -> Tuple[int, ...]:
85 return self._qid_shape
86
87 def with_key(self, key: Union[str, value.MeasurementKey]) -> 'MeasurementGate':
88 """Creates a measurement gate with a new key but otherwise identical."""
89 if key == self.key:
90 return self
91 return MeasurementGate(
92 self.num_qubits(), key=key, invert_mask=self.invert_mask, qid_shape=self._qid_shape
93 )
94
95 def _with_key_path_(self, path: Tuple[str, ...]):
96 return self.with_key(self.mkey._with_key_path_(path))
97
98 def _with_measurement_key_mapping_(self, key_map: Dict[str, str]):
99 return self.with_key(protocols.with_measurement_key_mapping(self.mkey, key_map))
100
101 def with_bits_flipped(self, *bit_positions: int) -> 'MeasurementGate':
102 """Toggles whether or not the measurement inverts various outputs."""
103 old_mask = self.invert_mask or ()
104 n = max(len(old_mask) - 1, *bit_positions) + 1
105 new_mask = [k < len(old_mask) and old_mask[k] for k in range(n)]
106 for b in bit_positions:
107 new_mask[b] = not new_mask[b]
108 return MeasurementGate(
109 self.num_qubits(), key=self.key, invert_mask=tuple(new_mask), qid_shape=self._qid_shape
110 )
111
112 def full_invert_mask(self):
113 """Returns the invert mask for all qubits.
114
115 If the user supplies a partial invert_mask, this returns that mask
116 padded by False.
117
118 Similarly if no invert_mask is supplies this returns a tuple
119 of size equal to the number of qubits with all entries False.
120 """
121 mask = self.invert_mask or self.num_qubits() * (False,)
122 deficit = self.num_qubits() - len(mask)
123 mask += (False,) * deficit
124 return mask
125
126 def _is_measurement_(self) -> bool:
127 return True
128
129 def _measurement_key_name_(self):
130 return self.key
131
132 def _kraus_(self):
133 size = np.prod(self._qid_shape, dtype=np.int64)
134
135 def delta(i):
136 result = np.zeros((size, size))
137 result[i][i] = 1
138 return result
139
140 return tuple(delta(i) for i in range(size))
141
142 def _has_kraus_(self):
143 return True
144
145 def _circuit_diagram_info_(
146 self, args: 'cirq.CircuitDiagramInfoArgs'
147 ) -> 'cirq.CircuitDiagramInfo':
148 symbols = ['M'] * self.num_qubits()
149
150 # Show which output bits are negated.
151 if self.invert_mask:
152 for i, b in enumerate(self.invert_mask):
153 if b:
154 symbols[i] = '!M'
155
156 # Mention the measurement key.
157 if not args.known_qubits or self.key != _default_measurement_key(args.known_qubits):
158 symbols[0] += f"('{self.key}')"
159
160 return protocols.CircuitDiagramInfo(tuple(symbols))
161
162 def _qasm_(self, args: 'cirq.QasmArgs', qubits: Tuple['cirq.Qid', ...]) -> Optional[str]:
163 if not all(d == 2 for d in self._qid_shape):
164 return NotImplemented
165 args.validate_version('2.0')
166 invert_mask = self.invert_mask
167 if len(invert_mask) < len(qubits):
168 invert_mask = invert_mask + (False,) * (len(qubits) - len(invert_mask))
169 lines = []
170 for i, (qubit, inv) in enumerate(zip(qubits, invert_mask)):
171 if inv:
172 lines.append(args.format('x {0}; // Invert the following measurement\n', qubit))
173 lines.append(args.format('measure {0} -> {1:meas}[{2}];\n', qubit, self.key, i))
174 return ''.join(lines)
175
176 def _quil_(
177 self, qubits: Tuple['cirq.Qid', ...], formatter: 'cirq.QuilFormatter'
178 ) -> Optional[str]:
179 if not all(d == 2 for d in self._qid_shape):
180 return NotImplemented
181 invert_mask = self.invert_mask
182 if len(invert_mask) < len(qubits):
183 invert_mask = invert_mask + (False,) * (len(qubits) - len(invert_mask))
184 lines = []
185 for i, (qubit, inv) in enumerate(zip(qubits, invert_mask)):
186 if inv:
187 lines.append(
188 formatter.format('X {0} # Inverting for following measurement\n', qubit)
189 )
190 lines.append(formatter.format('MEASURE {0} {1:meas}[{2}]\n', qubit, self.key, i))
191 return ''.join(lines)
192
193 def _op_repr_(self, qubits: Sequence['cirq.Qid']) -> str:
194 args = list(repr(q) for q in qubits)
195 if self.key != _default_measurement_key(qubits):
196 args.append(f'key={self.key!r}')
197 if self.invert_mask:
198 args.append(f'invert_mask={self.invert_mask!r}')
199 arg_list = ', '.join(args)
200 return f'cirq.measure({arg_list})'
201
202 def __repr__(self):
203 qid_shape_arg = ''
204 if any(d != 2 for d in self._qid_shape):
205 qid_shape_arg = f', {self._qid_shape!r}'
206 return (
207 f'cirq.MeasurementGate('
208 f'{self.num_qubits()!r}, '
209 f'{self.key!r}, '
210 f'{self.invert_mask}'
211 f'{qid_shape_arg})'
212 )
213
214 def _value_equality_values_(self) -> Any:
215 return self.key, self.invert_mask, self._qid_shape
216
217 def _json_dict_(self) -> Dict[str, Any]:
218 other = {}
219 if not all(d == 2 for d in self._qid_shape):
220 other['qid_shape'] = self._qid_shape
221 return {
222 'cirq_type': self.__class__.__name__,
223 'num_qubits': len(self._qid_shape),
224 'key': self.key,
225 'invert_mask': self.invert_mask,
226 **other,
227 }
228
229 @classmethod
230 def _from_json_dict_(cls, num_qubits, key, invert_mask, qid_shape=None, **kwargs):
231 return cls(
232 num_qubits=num_qubits,
233 key=value.MeasurementKey.parse_serialized(key),
234 invert_mask=tuple(invert_mask),
235 qid_shape=None if qid_shape is None else tuple(qid_shape),
236 )
237
238 def _has_stabilizer_effect_(self) -> Optional[bool]:
239 return True
240
241 def _act_on_(self, args: 'cirq.OperationTarget', qubits: Sequence['cirq.Qid']) -> bool:
242 from cirq.sim import ActOnArgs
243
244 if not isinstance(args, ActOnArgs):
245 return NotImplemented
246 args.measure(qubits, self.key, self.full_invert_mask())
247 return True
248
249
250 def _default_measurement_key(qubits: Iterable[raw_types.Qid]) -> str:
251 return ','.join(str(q) for q in qubits)
252
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cirq-core/cirq/ops/measurement_gate.py b/cirq-core/cirq/ops/measurement_gate.py
--- a/cirq-core/cirq/ops/measurement_gate.py
+++ b/cirq-core/cirq/ops/measurement_gate.py
@@ -193,7 +193,7 @@
def _op_repr_(self, qubits: Sequence['cirq.Qid']) -> str:
args = list(repr(q) for q in qubits)
if self.key != _default_measurement_key(qubits):
- args.append(f'key={self.key!r}')
+ args.append(f'key={self.mkey!r}')
if self.invert_mask:
args.append(f'invert_mask={self.invert_mask!r}')
arg_list = ', '.join(args)
@@ -206,7 +206,7 @@
return (
f'cirq.MeasurementGate('
f'{self.num_qubits()!r}, '
- f'{self.key!r}, '
+ f'{self.mkey!r}, '
f'{self.invert_mask}'
f'{qid_shape_arg})'
)
|
{"golden_diff": "diff --git a/cirq-core/cirq/ops/measurement_gate.py b/cirq-core/cirq/ops/measurement_gate.py\n--- a/cirq-core/cirq/ops/measurement_gate.py\n+++ b/cirq-core/cirq/ops/measurement_gate.py\n@@ -193,7 +193,7 @@\n def _op_repr_(self, qubits: Sequence['cirq.Qid']) -> str:\n args = list(repr(q) for q in qubits)\n if self.key != _default_measurement_key(qubits):\n- args.append(f'key={self.key!r}')\n+ args.append(f'key={self.mkey!r}')\n if self.invert_mask:\n args.append(f'invert_mask={self.invert_mask!r}')\n arg_list = ', '.join(args)\n@@ -206,7 +206,7 @@\n return (\n f'cirq.MeasurementGate('\n f'{self.num_qubits()!r}, '\n- f'{self.key!r}, '\n+ f'{self.mkey!r}, '\n f'{self.invert_mask}'\n f'{qid_shape_arg})'\n )\n", "issue": "eval(repr(measurement_gate)) fails for keys of the type `'a:b:c'`\n**Description of the issue**\r\nrepr(measurement_gate) prints the wrong repr when using a nested measurement key of the form `a:b:c`. This is not catched in the json serialization tests because we explicitly call `MeasurementKey.parse_serialized(key)` in the `_from_json_dict_` function of `MeasurementGate` class. \r\n\r\n**How to reproduce the issue**\r\n\r\n```python\r\nIn [1]: import cirq\r\n ...: g = cirq.MeasurementGate(2, cirq.MeasurementKey.parse_serialized('a:b:c'))\r\n ...: print(g, repr(g))\r\n ...: eval(repr(g))\r\ncirq.MeasurementGate(2, 'a:b:c', ()) cirq.MeasurementGate(2, 'a:b:c', ())\r\n---------------------------------------------------------------------------\r\nValueError Traceback (most recent call last)\r\n<ipython-input-7-0da6052a742e> in <module>\r\n 2 g = cirq.MeasurementGate(2, cirq.MeasurementKey.parse_serialized('a:b:c'))\r\n 3 print(g, repr(g))\r\n----> 4 eval(repr(g))\r\n\r\n<string> in <module>\r\n\r\n~/opt/anaconda3/envs/cirq/lib/python3.8/site-packages/cirq/ops/measurement_gate.py in __init__(self, num_qubits, key, invert_mask, qid_shape)\r\n 66 if len(self._qid_shape) != num_qubits:\r\n 67 raise ValueError('len(qid_shape) != num_qubits')\r\n---> 68 self.key = key # type: ignore\r\n 69 self.invert_mask = invert_mask or ()\r\n 70 if self.invert_mask is not None and len(self.invert_mask) > self.num_qubits():\r\n\r\n~/opt/anaconda3/envs/cirq/lib/python3.8/site-packages/cirq/ops/measurement_gate.py in key(self, key)\r\n 80 self.mkey = key\r\n 81 else:\r\n---> 82 self.mkey = value.MeasurementKey(name=key)\r\n 83\r\n 84 def _qid_shape_(self) -> Tuple[int, ...]:\r\n\r\n~/opt/anaconda3/envs/cirq/lib/python3.8/site-packages/cirq/value/measurement_key.py in __init__(self, name, path)\r\n\r\n~/opt/anaconda3/envs/cirq/lib/python3.8/site-packages/cirq/value/measurement_key.py in __post_init__(self)\r\n 45 raise ValueError(\"Measurement key name cannot be empty\")\r\n 46 if MEASUREMENT_KEY_SEPARATOR in self.name:\r\n---> 47 raise ValueError(\r\n 48 f'Invalid key name: {self.name}\\n{MEASUREMENT_KEY_SEPARATOR} is not allowed in '\r\n 49 'MeasurementKey. If this is a nested key string, use '\r\n\r\nValueError: Invalid key name: a:b:c\r\n: is not allowed in MeasurementKey. 
If this is a nested key string, use `MeasurementKey.parse_serialized` for correct behavior.\r\n\r\n```\r\n\r\n**Cirq version**\r\n0.12.0.dev\r\n\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Any, Dict, Iterable, Optional, Tuple, Sequence, TYPE_CHECKING, Union\n\nimport numpy as np\n\nfrom cirq import protocols, value\nfrom cirq.ops import raw_types\n\nif TYPE_CHECKING:\n import cirq\n\n\[email protected]_equality\nclass MeasurementGate(raw_types.Gate):\n \"\"\"A gate that measures qubits in the computational basis.\n\n The measurement gate contains a key that is used to identify results\n of measurements.\n \"\"\"\n\n def __init__(\n self,\n num_qubits: Optional[int] = None,\n key: Union[str, value.MeasurementKey] = '',\n invert_mask: Tuple[bool, ...] = (),\n qid_shape: Tuple[int, ...] = None,\n ) -> None:\n \"\"\"Inits MeasurementGate.\n\n Args:\n num_qubits: The number of qubits to act upon.\n key: The string key of the measurement.\n invert_mask: A list of values indicating whether the corresponding\n qubits should be flipped. The list's length must not be longer\n than the number of qubits, but it is permitted to be shorter.\n Qubits with indices past the end of the mask are not flipped.\n qid_shape: Specifies the dimension of each qid the measurement\n applies to. 
The default is 2 for every qubit.\n\n Raises:\n ValueError: If the length of invert_mask is greater than num_qubits.\n or if the length of qid_shape doesn't equal num_qubits.\n \"\"\"\n if qid_shape is None:\n if num_qubits is None:\n raise ValueError('Specify either the num_qubits or qid_shape argument.')\n qid_shape = (2,) * num_qubits\n elif num_qubits is None:\n num_qubits = len(qid_shape)\n if num_qubits == 0:\n raise ValueError('Measuring an empty set of qubits.')\n self._qid_shape = qid_shape\n if len(self._qid_shape) != num_qubits:\n raise ValueError('len(qid_shape) != num_qubits')\n self.key = key # type: ignore\n self.invert_mask = invert_mask or ()\n if self.invert_mask is not None and len(self.invert_mask) > self.num_qubits():\n raise ValueError('len(invert_mask) > num_qubits')\n\n @property\n def key(self) -> str:\n return str(self.mkey)\n\n @key.setter\n def key(self, key: Union[str, value.MeasurementKey]):\n if isinstance(key, value.MeasurementKey):\n self.mkey = key\n else:\n self.mkey = value.MeasurementKey(name=key)\n\n def _qid_shape_(self) -> Tuple[int, ...]:\n return self._qid_shape\n\n def with_key(self, key: Union[str, value.MeasurementKey]) -> 'MeasurementGate':\n \"\"\"Creates a measurement gate with a new key but otherwise identical.\"\"\"\n if key == self.key:\n return self\n return MeasurementGate(\n self.num_qubits(), key=key, invert_mask=self.invert_mask, qid_shape=self._qid_shape\n )\n\n def _with_key_path_(self, path: Tuple[str, ...]):\n return self.with_key(self.mkey._with_key_path_(path))\n\n def _with_measurement_key_mapping_(self, key_map: Dict[str, str]):\n return self.with_key(protocols.with_measurement_key_mapping(self.mkey, key_map))\n\n def with_bits_flipped(self, *bit_positions: int) -> 'MeasurementGate':\n \"\"\"Toggles whether or not the measurement inverts various outputs.\"\"\"\n old_mask = self.invert_mask or ()\n n = max(len(old_mask) - 1, *bit_positions) + 1\n new_mask = [k < len(old_mask) and old_mask[k] for k in range(n)]\n for b in bit_positions:\n new_mask[b] = not new_mask[b]\n return MeasurementGate(\n self.num_qubits(), key=self.key, invert_mask=tuple(new_mask), qid_shape=self._qid_shape\n )\n\n def full_invert_mask(self):\n \"\"\"Returns the invert mask for all qubits.\n\n If the user supplies a partial invert_mask, this returns that mask\n padded by False.\n\n Similarly if no invert_mask is supplies this returns a tuple\n of size equal to the number of qubits with all entries False.\n \"\"\"\n mask = self.invert_mask or self.num_qubits() * (False,)\n deficit = self.num_qubits() - len(mask)\n mask += (False,) * deficit\n return mask\n\n def _is_measurement_(self) -> bool:\n return True\n\n def _measurement_key_name_(self):\n return self.key\n\n def _kraus_(self):\n size = np.prod(self._qid_shape, dtype=np.int64)\n\n def delta(i):\n result = np.zeros((size, size))\n result[i][i] = 1\n return result\n\n return tuple(delta(i) for i in range(size))\n\n def _has_kraus_(self):\n return True\n\n def _circuit_diagram_info_(\n self, args: 'cirq.CircuitDiagramInfoArgs'\n ) -> 'cirq.CircuitDiagramInfo':\n symbols = ['M'] * self.num_qubits()\n\n # Show which output bits are negated.\n if self.invert_mask:\n for i, b in enumerate(self.invert_mask):\n if b:\n symbols[i] = '!M'\n\n # Mention the measurement key.\n if not args.known_qubits or self.key != _default_measurement_key(args.known_qubits):\n symbols[0] += f\"('{self.key}')\"\n\n return protocols.CircuitDiagramInfo(tuple(symbols))\n\n def _qasm_(self, args: 'cirq.QasmArgs', qubits: 
Tuple['cirq.Qid', ...]) -> Optional[str]:\n if not all(d == 2 for d in self._qid_shape):\n return NotImplemented\n args.validate_version('2.0')\n invert_mask = self.invert_mask\n if len(invert_mask) < len(qubits):\n invert_mask = invert_mask + (False,) * (len(qubits) - len(invert_mask))\n lines = []\n for i, (qubit, inv) in enumerate(zip(qubits, invert_mask)):\n if inv:\n lines.append(args.format('x {0}; // Invert the following measurement\\n', qubit))\n lines.append(args.format('measure {0} -> {1:meas}[{2}];\\n', qubit, self.key, i))\n return ''.join(lines)\n\n def _quil_(\n self, qubits: Tuple['cirq.Qid', ...], formatter: 'cirq.QuilFormatter'\n ) -> Optional[str]:\n if not all(d == 2 for d in self._qid_shape):\n return NotImplemented\n invert_mask = self.invert_mask\n if len(invert_mask) < len(qubits):\n invert_mask = invert_mask + (False,) * (len(qubits) - len(invert_mask))\n lines = []\n for i, (qubit, inv) in enumerate(zip(qubits, invert_mask)):\n if inv:\n lines.append(\n formatter.format('X {0} # Inverting for following measurement\\n', qubit)\n )\n lines.append(formatter.format('MEASURE {0} {1:meas}[{2}]\\n', qubit, self.key, i))\n return ''.join(lines)\n\n def _op_repr_(self, qubits: Sequence['cirq.Qid']) -> str:\n args = list(repr(q) for q in qubits)\n if self.key != _default_measurement_key(qubits):\n args.append(f'key={self.key!r}')\n if self.invert_mask:\n args.append(f'invert_mask={self.invert_mask!r}')\n arg_list = ', '.join(args)\n return f'cirq.measure({arg_list})'\n\n def __repr__(self):\n qid_shape_arg = ''\n if any(d != 2 for d in self._qid_shape):\n qid_shape_arg = f', {self._qid_shape!r}'\n return (\n f'cirq.MeasurementGate('\n f'{self.num_qubits()!r}, '\n f'{self.key!r}, '\n f'{self.invert_mask}'\n f'{qid_shape_arg})'\n )\n\n def _value_equality_values_(self) -> Any:\n return self.key, self.invert_mask, self._qid_shape\n\n def _json_dict_(self) -> Dict[str, Any]:\n other = {}\n if not all(d == 2 for d in self._qid_shape):\n other['qid_shape'] = self._qid_shape\n return {\n 'cirq_type': self.__class__.__name__,\n 'num_qubits': len(self._qid_shape),\n 'key': self.key,\n 'invert_mask': self.invert_mask,\n **other,\n }\n\n @classmethod\n def _from_json_dict_(cls, num_qubits, key, invert_mask, qid_shape=None, **kwargs):\n return cls(\n num_qubits=num_qubits,\n key=value.MeasurementKey.parse_serialized(key),\n invert_mask=tuple(invert_mask),\n qid_shape=None if qid_shape is None else tuple(qid_shape),\n )\n\n def _has_stabilizer_effect_(self) -> Optional[bool]:\n return True\n\n def _act_on_(self, args: 'cirq.OperationTarget', qubits: Sequence['cirq.Qid']) -> bool:\n from cirq.sim import ActOnArgs\n\n if not isinstance(args, ActOnArgs):\n return NotImplemented\n args.measure(qubits, self.key, self.full_invert_mask())\n return True\n\n\ndef _default_measurement_key(qubits: Iterable[raw_types.Qid]) -> str:\n return ','.join(str(q) for q in qubits)\n", "path": "cirq-core/cirq/ops/measurement_gate.py"}], "after_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing 
permissions and\n# limitations under the License.\n\nfrom typing import Any, Dict, Iterable, Optional, Tuple, Sequence, TYPE_CHECKING, Union\n\nimport numpy as np\n\nfrom cirq import protocols, value\nfrom cirq.ops import raw_types\n\nif TYPE_CHECKING:\n import cirq\n\n\[email protected]_equality\nclass MeasurementGate(raw_types.Gate):\n \"\"\"A gate that measures qubits in the computational basis.\n\n The measurement gate contains a key that is used to identify results\n of measurements.\n \"\"\"\n\n def __init__(\n self,\n num_qubits: Optional[int] = None,\n key: Union[str, value.MeasurementKey] = '',\n invert_mask: Tuple[bool, ...] = (),\n qid_shape: Tuple[int, ...] = None,\n ) -> None:\n \"\"\"Inits MeasurementGate.\n\n Args:\n num_qubits: The number of qubits to act upon.\n key: The string key of the measurement.\n invert_mask: A list of values indicating whether the corresponding\n qubits should be flipped. The list's length must not be longer\n than the number of qubits, but it is permitted to be shorter.\n Qubits with indices past the end of the mask are not flipped.\n qid_shape: Specifies the dimension of each qid the measurement\n applies to. The default is 2 for every qubit.\n\n Raises:\n ValueError: If the length of invert_mask is greater than num_qubits.\n or if the length of qid_shape doesn't equal num_qubits.\n \"\"\"\n if qid_shape is None:\n if num_qubits is None:\n raise ValueError('Specify either the num_qubits or qid_shape argument.')\n qid_shape = (2,) * num_qubits\n elif num_qubits is None:\n num_qubits = len(qid_shape)\n if num_qubits == 0:\n raise ValueError('Measuring an empty set of qubits.')\n self._qid_shape = qid_shape\n if len(self._qid_shape) != num_qubits:\n raise ValueError('len(qid_shape) != num_qubits')\n self.key = key # type: ignore\n self.invert_mask = invert_mask or ()\n if self.invert_mask is not None and len(self.invert_mask) > self.num_qubits():\n raise ValueError('len(invert_mask) > num_qubits')\n\n @property\n def key(self) -> str:\n return str(self.mkey)\n\n @key.setter\n def key(self, key: Union[str, value.MeasurementKey]):\n if isinstance(key, value.MeasurementKey):\n self.mkey = key\n else:\n self.mkey = value.MeasurementKey(name=key)\n\n def _qid_shape_(self) -> Tuple[int, ...]:\n return self._qid_shape\n\n def with_key(self, key: Union[str, value.MeasurementKey]) -> 'MeasurementGate':\n \"\"\"Creates a measurement gate with a new key but otherwise identical.\"\"\"\n if key == self.key:\n return self\n return MeasurementGate(\n self.num_qubits(), key=key, invert_mask=self.invert_mask, qid_shape=self._qid_shape\n )\n\n def _with_key_path_(self, path: Tuple[str, ...]):\n return self.with_key(self.mkey._with_key_path_(path))\n\n def _with_measurement_key_mapping_(self, key_map: Dict[str, str]):\n return self.with_key(protocols.with_measurement_key_mapping(self.mkey, key_map))\n\n def with_bits_flipped(self, *bit_positions: int) -> 'MeasurementGate':\n \"\"\"Toggles whether or not the measurement inverts various outputs.\"\"\"\n old_mask = self.invert_mask or ()\n n = max(len(old_mask) - 1, *bit_positions) + 1\n new_mask = [k < len(old_mask) and old_mask[k] for k in range(n)]\n for b in bit_positions:\n new_mask[b] = not new_mask[b]\n return MeasurementGate(\n self.num_qubits(), key=self.key, invert_mask=tuple(new_mask), qid_shape=self._qid_shape\n )\n\n def full_invert_mask(self):\n \"\"\"Returns the invert mask for all qubits.\n\n If the user supplies a partial invert_mask, this returns that mask\n padded by False.\n\n Similarly if no 
invert_mask is supplies this returns a tuple\n of size equal to the number of qubits with all entries False.\n \"\"\"\n mask = self.invert_mask or self.num_qubits() * (False,)\n deficit = self.num_qubits() - len(mask)\n mask += (False,) * deficit\n return mask\n\n def _is_measurement_(self) -> bool:\n return True\n\n def _measurement_key_name_(self):\n return self.key\n\n def _kraus_(self):\n size = np.prod(self._qid_shape, dtype=np.int64)\n\n def delta(i):\n result = np.zeros((size, size))\n result[i][i] = 1\n return result\n\n return tuple(delta(i) for i in range(size))\n\n def _has_kraus_(self):\n return True\n\n def _circuit_diagram_info_(\n self, args: 'cirq.CircuitDiagramInfoArgs'\n ) -> 'cirq.CircuitDiagramInfo':\n symbols = ['M'] * self.num_qubits()\n\n # Show which output bits are negated.\n if self.invert_mask:\n for i, b in enumerate(self.invert_mask):\n if b:\n symbols[i] = '!M'\n\n # Mention the measurement key.\n if not args.known_qubits or self.key != _default_measurement_key(args.known_qubits):\n symbols[0] += f\"('{self.key}')\"\n\n return protocols.CircuitDiagramInfo(tuple(symbols))\n\n def _qasm_(self, args: 'cirq.QasmArgs', qubits: Tuple['cirq.Qid', ...]) -> Optional[str]:\n if not all(d == 2 for d in self._qid_shape):\n return NotImplemented\n args.validate_version('2.0')\n invert_mask = self.invert_mask\n if len(invert_mask) < len(qubits):\n invert_mask = invert_mask + (False,) * (len(qubits) - len(invert_mask))\n lines = []\n for i, (qubit, inv) in enumerate(zip(qubits, invert_mask)):\n if inv:\n lines.append(args.format('x {0}; // Invert the following measurement\\n', qubit))\n lines.append(args.format('measure {0} -> {1:meas}[{2}];\\n', qubit, self.key, i))\n return ''.join(lines)\n\n def _quil_(\n self, qubits: Tuple['cirq.Qid', ...], formatter: 'cirq.QuilFormatter'\n ) -> Optional[str]:\n if not all(d == 2 for d in self._qid_shape):\n return NotImplemented\n invert_mask = self.invert_mask\n if len(invert_mask) < len(qubits):\n invert_mask = invert_mask + (False,) * (len(qubits) - len(invert_mask))\n lines = []\n for i, (qubit, inv) in enumerate(zip(qubits, invert_mask)):\n if inv:\n lines.append(\n formatter.format('X {0} # Inverting for following measurement\\n', qubit)\n )\n lines.append(formatter.format('MEASURE {0} {1:meas}[{2}]\\n', qubit, self.key, i))\n return ''.join(lines)\n\n def _op_repr_(self, qubits: Sequence['cirq.Qid']) -> str:\n args = list(repr(q) for q in qubits)\n if self.key != _default_measurement_key(qubits):\n args.append(f'key={self.mkey!r}')\n if self.invert_mask:\n args.append(f'invert_mask={self.invert_mask!r}')\n arg_list = ', '.join(args)\n return f'cirq.measure({arg_list})'\n\n def __repr__(self):\n qid_shape_arg = ''\n if any(d != 2 for d in self._qid_shape):\n qid_shape_arg = f', {self._qid_shape!r}'\n return (\n f'cirq.MeasurementGate('\n f'{self.num_qubits()!r}, '\n f'{self.mkey!r}, '\n f'{self.invert_mask}'\n f'{qid_shape_arg})'\n )\n\n def _value_equality_values_(self) -> Any:\n return self.key, self.invert_mask, self._qid_shape\n\n def _json_dict_(self) -> Dict[str, Any]:\n other = {}\n if not all(d == 2 for d in self._qid_shape):\n other['qid_shape'] = self._qid_shape\n return {\n 'cirq_type': self.__class__.__name__,\n 'num_qubits': len(self._qid_shape),\n 'key': self.key,\n 'invert_mask': self.invert_mask,\n **other,\n }\n\n @classmethod\n def _from_json_dict_(cls, num_qubits, key, invert_mask, qid_shape=None, **kwargs):\n return cls(\n num_qubits=num_qubits,\n key=value.MeasurementKey.parse_serialized(key),\n 
invert_mask=tuple(invert_mask),\n qid_shape=None if qid_shape is None else tuple(qid_shape),\n )\n\n def _has_stabilizer_effect_(self) -> Optional[bool]:\n return True\n\n def _act_on_(self, args: 'cirq.OperationTarget', qubits: Sequence['cirq.Qid']) -> bool:\n from cirq.sim import ActOnArgs\n\n if not isinstance(args, ActOnArgs):\n return NotImplemented\n args.measure(qubits, self.key, self.full_invert_mask())\n return True\n\n\ndef _default_measurement_key(qubits: Iterable[raw_types.Qid]) -> str:\n return ','.join(str(q) for q in qubits)\n", "path": "cirq-core/cirq/ops/measurement_gate.py"}]}
num_tokens: 3,924 | num_tokens_diff: 253
problem_id: gh_patches_debug_35923 | source: rasdani/github-patches | task_type: git_diff | in_source_id: weecology__retriever-933
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Some new datasets failing on 2.0.0
Two or three of the new datasets that got added today (hooray!) appear to be having issues on `2.0.0`.
```
ethan@croryx:~$ pip install retriever --upgrade
...
ethan@croryx:~$ retriever reset all
...
ethan@croryx:~$ retriever update
Downloading scripts...
Download Progress: [##############################] 100.00%
The retriever is up-to-date
Failed to load script: socean_diet_data (/home/ethan/.retriever/scripts/)
Exception: 'main' object has no attribute 'keywords'
Failed to load script: flensburg_food_web (/home/ethan/.retriever/scripts/)
Exception: 'main' object has no attribute 'keywords'
ethan@croryx:~$ retriever ls
Failed to load script: socean_diet_data (/home/ethan/.retriever/scripts/)
Exception: 'main' object has no attribute 'keywords'
Failed to load script: flensburg_food_web (/home/ethan/.retriever/scripts/)
Exception: 'main' object has no attribute 'keywords'
Failed to load script: socean_diet_data (/home/ethan/.retriever/scripts/)
Exception: 'main' object has no attribute 'keywords'
Failed to load script: flensburg_food_web (/home/ethan/.retriever/scripts/)
Exception: 'main' object has no attribute 'keywords'
Available datasets : 78
abalone-age breast-cancer-wi fish-parasite-hosts home-ranges mammal-life-hist mediter-basin-plant-traits plant-life-hist-eu prism-climate vertnet-amphibians
amniote-life-hist breed-bird-survey forest-biomass-china intertidal-abund-me mammal-masses mt-st-helens-veg plant-occur-oosting species-exctinction-rates vertnet-birds
antarctic-breed-bird breed-bird-survey-50stop forest-fires-portugal iris mammal-metabolic-rate nematode-traits plant-taxonomy-us streamflow-conditions vertnet-fishes
aquatic-animal-excretion butterfly-population-network forest-inventory-analysis la-selva-trees mammal-super-tree ngreatplains-flowering-dates poker-hands tree-canopy-geometries vertnet-mammals
bioclim car-eval forest-plots-michigan leaf-herbivory mapped-plant-quads-co NPN portal tree-demog-wghats vertnet-reptiles
biodiversity-response chytr-disease-distr forest-plots-wghats macroalgal_communities mapped-plant-quads-id nyc-tree-count portal-dev turtle-offspring-nesting wine-composition
biomass-allometry-db community-abundance-misc fray-jorge-ecology macrocystis-variation mapped-plant-quads-ks pantheria predator-prey-body-ratio veg-plots-sdl wine-quality
bird-migration-data dicerandra-frutescens gentry-forest-transects mammal-community-db mapped-plant-quads-mt phytoplankton-size predator-prey-size-marine vertnet wood-density
bird-size elton-traits great-basin-mammal-abundance mammal-diet marine-recruitment-data plant-comp-ok
```
@henrykironde suspects that these datasets only work due to improvements in `master` that were made to get this whole set of datasets running, so we may just need to update their `retriever_minimum_version` values to `2.1.dev0`.
--- END ISSUE ---
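
The "'main' object has no attribute 'keywords'" traceback points at the version-compatibility branch the two failing scripts share: under retriever <= 2.0.0 they execute `self.tags = self.keywords`, but neither `__init__` ever assigns `self.keywords`. A stripped-down sketch of that failure mode (the class below is illustrative, not the real `Script` subclass):

```python
# Simplified stand-in for scripts/socean_diet_data.py and scripts/flensburg_food_web.py.
class main:
    def __init__(self):
        self.name = "socean-diet-data"
        # note: self.keywords is never assigned anywhere in __init__
        running_on_2_0_0 = True  # i.e. parse_version(VERSION) <= parse_version("2.0.0")
        if running_on_2_0_0:
            self.tags = self.keywords  # attribute lookup fails here

try:
    main()
except AttributeError as err:
    print(err)  # 'main' object has no attribute 'keywords'
```

Assigning the attribute before the compatibility branch runs (for example `self.keywords = []`, as the patch further below does) or raising `retriever_minimum_version` past 2.0.0 are the two obvious remedies.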
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/flensburg_food_web.py`
Content:
```
1 # -*- coding: latin-1 -*-
2 #retriever
3 from retriever.lib.templates import Script
4 from retriever.lib.models import Table, Cleanup, correct_invalid_value
5 from retriever import VERSION
6 from pkg_resources import parse_version
7
8 class main(Script):
9 def __init__(self, **kwargs):
10 Script.__init__(self, **kwargs)
11 self.title="Food web including metazoan parasites for a brackish shallow water ecosystem in Germany and Denmark"
12 self.citation="C. Dieter Zander, Neri Josten, Kim C. Detloff, Robert Poulin, John P. McLaughlin, and David W. Thieltges. 2011. Food web including metazoan parasites for a brackish shallow water ecosystem in Germany and Denmark. Ecology 92:2007."
13 self.name="flensburg-food-web"
14 self.shortname="flensburg-food-web"
15 self.ref="https://figshare.com/articles/Full_Archive/3552066"
16 self.description="This data is of a food web for the Flensburg Fjord, a brackish shallow water inlet on the Baltic Sea, between Germany and Denmark."
17 self.retriever_minimum_version='2.0.dev'
18 self.version='1.0.0'
19 self.urls={"zip": "https://ndownloader.figshare.com/files/5620326"}
20 self.cleanup_func_table = Cleanup(correct_invalid_value, missing_values=[''])
21
22 if parse_version(VERSION) <= parse_version("2.0.0"):
23 self.shortname = self.name
24 self.name = self.title
25 self.tags = self.keywords
26 self.cleanup_func_table = Cleanup(correct_invalid_value, nulls=['', 'unknown'])
27
28 def download(self, engine=None, debug=False):
29 Script.download(self, engine, debug)
30 engine = self.engine
31 file_names = [ ('Flensburg_Data_Links.csv','links'),
32 ('Flensburg_Data_Nodes.csv','nodes')
33 ]
34
35 engine.download_files_from_archive(self.urls["zip"], [i[0] for i in file_names], filetype="zip", archivename="ECOL_92_174")
36
37 for(filename,tablename) in file_names:
38 data_path = self.engine.format_filename(filename)
39 self.engine.auto_create_table(Table(str(tablename), cleanup=self.cleanup_func_table),filename=filename)
40 self.engine.insert_data_from_file(data_path)
41
42 SCRIPT = main()
```
Path: `scripts/socean_diet_data.py`
Content:
```
1 # -*- coding: latin-1 -*-
2 #retriever
3 from retriever.lib.templates import Script
4 from retriever.lib.models import Table, Cleanup, correct_invalid_value
5 from retriever import VERSION
6 from pkg_resources import parse_version
7
8 class main(Script):
9 def __init__(self, **kwargs):
10 Script.__init__(self, **kwargs)
11 self.title="A Southern Ocean dietary database"
12 self.citation="Ben Raymond, Michelle Marshall, Gabrielle Nevitt, Chris L. Gillies, John van den Hoff, Jonathan S. Stark, Marcel Losekoot, Eric J. Woehler, and Andrew J. Constable. 2011. A Southern Ocean dietary database. Ecology 92:1188."
13 self.name="socean-diet-data"
14 self.shortname="socean-diet-data"
15 self.ref="https://figshare.com/articles/Full_Archive/3551304"
16 self.description="Diet-related data from published and unpublished data sets and studies"
17 self.retriever_minimum_version='2.0.dev'
18 self.version='1.0.0'
19 self.urls={"zip": "https://ndownloader.figshare.com/files/5618823"}
20 self.cleanup_func_table = Cleanup(correct_invalid_value, missing_values=['', 'unknown'])
21
22 if parse_version(VERSION) <= parse_version("2.0.0"):
23 self.shortname = self.name
24 self.name = self.title
25 self.tags = self.keywords
26 self.cleanup_func_table = Cleanup(correct_invalid_value, nulls=['', 'unknown'])
27
28 def download(self, engine=None, debug=False):
29 Script.download(self, engine, debug)
30 engine = self.engine
31 file_names = [ ('isotopes.csv','isotopes'),
32 ('sources.csv','sources'),
33 ('diet.csv', 'diet')
34 ]
35
36 engine.download_files_from_archive(self.urls["zip"], [i[0] for i in file_names], filetype="zip", archivename="ECOL_92_97")
37
38 for(filename,tablename) in file_names:
39 data_path = self.engine.format_filename(filename)
40 self.engine.auto_create_table(Table(str(tablename), cleanup=self.cleanup_func_table),filename=filename)
41 self.engine.insert_data_from_file(data_path)
42
43 SCRIPT = main()
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scripts/flensburg_food_web.py b/scripts/flensburg_food_web.py
--- a/scripts/flensburg_food_web.py
+++ b/scripts/flensburg_food_web.py
@@ -14,8 +14,9 @@
self.shortname="flensburg-food-web"
self.ref="https://figshare.com/articles/Full_Archive/3552066"
self.description="This data is of a food web for the Flensburg Fjord, a brackish shallow water inlet on the Baltic Sea, between Germany and Denmark."
+ self.keywords = []
self.retriever_minimum_version='2.0.dev'
- self.version='1.0.0'
+ self.version='1.0.1'
self.urls={"zip": "https://ndownloader.figshare.com/files/5620326"}
self.cleanup_func_table = Cleanup(correct_invalid_value, missing_values=[''])
@@ -39,4 +40,4 @@
self.engine.auto_create_table(Table(str(tablename), cleanup=self.cleanup_func_table),filename=filename)
self.engine.insert_data_from_file(data_path)
-SCRIPT = main()
\ No newline at end of file
+SCRIPT = main()
diff --git a/scripts/socean_diet_data.py b/scripts/socean_diet_data.py
--- a/scripts/socean_diet_data.py
+++ b/scripts/socean_diet_data.py
@@ -14,8 +14,9 @@
self.shortname="socean-diet-data"
self.ref="https://figshare.com/articles/Full_Archive/3551304"
self.description="Diet-related data from published and unpublished data sets and studies"
+ self.keywords = []
self.retriever_minimum_version='2.0.dev'
- self.version='1.0.0'
+ self.version='1.0.1'
self.urls={"zip": "https://ndownloader.figshare.com/files/5618823"}
self.cleanup_func_table = Cleanup(correct_invalid_value, missing_values=['', 'unknown'])
@@ -40,4 +41,4 @@
self.engine.auto_create_table(Table(str(tablename), cleanup=self.cleanup_func_table),filename=filename)
self.engine.insert_data_from_file(data_path)
-SCRIPT = main()
\ No newline at end of file
+SCRIPT = main()
|
{"golden_diff": "diff --git a/scripts/flensburg_food_web.py b/scripts/flensburg_food_web.py\n--- a/scripts/flensburg_food_web.py\n+++ b/scripts/flensburg_food_web.py\n@@ -14,8 +14,9 @@\n self.shortname=\"flensburg-food-web\"\n self.ref=\"https://figshare.com/articles/Full_Archive/3552066\"\n self.description=\"This data is of a food web for the Flensburg Fjord, a brackish shallow water inlet on the Baltic Sea, between Germany and Denmark.\"\n+ self.keywords = []\n self.retriever_minimum_version='2.0.dev'\n- self.version='1.0.0'\n+ self.version='1.0.1'\n self.urls={\"zip\": \"https://ndownloader.figshare.com/files/5620326\"}\n self.cleanup_func_table = Cleanup(correct_invalid_value, missing_values=[''])\n \n@@ -39,4 +40,4 @@\n self.engine.auto_create_table(Table(str(tablename), cleanup=self.cleanup_func_table),filename=filename)\n self.engine.insert_data_from_file(data_path)\n \n-SCRIPT = main()\n\\ No newline at end of file\n+SCRIPT = main()\ndiff --git a/scripts/socean_diet_data.py b/scripts/socean_diet_data.py\n--- a/scripts/socean_diet_data.py\n+++ b/scripts/socean_diet_data.py\n@@ -14,8 +14,9 @@\n self.shortname=\"socean-diet-data\"\n self.ref=\"https://figshare.com/articles/Full_Archive/3551304\"\n self.description=\"Diet-related data from published and unpublished data sets and studies\"\n+ self.keywords = []\n self.retriever_minimum_version='2.0.dev'\n- self.version='1.0.0'\n+ self.version='1.0.1'\n self.urls={\"zip\": \"https://ndownloader.figshare.com/files/5618823\"}\n self.cleanup_func_table = Cleanup(correct_invalid_value, missing_values=['', 'unknown'])\n \n@@ -40,4 +41,4 @@\n self.engine.auto_create_table(Table(str(tablename), cleanup=self.cleanup_func_table),filename=filename)\n self.engine.insert_data_from_file(data_path)\n \n-SCRIPT = main()\n\\ No newline at end of file\n+SCRIPT = main()\n", "issue": "Some new datasets failing on 2.0.0\nTwo or three of the new datasets that got added today (hooray!) 
appearing to having issues on `2.0.0`.\r\n\r\n```\r\nethan@croryx:~$ pip install retriever --upgrade\r\n...\r\nethan@croryx:~$ retriever reset all\r\n...\r\nethan@croryx:~$ retriever update\r\nDownloading scripts...\r\nDownload Progress: [##############################] 100.00%\r\nThe retriever is up-to-date\r\nFailed to load script: socean_diet_data (/home/ethan/.retriever/scripts/)\r\nException: 'main' object has no attribute 'keywords' \r\nFailed to load script: flensburg_food_web (/home/ethan/.retriever/scripts/)\r\nException: 'main' object has no attribute 'keywords' \r\n\r\nethan@croryx:~$ retriever ls\r\nFailed to load script: socean_diet_data (/home/ethan/.retriever/scripts/)\r\nException: 'main' object has no attribute 'keywords' \r\nFailed to load script: flensburg_food_web (/home/ethan/.retriever/scripts/)\r\nException: 'main' object has no attribute 'keywords' \r\nFailed to load script: socean_diet_data (/home/ethan/.retriever/scripts/)\r\nException: 'main' object has no attribute 'keywords' \r\nFailed to load script: flensburg_food_web (/home/ethan/.retriever/scripts/)\r\nException: 'main' object has no attribute 'keywords' \r\nAvailable datasets : 78\r\n\r\nabalone-age breast-cancer-wi fish-parasite-hosts home-ranges mammal-life-hist mediter-basin-plant-traits plant-life-hist-eu prism-climate vertnet-amphibians \r\namniote-life-hist breed-bird-survey forest-biomass-china intertidal-abund-me mammal-masses mt-st-helens-veg plant-occur-oosting species-exctinction-rates vertnet-birds \r\nantarctic-breed-bird breed-bird-survey-50stop forest-fires-portugal iris mammal-metabolic-rate nematode-traits plant-taxonomy-us streamflow-conditions vertnet-fishes \r\naquatic-animal-excretion butterfly-population-network forest-inventory-analysis la-selva-trees mammal-super-tree ngreatplains-flowering-dates poker-hands tree-canopy-geometries vertnet-mammals \r\nbioclim car-eval forest-plots-michigan leaf-herbivory mapped-plant-quads-co NPN portal tree-demog-wghats vertnet-reptiles \r\nbiodiversity-response chytr-disease-distr forest-plots-wghats macroalgal_communities mapped-plant-quads-id nyc-tree-count portal-dev turtle-offspring-nesting wine-composition \r\nbiomass-allometry-db community-abundance-misc fray-jorge-ecology macrocystis-variation mapped-plant-quads-ks pantheria predator-prey-body-ratio veg-plots-sdl wine-quality \r\nbird-migration-data dicerandra-frutescens gentry-forest-transects mammal-community-db mapped-plant-quads-mt phytoplankton-size predator-prey-size-marine vertnet wood-density \r\nbird-size elton-traits great-basin-mammal-abundance mammal-diet marine-recruitment-data plant-comp-ok\r\n```\r\n\r\n@henrykironde suspects that these datasets only work due to improvements in `master` that were made to get this whole set of datasets running, so we may just need to update their `retriever_minimum_version` values to `2.1.dev0`.\n", "before_files": [{"content": "# -*- coding: latin-1 -*-\n#retriever\nfrom retriever.lib.templates import Script\nfrom retriever.lib.models import Table, Cleanup, correct_invalid_value\nfrom retriever import VERSION\nfrom pkg_resources import parse_version\n\nclass main(Script):\n def __init__(self, **kwargs):\n Script.__init__(self, **kwargs)\n self.title=\"Food web including metazoan parasites for a brackish shallow water ecosystem in Germany and Denmark\"\n self.citation=\"C. Dieter Zander, Neri Josten, Kim C. Detloff, Robert Poulin, John P. McLaughlin, and David W. Thieltges. 2011. 
Food web including metazoan parasites for a brackish shallow water ecosystem in Germany and Denmark. Ecology 92:2007.\"\n self.name=\"flensburg-food-web\"\n self.shortname=\"flensburg-food-web\"\n self.ref=\"https://figshare.com/articles/Full_Archive/3552066\"\n self.description=\"This data is of a food web for the Flensburg Fjord, a brackish shallow water inlet on the Baltic Sea, between Germany and Denmark.\"\n self.retriever_minimum_version='2.0.dev'\n self.version='1.0.0'\n self.urls={\"zip\": \"https://ndownloader.figshare.com/files/5620326\"}\n self.cleanup_func_table = Cleanup(correct_invalid_value, missing_values=[''])\n\n if parse_version(VERSION) <= parse_version(\"2.0.0\"):\n self.shortname = self.name\n self.name = self.title\n self.tags = self.keywords\n self.cleanup_func_table = Cleanup(correct_invalid_value, nulls=['', 'unknown'])\n\n def download(self, engine=None, debug=False):\n Script.download(self, engine, debug)\n engine = self.engine\n file_names = [ ('Flensburg_Data_Links.csv','links'),\n ('Flensburg_Data_Nodes.csv','nodes')\n ]\n\n engine.download_files_from_archive(self.urls[\"zip\"], [i[0] for i in file_names], filetype=\"zip\", archivename=\"ECOL_92_174\")\n \n for(filename,tablename) in file_names:\n data_path = self.engine.format_filename(filename)\n self.engine.auto_create_table(Table(str(tablename), cleanup=self.cleanup_func_table),filename=filename)\n self.engine.insert_data_from_file(data_path)\n\nSCRIPT = main()", "path": "scripts/flensburg_food_web.py"}, {"content": "# -*- coding: latin-1 -*-\n#retriever\nfrom retriever.lib.templates import Script\nfrom retriever.lib.models import Table, Cleanup, correct_invalid_value\nfrom retriever import VERSION\nfrom pkg_resources import parse_version\n\nclass main(Script):\n def __init__(self, **kwargs):\n Script.__init__(self, **kwargs)\n self.title=\"A Southern Ocean dietary database\"\n self.citation=\"Ben Raymond, Michelle Marshall, Gabrielle Nevitt, Chris L. Gillies, John van den Hoff, Jonathan S. Stark, Marcel Losekoot, Eric J. Woehler, and Andrew J. Constable. 2011. A Southern Ocean dietary database. 
Ecology 92:1188.\"\n self.name=\"socean-diet-data\"\n self.shortname=\"socean-diet-data\"\n self.ref=\"https://figshare.com/articles/Full_Archive/3551304\"\n self.description=\"Diet-related data from published and unpublished data sets and studies\"\n self.retriever_minimum_version='2.0.dev'\n self.version='1.0.0'\n self.urls={\"zip\": \"https://ndownloader.figshare.com/files/5618823\"}\n self.cleanup_func_table = Cleanup(correct_invalid_value, missing_values=['', 'unknown'])\n\n if parse_version(VERSION) <= parse_version(\"2.0.0\"):\n self.shortname = self.name\n self.name = self.title\n self.tags = self.keywords\n self.cleanup_func_table = Cleanup(correct_invalid_value, nulls=['', 'unknown'])\n\n def download(self, engine=None, debug=False):\n Script.download(self, engine, debug)\n engine = self.engine\n file_names = [ ('isotopes.csv','isotopes'),\n ('sources.csv','sources'), \n ('diet.csv', 'diet')\n ]\n\n engine.download_files_from_archive(self.urls[\"zip\"], [i[0] for i in file_names], filetype=\"zip\", archivename=\"ECOL_92_97\")\n \n for(filename,tablename) in file_names:\n data_path = self.engine.format_filename(filename)\n self.engine.auto_create_table(Table(str(tablename), cleanup=self.cleanup_func_table),filename=filename)\n self.engine.insert_data_from_file(data_path)\n\nSCRIPT = main()", "path": "scripts/socean_diet_data.py"}], "after_files": [{"content": "# -*- coding: latin-1 -*-\n#retriever\nfrom retriever.lib.templates import Script\nfrom retriever.lib.models import Table, Cleanup, correct_invalid_value\nfrom retriever import VERSION\nfrom pkg_resources import parse_version\n\nclass main(Script):\n def __init__(self, **kwargs):\n Script.__init__(self, **kwargs)\n self.title=\"Food web including metazoan parasites for a brackish shallow water ecosystem in Germany and Denmark\"\n self.citation=\"C. Dieter Zander, Neri Josten, Kim C. Detloff, Robert Poulin, John P. McLaughlin, and David W. Thieltges. 2011. Food web including metazoan parasites for a brackish shallow water ecosystem in Germany and Denmark. 
Ecology 92:2007.\"\n self.name=\"flensburg-food-web\"\n self.shortname=\"flensburg-food-web\"\n self.ref=\"https://figshare.com/articles/Full_Archive/3552066\"\n self.description=\"This data is of a food web for the Flensburg Fjord, a brackish shallow water inlet on the Baltic Sea, between Germany and Denmark.\"\n self.keywords = []\n self.retriever_minimum_version='2.0.dev'\n self.version='1.0.1'\n self.urls={\"zip\": \"https://ndownloader.figshare.com/files/5620326\"}\n self.cleanup_func_table = Cleanup(correct_invalid_value, missing_values=[''])\n\n if parse_version(VERSION) <= parse_version(\"2.0.0\"):\n self.shortname = self.name\n self.name = self.title\n self.tags = self.keywords\n self.cleanup_func_table = Cleanup(correct_invalid_value, nulls=['', 'unknown'])\n\n def download(self, engine=None, debug=False):\n Script.download(self, engine, debug)\n engine = self.engine\n file_names = [ ('Flensburg_Data_Links.csv','links'),\n ('Flensburg_Data_Nodes.csv','nodes')\n ]\n\n engine.download_files_from_archive(self.urls[\"zip\"], [i[0] for i in file_names], filetype=\"zip\", archivename=\"ECOL_92_174\")\n \n for(filename,tablename) in file_names:\n data_path = self.engine.format_filename(filename)\n self.engine.auto_create_table(Table(str(tablename), cleanup=self.cleanup_func_table),filename=filename)\n self.engine.insert_data_from_file(data_path)\n\nSCRIPT = main()\n", "path": "scripts/flensburg_food_web.py"}, {"content": "# -*- coding: latin-1 -*-\n#retriever\nfrom retriever.lib.templates import Script\nfrom retriever.lib.models import Table, Cleanup, correct_invalid_value\nfrom retriever import VERSION\nfrom pkg_resources import parse_version\n\nclass main(Script):\n def __init__(self, **kwargs):\n Script.__init__(self, **kwargs)\n self.title=\"A Southern Ocean dietary database\"\n self.citation=\"Ben Raymond, Michelle Marshall, Gabrielle Nevitt, Chris L. Gillies, John van den Hoff, Jonathan S. Stark, Marcel Losekoot, Eric J. Woehler, and Andrew J. Constable. 2011. A Southern Ocean dietary database. Ecology 92:1188.\"\n self.name=\"socean-diet-data\"\n self.shortname=\"socean-diet-data\"\n self.ref=\"https://figshare.com/articles/Full_Archive/3551304\"\n self.description=\"Diet-related data from published and unpublished data sets and studies\"\n self.keywords = []\n self.retriever_minimum_version='2.0.dev'\n self.version='1.0.1'\n self.urls={\"zip\": \"https://ndownloader.figshare.com/files/5618823\"}\n self.cleanup_func_table = Cleanup(correct_invalid_value, missing_values=['', 'unknown'])\n\n if parse_version(VERSION) <= parse_version(\"2.0.0\"):\n self.shortname = self.name\n self.name = self.title\n self.tags = self.keywords\n self.cleanup_func_table = Cleanup(correct_invalid_value, nulls=['', 'unknown'])\n\n def download(self, engine=None, debug=False):\n Script.download(self, engine, debug)\n engine = self.engine\n file_names = [ ('isotopes.csv','isotopes'),\n ('sources.csv','sources'), \n ('diet.csv', 'diet')\n ]\n\n engine.download_files_from_archive(self.urls[\"zip\"], [i[0] for i in file_names], filetype=\"zip\", archivename=\"ECOL_92_97\")\n \n for(filename,tablename) in file_names:\n data_path = self.engine.format_filename(filename)\n self.engine.auto_create_table(Table(str(tablename), cleanup=self.cleanup_func_table),filename=filename)\n self.engine.insert_data_from_file(data_path)\n\nSCRIPT = main()\n", "path": "scripts/socean_diet_data.py"}]}
num_tokens: 2,316 | num_tokens_diff: 515
problem_id: gh_patches_debug_31020 | source: rasdani/github-patches | task_type: git_diff | in_source_id: OpenMined__PySyft-3150
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove ZSTD
**Is your feature request related to a problem? Please describe.**
ZSTD is used for compression in our serde process. However, we don't need extra compression as we move to Protobuf.
ZSTD is also a frequent source of problems when installing PySyft, and has required various hacks to work around.
**Describe the solution you'd like**
Remove ZSTD dependency.
This will require removing the tests and its use in serde.
**Describe alternatives you've considered**
Protobuf covers compression.
**Additional context**
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `syft/serde/compression.py`
Content:
```
1 """
2 This file exists to provide one common place for all compression methods used in
3 simplifying and serializing PySyft objects.
4 """
5
6 import lz4
7 from lz4 import ( # noqa: F401
8 frame,
9 ) # needed as otherwise we will get: module 'lz4' has no attribute 'frame'
10 import zstd
11
12 from syft.exceptions import CompressionNotFoundException
13
14 # COMPRESSION SCHEME INT CODES
15 NO_COMPRESSION = 40
16 LZ4 = 41
17 ZSTD = 42
18 scheme_to_bytes = {
19 NO_COMPRESSION: NO_COMPRESSION.to_bytes(1, byteorder="big"),
20 LZ4: LZ4.to_bytes(1, byteorder="big"),
21 ZSTD: ZSTD.to_bytes(1, byteorder="big"),
22 }
23
24 ## SECTION: chosen Compression Algorithm
25
26
27 def _apply_compress_scheme(decompressed_input_bin) -> tuple:
28 """
29 Apply the selected compression scheme.
30 By default is used LZ4
31
32 Args:
33 decompressed_input_bin: the binary to be compressed
34 """
35 return apply_lz4_compression(decompressed_input_bin)
36
37
38 def apply_lz4_compression(decompressed_input_bin) -> tuple:
39 """
40 Apply LZ4 compression to the input
41
42 Args:
43 decompressed_input_bin: the binary to be compressed
44
45 Returns:
46 a tuple (compressed_result, LZ4)
47 """
48 return lz4.frame.compress(decompressed_input_bin), LZ4
49
50
51 def apply_zstd_compression(decompressed_input_bin) -> tuple:
52 """
53 Apply ZSTD compression to the input
54
55 Args:
56 decompressed_input_bin: the binary to be compressed
57
58 Returns:
59 a tuple (compressed_result, ZSTD)
60 """
61
62 return zstd.compress(decompressed_input_bin), ZSTD
63
64
65 def apply_no_compression(decompressed_input_bin) -> tuple:
66 """
67 No compression is applied to the input
68
69 Args:
70 decompressed_input_bin: the binary
71
72 Returns:
73 a tuple (the binary, LZ4)
74 """
75
76 return decompressed_input_bin, NO_COMPRESSION
77
78
79 def _compress(decompressed_input_bin: bin) -> bin:
80 """
81 This function compresses a binary using the function _apply_compress_scheme
82 if the input has been already compressed in some step, it will return it as it is
83
84 Args:
85 decompressed_input_bin (bin): binary to be compressed
86
87 Returns:
88 bin: a compressed binary
89
90 """
91 compress_stream, compress_scheme = _apply_compress_scheme(decompressed_input_bin)
92 try:
93 z = scheme_to_bytes[compress_scheme] + compress_stream
94 return z
95 except KeyError:
96 raise CompressionNotFoundException(
97 f"Compression scheme not found for compression code: {str(compress_scheme)}"
98 )
99
100
101 def _decompress(binary: bin) -> bin:
102 """
103 This function decompresses a binary using the scheme defined in the first byte of the input
104
105 Args:
106 binary (bin): a compressed binary
107
108 Returns:
109 bin: decompressed binary
110
111 """
112
113 # check the 1-byte header to check the compression scheme used
114 compress_scheme = binary[0]
115
116 # remove the 1-byte header from the input stream
117 binary = binary[1:]
118 # 1) Decompress or return the original stream
119 if compress_scheme == LZ4:
120 return lz4.frame.decompress(binary)
121 elif compress_scheme == ZSTD:
122 return zstd.decompress(binary)
123 elif compress_scheme == NO_COMPRESSION:
124 return binary
125 else:
126 raise CompressionNotFoundException(
127 f"Compression scheme not found for compression code: {str(compress_scheme)}"
128 )
129
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/syft/serde/compression.py b/syft/serde/compression.py
--- a/syft/serde/compression.py
+++ b/syft/serde/compression.py
@@ -7,18 +7,15 @@
from lz4 import ( # noqa: F401
frame,
) # needed as otherwise we will get: module 'lz4' has no attribute 'frame'
-import zstd
from syft.exceptions import CompressionNotFoundException
# COMPRESSION SCHEME INT CODES
NO_COMPRESSION = 40
LZ4 = 41
-ZSTD = 42
scheme_to_bytes = {
NO_COMPRESSION: NO_COMPRESSION.to_bytes(1, byteorder="big"),
LZ4: LZ4.to_bytes(1, byteorder="big"),
- ZSTD: ZSTD.to_bytes(1, byteorder="big"),
}
## SECTION: chosen Compression Algorithm
@@ -48,20 +45,6 @@
return lz4.frame.compress(decompressed_input_bin), LZ4
-def apply_zstd_compression(decompressed_input_bin) -> tuple:
- """
- Apply ZSTD compression to the input
-
- Args:
- decompressed_input_bin: the binary to be compressed
-
- Returns:
- a tuple (compressed_result, ZSTD)
- """
-
- return zstd.compress(decompressed_input_bin), ZSTD
-
-
def apply_no_compression(decompressed_input_bin) -> tuple:
"""
No compression is applied to the input
@@ -118,8 +101,6 @@
# 1) Decompress or return the original stream
if compress_scheme == LZ4:
return lz4.frame.decompress(binary)
- elif compress_scheme == ZSTD:
- return zstd.decompress(binary)
elif compress_scheme == NO_COMPRESSION:
return binary
else:
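
With ZSTD gone, the wire format reduces to a 1-byte scheme header followed by either an LZ4 frame or the raw payload. The stand-alone sketch below mirrors that framing; the helper names are local to the example (not the module's private `_compress`/`_decompress`) and only the `lz4` package is needed to run it:

```python
# Minimal mirror of the post-change framing in syft/serde/compression.py:
# one header byte for the scheme, then the (optionally LZ4-compressed) body.
import lz4.frame

NO_COMPRESSION, LZ4 = 40, 41  # same scheme codes as the module above

def compress(raw: bytes) -> bytes:
    return LZ4.to_bytes(1, "big") + lz4.frame.compress(raw)

def decompress(blob: bytes) -> bytes:
    scheme, body = blob[0], blob[1:]
    if scheme == LZ4:
        return lz4.frame.decompress(body)
    if scheme == NO_COMPRESSION:
        return body
    raise ValueError(f"unknown compression scheme {scheme}")

payload = b"simplified PySyft object " * 100
assert decompress(compress(payload)) == payload
```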
|
{"golden_diff": "diff --git a/syft/serde/compression.py b/syft/serde/compression.py\n--- a/syft/serde/compression.py\n+++ b/syft/serde/compression.py\n@@ -7,18 +7,15 @@\n from lz4 import ( # noqa: F401\n frame,\n ) # needed as otherwise we will get: module 'lz4' has no attribute 'frame'\n-import zstd\n \n from syft.exceptions import CompressionNotFoundException\n \n # COMPRESSION SCHEME INT CODES\n NO_COMPRESSION = 40\n LZ4 = 41\n-ZSTD = 42\n scheme_to_bytes = {\n NO_COMPRESSION: NO_COMPRESSION.to_bytes(1, byteorder=\"big\"),\n LZ4: LZ4.to_bytes(1, byteorder=\"big\"),\n- ZSTD: ZSTD.to_bytes(1, byteorder=\"big\"),\n }\n \n ## SECTION: chosen Compression Algorithm\n@@ -48,20 +45,6 @@\n return lz4.frame.compress(decompressed_input_bin), LZ4\n \n \n-def apply_zstd_compression(decompressed_input_bin) -> tuple:\n- \"\"\"\n- Apply ZSTD compression to the input\n-\n- Args:\n- decompressed_input_bin: the binary to be compressed\n-\n- Returns:\n- a tuple (compressed_result, ZSTD)\n- \"\"\"\n-\n- return zstd.compress(decompressed_input_bin), ZSTD\n-\n-\n def apply_no_compression(decompressed_input_bin) -> tuple:\n \"\"\"\n No compression is applied to the input\n@@ -118,8 +101,6 @@\n # 1) Decompress or return the original stream\n if compress_scheme == LZ4:\n return lz4.frame.decompress(binary)\n- elif compress_scheme == ZSTD:\n- return zstd.decompress(binary)\n elif compress_scheme == NO_COMPRESSION:\n return binary\n else:\n", "issue": "Remove ZSTD\n**Is your feature request related to a problem? Please describe.**\r\nZSTD is used for compression in our serde process. However we don't need extra compression as we move to Protobuf.\r\nZSTD is usually a source of problems when installing PySyft with different hacks to solve it.\r\n\r\n**Describe the solution you'd like**\r\nRemove ZSTD dependency.\r\nThis will require removing the tests and its use in serde.\r\n\r\n**Describe alternatives you've considered**\r\nProtobuf covers compression.\r\n\r\n**Additional context**\r\n\n", "before_files": [{"content": "\"\"\"\nThis file exists to provide one common place for all compression methods used in\nsimplifying and serializing PySyft objects.\n\"\"\"\n\nimport lz4\nfrom lz4 import ( # noqa: F401\n frame,\n) # needed as otherwise we will get: module 'lz4' has no attribute 'frame'\nimport zstd\n\nfrom syft.exceptions import CompressionNotFoundException\n\n# COMPRESSION SCHEME INT CODES\nNO_COMPRESSION = 40\nLZ4 = 41\nZSTD = 42\nscheme_to_bytes = {\n NO_COMPRESSION: NO_COMPRESSION.to_bytes(1, byteorder=\"big\"),\n LZ4: LZ4.to_bytes(1, byteorder=\"big\"),\n ZSTD: ZSTD.to_bytes(1, byteorder=\"big\"),\n}\n\n## SECTION: chosen Compression Algorithm\n\n\ndef _apply_compress_scheme(decompressed_input_bin) -> tuple:\n \"\"\"\n Apply the selected compression scheme.\n By default is used LZ4\n\n Args:\n decompressed_input_bin: the binary to be compressed\n \"\"\"\n return apply_lz4_compression(decompressed_input_bin)\n\n\ndef apply_lz4_compression(decompressed_input_bin) -> tuple:\n \"\"\"\n Apply LZ4 compression to the input\n\n Args:\n decompressed_input_bin: the binary to be compressed\n\n Returns:\n a tuple (compressed_result, LZ4)\n \"\"\"\n return lz4.frame.compress(decompressed_input_bin), LZ4\n\n\ndef apply_zstd_compression(decompressed_input_bin) -> tuple:\n \"\"\"\n Apply ZSTD compression to the input\n\n Args:\n decompressed_input_bin: the binary to be compressed\n\n Returns:\n a tuple (compressed_result, ZSTD)\n \"\"\"\n\n return zstd.compress(decompressed_input_bin), ZSTD\n\n\ndef 
apply_no_compression(decompressed_input_bin) -> tuple:\n \"\"\"\n No compression is applied to the input\n\n Args:\n decompressed_input_bin: the binary\n\n Returns:\n a tuple (the binary, LZ4)\n \"\"\"\n\n return decompressed_input_bin, NO_COMPRESSION\n\n\ndef _compress(decompressed_input_bin: bin) -> bin:\n \"\"\"\n This function compresses a binary using the function _apply_compress_scheme\n if the input has been already compressed in some step, it will return it as it is\n\n Args:\n decompressed_input_bin (bin): binary to be compressed\n\n Returns:\n bin: a compressed binary\n\n \"\"\"\n compress_stream, compress_scheme = _apply_compress_scheme(decompressed_input_bin)\n try:\n z = scheme_to_bytes[compress_scheme] + compress_stream\n return z\n except KeyError:\n raise CompressionNotFoundException(\n f\"Compression scheme not found for compression code: {str(compress_scheme)}\"\n )\n\n\ndef _decompress(binary: bin) -> bin:\n \"\"\"\n This function decompresses a binary using the scheme defined in the first byte of the input\n\n Args:\n binary (bin): a compressed binary\n\n Returns:\n bin: decompressed binary\n\n \"\"\"\n\n # check the 1-byte header to check the compression scheme used\n compress_scheme = binary[0]\n\n # remove the 1-byte header from the input stream\n binary = binary[1:]\n # 1) Decompress or return the original stream\n if compress_scheme == LZ4:\n return lz4.frame.decompress(binary)\n elif compress_scheme == ZSTD:\n return zstd.decompress(binary)\n elif compress_scheme == NO_COMPRESSION:\n return binary\n else:\n raise CompressionNotFoundException(\n f\"Compression scheme not found for compression code: {str(compress_scheme)}\"\n )\n", "path": "syft/serde/compression.py"}], "after_files": [{"content": "\"\"\"\nThis file exists to provide one common place for all compression methods used in\nsimplifying and serializing PySyft objects.\n\"\"\"\n\nimport lz4\nfrom lz4 import ( # noqa: F401\n frame,\n) # needed as otherwise we will get: module 'lz4' has no attribute 'frame'\n\nfrom syft.exceptions import CompressionNotFoundException\n\n# COMPRESSION SCHEME INT CODES\nNO_COMPRESSION = 40\nLZ4 = 41\nscheme_to_bytes = {\n NO_COMPRESSION: NO_COMPRESSION.to_bytes(1, byteorder=\"big\"),\n LZ4: LZ4.to_bytes(1, byteorder=\"big\"),\n}\n\n## SECTION: chosen Compression Algorithm\n\n\ndef _apply_compress_scheme(decompressed_input_bin) -> tuple:\n \"\"\"\n Apply the selected compression scheme.\n By default is used LZ4\n\n Args:\n decompressed_input_bin: the binary to be compressed\n \"\"\"\n return apply_lz4_compression(decompressed_input_bin)\n\n\ndef apply_lz4_compression(decompressed_input_bin) -> tuple:\n \"\"\"\n Apply LZ4 compression to the input\n\n Args:\n decompressed_input_bin: the binary to be compressed\n\n Returns:\n a tuple (compressed_result, LZ4)\n \"\"\"\n return lz4.frame.compress(decompressed_input_bin), LZ4\n\n\ndef apply_no_compression(decompressed_input_bin) -> tuple:\n \"\"\"\n No compression is applied to the input\n\n Args:\n decompressed_input_bin: the binary\n\n Returns:\n a tuple (the binary, LZ4)\n \"\"\"\n\n return decompressed_input_bin, NO_COMPRESSION\n\n\ndef _compress(decompressed_input_bin: bin) -> bin:\n \"\"\"\n This function compresses a binary using the function _apply_compress_scheme\n if the input has been already compressed in some step, it will return it as it is\n\n Args:\n decompressed_input_bin (bin): binary to be compressed\n\n Returns:\n bin: a compressed binary\n\n \"\"\"\n compress_stream, compress_scheme = 
_apply_compress_scheme(decompressed_input_bin)\n try:\n z = scheme_to_bytes[compress_scheme] + compress_stream\n return z\n except KeyError:\n raise CompressionNotFoundException(\n f\"Compression scheme not found for compression code: {str(compress_scheme)}\"\n )\n\n\ndef _decompress(binary: bin) -> bin:\n \"\"\"\n This function decompresses a binary using the scheme defined in the first byte of the input\n\n Args:\n binary (bin): a compressed binary\n\n Returns:\n bin: decompressed binary\n\n \"\"\"\n\n # check the 1-byte header to check the compression scheme used\n compress_scheme = binary[0]\n\n # remove the 1-byte header from the input stream\n binary = binary[1:]\n # 1) Decompress or return the original stream\n if compress_scheme == LZ4:\n return lz4.frame.decompress(binary)\n elif compress_scheme == NO_COMPRESSION:\n return binary\n else:\n raise CompressionNotFoundException(\n f\"Compression scheme not found for compression code: {str(compress_scheme)}\"\n )\n", "path": "syft/serde/compression.py"}]}
num_tokens: 1,444 | num_tokens_diff: 414
problem_id: gh_patches_debug_17202 | source: rasdani/github-patches | task_type: git_diff | in_source_id: e2nIEE__pandapower-1007
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug in WLSZeroInjectionConstraintsAlgorithm
The code at line 135 of https://github.com/e2nIEE/pandapower/blob/develop/pandapower/estimation/algorithm/base.py
fails when `zero_injection_bus` is empty.
--- END ISSUE ---
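
Concretely, the statement right after that `np.argwhere(...)` call writes `np.NaN` through two integer index arrays at once; when `zero_injection_bus` is empty, its shape `(0,)` cannot be broadcast against the four-element column list, and NumPy raises an `IndexError`. A minimal reproduction with stand-in values for `eppci["bus"]` and the column constants (the guard at the end is only one possible fix and may differ from the patch applied upstream):

```python
import numpy as np

bus = np.zeros((4, 20))                       # stand-in for eppci["bus"]
zero_injection_bus = np.array([], dtype=int)  # what argwhere(...).ravel() yields with no flagged bus
cols = [12, 13, 14, 15]                       # stand-in for bus_cols + P, P_STD, Q, Q_STD

try:
    bus[zero_injection_bus, cols] = np.nan    # mirrors the failing assignment in base.py
except IndexError as err:
    print(err)  # shape mismatch: indexing arrays could not be broadcast together ...

# One possible guard: np.ix_ forms the outer product of row and column indices,
# which is simply an empty selection when there are no zero-injection buses.
bus[np.ix_(zero_injection_bus, cols)] = np.nan
```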
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pandapower/estimation/algorithm/base.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright (c) 2016-2020 by University of Kassel and Fraunhofer Institute for Energy Economics
4 # and Energy System Technology (IEE), Kassel. All rights reserved.
5
6 import numpy as np
7 from scipy.sparse import csr_matrix, vstack, hstack
8 from scipy.sparse.linalg import spsolve
9
10 from pandapower.estimation.algorithm.estimator import BaseEstimatorIRWLS, get_estimator
11 from pandapower.estimation.algorithm.matrix_base import BaseAlgebra, \
12 BaseAlgebraZeroInjConstraints
13 from pandapower.estimation.idx_bus import ZERO_INJ_FLAG, P, P_STD, Q, Q_STD
14 from pandapower.estimation.ppc_conversion import ExtendedPPCI
15 from pandapower.pypower.idx_bus import bus_cols
16
17 try:
18 import pplog as logging
19 except ImportError:
20 import logging
21 std_logger = logging.getLogger(__name__)
22
23 __all__ = ["WLSAlgorithm", "WLSZeroInjectionConstraintsAlgorithm", "IRWLSAlgorithm"]
24
25
26 class BaseAlgorithm:
27 def __init__(self, tolerance, maximum_iterations, logger=std_logger):
28 self.tolerance = tolerance
29 self.max_iterations = maximum_iterations
30 self.logger = logger
31 self.successful = False
32 self.iterations = None
33
34 # Parameters for estimate
35 self.eppci = None
36 self.pp_meas_indices = None
37
38 def check_observability(self, eppci: ExtendedPPCI, z):
39 # Check if observability criterion is fulfilled and the state estimation is possible
40 if len(z) < 2 * eppci["bus"].shape[0] - 1:
41 self.logger.error("System is not observable (cancelling)")
42 self.logger.error("Measurements available: %d. Measurements required: %d" %
43 (len(z), 2 * eppci["bus"].shape[0] - 1))
44 raise UserWarning("Measurements available: %d. Measurements required: %d" %
45 (len(z), 2 * eppci["bus"].shape[0] - 1))
46
47 def check_result(self, current_error, cur_it):
48 # print output for results
49 if current_error <= self.tolerance:
50 self.successful = True
51 self.logger.debug("State Estimation successful ({:d} iterations)".format(cur_it))
52 else:
53 self.successful = False
54 self.logger.debug("State Estimation not successful ({:d}/{:d} iterations)".format(cur_it,
55 self.max_iterations))
56
57 def initialize(self, eppci: ExtendedPPCI):
58 # Check observability
59 self.eppci = eppci
60 self.pp_meas_indices = eppci.pp_meas_indices
61 self.check_observability(eppci, eppci.z)
62
63 def estimate(self, ppci: ExtendedPPCI, **kwargs):
64 # Must be implemented individually!!
65 pass
66
67
68 class WLSAlgorithm(BaseAlgorithm):
69 def __init__(self, tolerance, maximum_iterations, logger=std_logger):
70 super(WLSAlgorithm, self).__init__(tolerance, maximum_iterations, logger)
71
72 # Parameters for Bad data detection
73 self.R_inv = None
74 self.Gm = None
75 self.r = None
76 self.H = None
77 self.hx = None
78
79 def estimate(self, eppci, **kwargs):
80 self.initialize(eppci)
81 # matrix calculation object
82 sem = BaseAlgebra(eppci)
83
84 current_error, cur_it = 100., 0
85 # invert covariance matrix
86 r_inv = csr_matrix(np.diagflat(1 / eppci.r_cov ** 2))
87 E = eppci.E
88 while current_error > self.tolerance and cur_it < self.max_iterations:
89 self.logger.debug("Starting iteration {:d}".format(1 + cur_it))
90 try:
91 # residual r
92 r = csr_matrix(sem.create_rx(E)).T
93
94 # jacobian matrix H
95 H = csr_matrix(sem.create_hx_jacobian(E))
96
97 # gain matrix G_m
98 # G_m = H^t * R^-1 * H
99 G_m = H.T * (r_inv * H)
100
101 # state vector difference d_E
102 # d_E = G_m^-1 * (H' * R^-1 * r)
103 d_E = spsolve(G_m, H.T * (r_inv * r))
104
105 # Update E with d_E
106 E += d_E.ravel()
107 eppci.update_E(E)
108
109 # prepare next iteration
110 cur_it += 1
111 current_error = np.max(np.abs(d_E))
112 self.logger.debug("Current error: {:.7f}".format(current_error))
113 except np.linalg.linalg.LinAlgError:
114 self.logger.error("A problem appeared while using the linear algebra methods."
115 "Check and change the measurement set.")
116 return False
117
118 # check if the estimation is successfull
119 self.check_result(current_error, cur_it)
120 if self.successful:
121 # store variables required for chi^2 and r_N_max test:
122 self.R_inv = r_inv.toarray()
123 self.Gm = G_m.toarray()
124 self.r = r.toarray()
125 self.H = H.toarray()
126 # create h(x) for the current iteration
127 self.hx = sem.create_hx(eppci.E)
128 return eppci
129
130
131 class WLSZeroInjectionConstraintsAlgorithm(BaseAlgorithm):
132 def estimate(self, eppci, **kwargs):
133 # state vector built from delta, |V| and zero injections
134 # Find pq bus with zero p,q and shunt admittance
135 zero_injection_bus = np.argwhere(eppci["bus"][:, bus_cols + ZERO_INJ_FLAG] == True).ravel()
136 eppci["bus"][zero_injection_bus, [bus_cols + P, bus_cols + P_STD, bus_cols + Q, bus_cols + Q_STD]] = np.NaN
137 # Withn pq buses with zero injection identify those who have also no p or q measurement
138 p_zero_injections = zero_injection_bus
139 q_zero_injections = zero_injection_bus
140 new_states = np.zeros(len(p_zero_injections) + len(q_zero_injections))
141
142 num_bus = eppci["bus"].shape[0]
143
144 # matrix calculation object
145 sem = BaseAlgebraZeroInjConstraints(eppci)
146
147 current_error, cur_it = 100., 0
148 r_inv = csr_matrix((np.diagflat(1 / eppci.r_cov) ** 2))
149 E = eppci.E
150 # update the E matrix
151 E_ext = np.r_[eppci.E, new_states]
152
153 while current_error > self.tolerance and cur_it < self.max_iterations:
154 self.logger.debug("Starting iteration {:d}".format(1 + cur_it))
155 try:
156 c_x = sem.create_cx(E, p_zero_injections, q_zero_injections)
157
158 # residual r
159 r = csr_matrix(sem.create_rx(E)).T
160 c_rxh = csr_matrix(c_x).T
161
162 # jacobian matrix H
163 H_temp = sem.create_hx_jacobian(E)
164 C_temp = sem.create_cx_jacobian(E, p_zero_injections, q_zero_injections)
165 H, C = csr_matrix(H_temp), csr_matrix(C_temp)
166
167 # gain matrix G_m
168 # G_m = H^t * R^-1 * H
169 G_m = H.T * (r_inv * H)
170
171 # building a new gain matrix for new constraints.
172 A_1 = vstack([G_m, C])
173 c_ax = hstack([C, np.zeros((C.shape[0], C.shape[0]))])
174 c_xT = c_ax.T
175 M_tx = csr_matrix(hstack((A_1, c_xT))) # again adding to the new gain matrix
176 rhs = H.T * (r_inv * r) # original right hand side
177 C_rhs = vstack((rhs, -c_rxh)) # creating the righ hand side with new constraints
178
179 # state vector difference d_E and update E
180 d_E_ext = spsolve(M_tx, C_rhs)
181 E_ext += d_E_ext.ravel()
182 E = E_ext[:E.shape[0]]
183 eppci.update_E(E)
184
185 # prepare next iteration
186 cur_it += 1
187 current_error = np.max(np.abs(d_E_ext[:len(eppci.non_slack_buses) + num_bus]))
188 self.logger.debug("Current error: {:.7f}".format(current_error))
189 except np.linalg.linalg.LinAlgError:
190 self.logger.error("A problem appeared while using the linear algebra methods."
191 "Check and change the measurement set.")
192 return False
193
194 # check if the estimation is successfull
195 self.check_result(current_error, cur_it)
196 return eppci
197
198
199 class IRWLSAlgorithm(BaseAlgorithm):
200 def estimate(self, eppci, estimator="wls", **kwargs):
201 self.initialize(eppci)
202
203 # matrix calculation object
204 sem = get_estimator(BaseEstimatorIRWLS, estimator)(eppci, **kwargs)
205
206 current_error, cur_it = 100., 0
207 E = eppci.E
208 while current_error > self.tolerance and cur_it < self.max_iterations:
209 self.logger.debug("Starting iteration {:d}".format(1 + cur_it))
210 try:
211 # residual r
212 r = csr_matrix(sem.create_rx(E)).T
213
214 # jacobian matrix H
215 H = csr_matrix(sem.create_hx_jacobian(E))
216
217 # gain matrix G_m
218 # G_m = H^t * Phi * H
219 phi = csr_matrix(sem.create_phi(E))
220 G_m = H.T * (phi * H)
221
222 # state vector difference d_E and update E
223 d_E = spsolve(G_m, H.T * (phi * r))
224 E += d_E.ravel()
225 eppci.update_E(E)
226
227 # prepare next iteration
228 cur_it += 1
229 current_error = np.max(np.abs(d_E))
230 self.logger.debug("Current error: {:.7f}".format(current_error))
231 except np.linalg.linalg.LinAlgError:
232 self.logger.error("A problem appeared while using the linear algebra methods."
233 "Check and change the measurement set.")
234 return False
235
236 # check if the estimation is successfull
237 self.check_result(current_error, cur_it)
238 # update V/delta
239 return eppci
240
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pandapower/estimation/algorithm/base.py b/pandapower/estimation/algorithm/base.py
--- a/pandapower/estimation/algorithm/base.py
+++ b/pandapower/estimation/algorithm/base.py
@@ -132,6 +132,8 @@
def estimate(self, eppci, **kwargs):
# state vector built from delta, |V| and zero injections
# Find pq bus with zero p,q and shunt admittance
+ if not np.any(eppci["bus"][:, bus_cols + ZERO_INJ_FLAG]):
+ raise UserWarning("Network has no bus with zero injections! Please use WLS instead!")
zero_injection_bus = np.argwhere(eppci["bus"][:, bus_cols + ZERO_INJ_FLAG] == True).ravel()
eppci["bus"][zero_injection_bus, [bus_cols + P, bus_cols + P_STD, bus_cols + Q, bus_cols + Q_STD]] = np.NaN
# Withn pq buses with zero injection identify those who have also no p or q measurement
|
{"golden_diff": "diff --git a/pandapower/estimation/algorithm/base.py b/pandapower/estimation/algorithm/base.py\n--- a/pandapower/estimation/algorithm/base.py\n+++ b/pandapower/estimation/algorithm/base.py\n@@ -132,6 +132,8 @@\n def estimate(self, eppci, **kwargs):\n # state vector built from delta, |V| and zero injections\n # Find pq bus with zero p,q and shunt admittance\n+ if not np.any(eppci[\"bus\"][:, bus_cols + ZERO_INJ_FLAG]):\n+ raise UserWarning(\"Network has no bus with zero injections! Please use WLS instead!\")\n zero_injection_bus = np.argwhere(eppci[\"bus\"][:, bus_cols + ZERO_INJ_FLAG] == True).ravel()\n eppci[\"bus\"][zero_injection_bus, [bus_cols + P, bus_cols + P_STD, bus_cols + Q, bus_cols + Q_STD]] = np.NaN\n # Withn pq buses with zero injection identify those who have also no p or q measurement\n", "issue": "bug in WLSZeroInjectionConstraintsAlgorithm\ncode in line 135 of the file, https://github.com/e2nIEE/pandapower/blob/develop/pandapower/estimation/algorithm/base.py\r\n\r\nbugs when zero_injection_bus is empty.\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2020 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n\nimport numpy as np\nfrom scipy.sparse import csr_matrix, vstack, hstack\nfrom scipy.sparse.linalg import spsolve\n\nfrom pandapower.estimation.algorithm.estimator import BaseEstimatorIRWLS, get_estimator\nfrom pandapower.estimation.algorithm.matrix_base import BaseAlgebra, \\\n BaseAlgebraZeroInjConstraints\nfrom pandapower.estimation.idx_bus import ZERO_INJ_FLAG, P, P_STD, Q, Q_STD\nfrom pandapower.estimation.ppc_conversion import ExtendedPPCI\nfrom pandapower.pypower.idx_bus import bus_cols\n\ntry:\n import pplog as logging\nexcept ImportError:\n import logging\nstd_logger = logging.getLogger(__name__)\n\n__all__ = [\"WLSAlgorithm\", \"WLSZeroInjectionConstraintsAlgorithm\", \"IRWLSAlgorithm\"]\n\n\nclass BaseAlgorithm:\n def __init__(self, tolerance, maximum_iterations, logger=std_logger):\n self.tolerance = tolerance\n self.max_iterations = maximum_iterations\n self.logger = logger\n self.successful = False\n self.iterations = None\n\n # Parameters for estimate\n self.eppci = None\n self.pp_meas_indices = None\n\n def check_observability(self, eppci: ExtendedPPCI, z):\n # Check if observability criterion is fulfilled and the state estimation is possible\n if len(z) < 2 * eppci[\"bus\"].shape[0] - 1:\n self.logger.error(\"System is not observable (cancelling)\")\n self.logger.error(\"Measurements available: %d. Measurements required: %d\" %\n (len(z), 2 * eppci[\"bus\"].shape[0] - 1))\n raise UserWarning(\"Measurements available: %d. 
Measurements required: %d\" %\n (len(z), 2 * eppci[\"bus\"].shape[0] - 1))\n\n def check_result(self, current_error, cur_it):\n # print output for results\n if current_error <= self.tolerance:\n self.successful = True\n self.logger.debug(\"State Estimation successful ({:d} iterations)\".format(cur_it))\n else:\n self.successful = False\n self.logger.debug(\"State Estimation not successful ({:d}/{:d} iterations)\".format(cur_it,\n self.max_iterations))\n\n def initialize(self, eppci: ExtendedPPCI):\n # Check observability\n self.eppci = eppci\n self.pp_meas_indices = eppci.pp_meas_indices\n self.check_observability(eppci, eppci.z)\n\n def estimate(self, ppci: ExtendedPPCI, **kwargs):\n # Must be implemented individually!!\n pass\n\n\nclass WLSAlgorithm(BaseAlgorithm):\n def __init__(self, tolerance, maximum_iterations, logger=std_logger):\n super(WLSAlgorithm, self).__init__(tolerance, maximum_iterations, logger)\n\n # Parameters for Bad data detection\n self.R_inv = None\n self.Gm = None\n self.r = None\n self.H = None\n self.hx = None\n\n def estimate(self, eppci, **kwargs):\n self.initialize(eppci)\n # matrix calculation object\n sem = BaseAlgebra(eppci)\n\n current_error, cur_it = 100., 0\n # invert covariance matrix\n r_inv = csr_matrix(np.diagflat(1 / eppci.r_cov ** 2))\n E = eppci.E\n while current_error > self.tolerance and cur_it < self.max_iterations:\n self.logger.debug(\"Starting iteration {:d}\".format(1 + cur_it))\n try:\n # residual r\n r = csr_matrix(sem.create_rx(E)).T\n\n # jacobian matrix H\n H = csr_matrix(sem.create_hx_jacobian(E))\n\n # gain matrix G_m\n # G_m = H^t * R^-1 * H\n G_m = H.T * (r_inv * H)\n\n # state vector difference d_E\n # d_E = G_m^-1 * (H' * R^-1 * r)\n d_E = spsolve(G_m, H.T * (r_inv * r))\n\n # Update E with d_E\n E += d_E.ravel()\n eppci.update_E(E)\n\n # prepare next iteration\n cur_it += 1\n current_error = np.max(np.abs(d_E))\n self.logger.debug(\"Current error: {:.7f}\".format(current_error))\n except np.linalg.linalg.LinAlgError:\n self.logger.error(\"A problem appeared while using the linear algebra methods.\"\n \"Check and change the measurement set.\")\n return False\n\n # check if the estimation is successfull\n self.check_result(current_error, cur_it)\n if self.successful:\n # store variables required for chi^2 and r_N_max test:\n self.R_inv = r_inv.toarray()\n self.Gm = G_m.toarray()\n self.r = r.toarray()\n self.H = H.toarray()\n # create h(x) for the current iteration\n self.hx = sem.create_hx(eppci.E)\n return eppci\n\n\nclass WLSZeroInjectionConstraintsAlgorithm(BaseAlgorithm):\n def estimate(self, eppci, **kwargs):\n # state vector built from delta, |V| and zero injections\n # Find pq bus with zero p,q and shunt admittance\n zero_injection_bus = np.argwhere(eppci[\"bus\"][:, bus_cols + ZERO_INJ_FLAG] == True).ravel()\n eppci[\"bus\"][zero_injection_bus, [bus_cols + P, bus_cols + P_STD, bus_cols + Q, bus_cols + Q_STD]] = np.NaN\n # Withn pq buses with zero injection identify those who have also no p or q measurement\n p_zero_injections = zero_injection_bus\n q_zero_injections = zero_injection_bus\n new_states = np.zeros(len(p_zero_injections) + len(q_zero_injections))\n\n num_bus = eppci[\"bus\"].shape[0]\n\n # matrix calculation object\n sem = BaseAlgebraZeroInjConstraints(eppci)\n\n current_error, cur_it = 100., 0\n r_inv = csr_matrix((np.diagflat(1 / eppci.r_cov) ** 2))\n E = eppci.E\n # update the E matrix\n E_ext = np.r_[eppci.E, new_states]\n\n while current_error > self.tolerance and cur_it < self.max_iterations:\n 
self.logger.debug(\"Starting iteration {:d}\".format(1 + cur_it))\n try:\n c_x = sem.create_cx(E, p_zero_injections, q_zero_injections)\n\n # residual r\n r = csr_matrix(sem.create_rx(E)).T\n c_rxh = csr_matrix(c_x).T\n\n # jacobian matrix H\n H_temp = sem.create_hx_jacobian(E)\n C_temp = sem.create_cx_jacobian(E, p_zero_injections, q_zero_injections)\n H, C = csr_matrix(H_temp), csr_matrix(C_temp)\n\n # gain matrix G_m\n # G_m = H^t * R^-1 * H\n G_m = H.T * (r_inv * H)\n\n # building a new gain matrix for new constraints.\n A_1 = vstack([G_m, C])\n c_ax = hstack([C, np.zeros((C.shape[0], C.shape[0]))])\n c_xT = c_ax.T\n M_tx = csr_matrix(hstack((A_1, c_xT))) # again adding to the new gain matrix\n rhs = H.T * (r_inv * r) # original right hand side\n C_rhs = vstack((rhs, -c_rxh)) # creating the righ hand side with new constraints\n\n # state vector difference d_E and update E\n d_E_ext = spsolve(M_tx, C_rhs)\n E_ext += d_E_ext.ravel()\n E = E_ext[:E.shape[0]]\n eppci.update_E(E)\n\n # prepare next iteration\n cur_it += 1\n current_error = np.max(np.abs(d_E_ext[:len(eppci.non_slack_buses) + num_bus]))\n self.logger.debug(\"Current error: {:.7f}\".format(current_error))\n except np.linalg.linalg.LinAlgError:\n self.logger.error(\"A problem appeared while using the linear algebra methods.\"\n \"Check and change the measurement set.\")\n return False\n\n # check if the estimation is successfull\n self.check_result(current_error, cur_it)\n return eppci\n\n\nclass IRWLSAlgorithm(BaseAlgorithm):\n def estimate(self, eppci, estimator=\"wls\", **kwargs):\n self.initialize(eppci)\n\n # matrix calculation object\n sem = get_estimator(BaseEstimatorIRWLS, estimator)(eppci, **kwargs)\n\n current_error, cur_it = 100., 0\n E = eppci.E\n while current_error > self.tolerance and cur_it < self.max_iterations:\n self.logger.debug(\"Starting iteration {:d}\".format(1 + cur_it))\n try:\n # residual r\n r = csr_matrix(sem.create_rx(E)).T\n\n # jacobian matrix H\n H = csr_matrix(sem.create_hx_jacobian(E))\n\n # gain matrix G_m\n # G_m = H^t * Phi * H\n phi = csr_matrix(sem.create_phi(E))\n G_m = H.T * (phi * H)\n\n # state vector difference d_E and update E\n d_E = spsolve(G_m, H.T * (phi * r))\n E += d_E.ravel()\n eppci.update_E(E)\n\n # prepare next iteration\n cur_it += 1\n current_error = np.max(np.abs(d_E))\n self.logger.debug(\"Current error: {:.7f}\".format(current_error))\n except np.linalg.linalg.LinAlgError:\n self.logger.error(\"A problem appeared while using the linear algebra methods.\"\n \"Check and change the measurement set.\")\n return False\n\n # check if the estimation is successfull\n self.check_result(current_error, cur_it)\n # update V/delta\n return eppci\n", "path": "pandapower/estimation/algorithm/base.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2020 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. 
All rights reserved.\n\nimport numpy as np\nfrom scipy.sparse import csr_matrix, vstack, hstack\nfrom scipy.sparse.linalg import spsolve\n\nfrom pandapower.estimation.algorithm.estimator import BaseEstimatorIRWLS, get_estimator\nfrom pandapower.estimation.algorithm.matrix_base import BaseAlgebra, \\\n BaseAlgebraZeroInjConstraints\nfrom pandapower.estimation.idx_bus import ZERO_INJ_FLAG, P, P_STD, Q, Q_STD\nfrom pandapower.estimation.ppc_conversion import ExtendedPPCI\nfrom pandapower.pypower.idx_bus import bus_cols\n\ntry:\n import pplog as logging\nexcept ImportError:\n import logging\nstd_logger = logging.getLogger(__name__)\n\n__all__ = [\"WLSAlgorithm\", \"WLSZeroInjectionConstraintsAlgorithm\", \"IRWLSAlgorithm\"]\n\n\nclass BaseAlgorithm:\n def __init__(self, tolerance, maximum_iterations, logger=std_logger):\n self.tolerance = tolerance\n self.max_iterations = maximum_iterations\n self.logger = logger\n self.successful = False\n self.iterations = None\n\n # Parameters for estimate\n self.eppci = None\n self.pp_meas_indices = None\n\n def check_observability(self, eppci: ExtendedPPCI, z):\n # Check if observability criterion is fulfilled and the state estimation is possible\n if len(z) < 2 * eppci[\"bus\"].shape[0] - 1:\n self.logger.error(\"System is not observable (cancelling)\")\n self.logger.error(\"Measurements available: %d. Measurements required: %d\" %\n (len(z), 2 * eppci[\"bus\"].shape[0] - 1))\n raise UserWarning(\"Measurements available: %d. Measurements required: %d\" %\n (len(z), 2 * eppci[\"bus\"].shape[0] - 1))\n\n def check_result(self, current_error, cur_it):\n # print output for results\n if current_error <= self.tolerance:\n self.successful = True\n self.logger.debug(\"State Estimation successful ({:d} iterations)\".format(cur_it))\n else:\n self.successful = False\n self.logger.debug(\"State Estimation not successful ({:d}/{:d} iterations)\".format(cur_it,\n self.max_iterations))\n\n def initialize(self, eppci: ExtendedPPCI):\n # Check observability\n self.eppci = eppci\n self.pp_meas_indices = eppci.pp_meas_indices\n self.check_observability(eppci, eppci.z)\n\n def estimate(self, ppci: ExtendedPPCI, **kwargs):\n # Must be implemented individually!!\n pass\n\n\nclass WLSAlgorithm(BaseAlgorithm):\n def __init__(self, tolerance, maximum_iterations, logger=std_logger):\n super(WLSAlgorithm, self).__init__(tolerance, maximum_iterations, logger)\n\n # Parameters for Bad data detection\n self.R_inv = None\n self.Gm = None\n self.r = None\n self.H = None\n self.hx = None\n\n def estimate(self, eppci, **kwargs):\n self.initialize(eppci)\n # matrix calculation object\n sem = BaseAlgebra(eppci)\n\n current_error, cur_it = 100., 0\n # invert covariance matrix\n r_inv = csr_matrix(np.diagflat(1 / eppci.r_cov ** 2))\n E = eppci.E\n while current_error > self.tolerance and cur_it < self.max_iterations:\n self.logger.debug(\"Starting iteration {:d}\".format(1 + cur_it))\n try:\n # residual r\n r = csr_matrix(sem.create_rx(E)).T\n\n # jacobian matrix H\n H = csr_matrix(sem.create_hx_jacobian(E))\n\n # gain matrix G_m\n # G_m = H^t * R^-1 * H\n G_m = H.T * (r_inv * H)\n\n # state vector difference d_E\n # d_E = G_m^-1 * (H' * R^-1 * r)\n d_E = spsolve(G_m, H.T * (r_inv * r))\n\n # Update E with d_E\n E += d_E.ravel()\n eppci.update_E(E)\n\n # prepare next iteration\n cur_it += 1\n current_error = np.max(np.abs(d_E))\n self.logger.debug(\"Current error: {:.7f}\".format(current_error))\n except np.linalg.linalg.LinAlgError:\n self.logger.error(\"A problem appeared while using the 
linear algebra methods.\"\n \"Check and change the measurement set.\")\n return False\n\n # check if the estimation is successfull\n self.check_result(current_error, cur_it)\n if self.successful:\n # store variables required for chi^2 and r_N_max test:\n self.R_inv = r_inv.toarray()\n self.Gm = G_m.toarray()\n self.r = r.toarray()\n self.H = H.toarray()\n # create h(x) for the current iteration\n self.hx = sem.create_hx(eppci.E)\n return eppci\n\n\nclass WLSZeroInjectionConstraintsAlgorithm(BaseAlgorithm):\n def estimate(self, eppci, **kwargs):\n # state vector built from delta, |V| and zero injections\n # Find pq bus with zero p,q and shunt admittance\n if not np.any(eppci[\"bus\"][:, bus_cols + ZERO_INJ_FLAG]):\n raise UserWarning(\"Network has no bus with zero injections! Please use WLS instead!\")\n zero_injection_bus = np.argwhere(eppci[\"bus\"][:, bus_cols + ZERO_INJ_FLAG] == True).ravel()\n eppci[\"bus\"][zero_injection_bus, [bus_cols + P, bus_cols + P_STD, bus_cols + Q, bus_cols + Q_STD]] = np.NaN\n # Withn pq buses with zero injection identify those who have also no p or q measurement\n p_zero_injections = zero_injection_bus\n q_zero_injections = zero_injection_bus\n new_states = np.zeros(len(p_zero_injections) + len(q_zero_injections))\n\n num_bus = eppci[\"bus\"].shape[0]\n\n # matrix calculation object\n sem = BaseAlgebraZeroInjConstraints(eppci)\n\n current_error, cur_it = 100., 0\n r_inv = csr_matrix((np.diagflat(1 / eppci.r_cov) ** 2))\n E = eppci.E\n # update the E matrix\n E_ext = np.r_[eppci.E, new_states]\n\n while current_error > self.tolerance and cur_it < self.max_iterations:\n self.logger.debug(\"Starting iteration {:d}\".format(1 + cur_it))\n try:\n c_x = sem.create_cx(E, p_zero_injections, q_zero_injections)\n\n # residual r\n r = csr_matrix(sem.create_rx(E)).T\n c_rxh = csr_matrix(c_x).T\n\n # jacobian matrix H\n H_temp = sem.create_hx_jacobian(E)\n C_temp = sem.create_cx_jacobian(E, p_zero_injections, q_zero_injections)\n H, C = csr_matrix(H_temp), csr_matrix(C_temp)\n\n # gain matrix G_m\n # G_m = H^t * R^-1 * H\n G_m = H.T * (r_inv * H)\n\n # building a new gain matrix for new constraints.\n A_1 = vstack([G_m, C])\n c_ax = hstack([C, np.zeros((C.shape[0], C.shape[0]))])\n c_xT = c_ax.T\n M_tx = csr_matrix(hstack((A_1, c_xT))) # again adding to the new gain matrix\n rhs = H.T * (r_inv * r) # original right hand side\n C_rhs = vstack((rhs, -c_rxh)) # creating the righ hand side with new constraints\n\n # state vector difference d_E and update E\n d_E_ext = spsolve(M_tx, C_rhs)\n E_ext += d_E_ext.ravel()\n E = E_ext[:E.shape[0]]\n eppci.update_E(E)\n\n # prepare next iteration\n cur_it += 1\n current_error = np.max(np.abs(d_E_ext[:len(eppci.non_slack_buses) + num_bus]))\n self.logger.debug(\"Current error: {:.7f}\".format(current_error))\n except np.linalg.linalg.LinAlgError:\n self.logger.error(\"A problem appeared while using the linear algebra methods.\"\n \"Check and change the measurement set.\")\n return False\n\n # check if the estimation is successfull\n self.check_result(current_error, cur_it)\n return eppci\n\n\nclass IRWLSAlgorithm(BaseAlgorithm):\n def estimate(self, eppci, estimator=\"wls\", **kwargs):\n self.initialize(eppci)\n\n # matrix calculation object\n sem = get_estimator(BaseEstimatorIRWLS, estimator)(eppci, **kwargs)\n\n current_error, cur_it = 100., 0\n E = eppci.E\n while current_error > self.tolerance and cur_it < self.max_iterations:\n self.logger.debug(\"Starting iteration {:d}\".format(1 + cur_it))\n try:\n # residual r\n r = 
csr_matrix(sem.create_rx(E)).T\n\n # jacobian matrix H\n H = csr_matrix(sem.create_hx_jacobian(E))\n\n # gain matrix G_m\n # G_m = H^t * Phi * H\n phi = csr_matrix(sem.create_phi(E))\n G_m = H.T * (phi * H)\n\n # state vector difference d_E and update E\n d_E = spsolve(G_m, H.T * (phi * r))\n E += d_E.ravel()\n eppci.update_E(E)\n\n # prepare next iteration\n cur_it += 1\n current_error = np.max(np.abs(d_E))\n self.logger.debug(\"Current error: {:.7f}\".format(current_error))\n except np.linalg.linalg.LinAlgError:\n self.logger.error(\"A problem appeared while using the linear algebra methods.\"\n \"Check and change the measurement set.\")\n return False\n\n # check if the estimation is successfull\n self.check_result(current_error, cur_it)\n # update V/delta\n return eppci\n", "path": "pandapower/estimation/algorithm/base.py"}]}
| 3,251 | 242 |
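The golden diff above fixes WLSZeroInjectionConstraintsAlgorithm by checking the zero-injection flag column with `np.any` before any indexing happens, so a network without zero-injection buses fails fast with a clear message instead of continuing with an empty index array. The snippet below is a minimal numpy sketch of that guard pattern; the single-column bus table and the flag-column position are simplifications, not the real `eppci["bus"]` layout.

```python
# Toy version of the guard added in the patch; the real bus table has many more columns.
import numpy as np

ZERO_INJ_COL = 0  # assumed position of the zero-injection flag in this toy table


def select_zero_injection_buses(bus_table: np.ndarray) -> np.ndarray:
    """Return indices of zero-injection buses, refusing to run when there are none."""
    flags = bus_table[:, ZERO_INJ_COL]
    if not np.any(flags):
        # Mirrors the patch: bail out early instead of handing an empty
        # index array to code that cannot cope with it.
        raise UserWarning("Network has no bus with zero injections! Please use WLS instead!")
    return np.argwhere(flags == True).ravel()  # noqa: E712 -- matches the original style


print(select_zero_injection_buses(np.array([[0.0], [1.0], [0.0]])))  # -> [1]
# select_zero_injection_buses(np.zeros((3, 1))) would raise the UserWarning above.
```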
gh_patches_debug_18924
|
rasdani/github-patches
|
git_diff
|
nltk__nltk-2897
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BllipParser: AttributeError: 'str' object has no attribute 'decode' on Python 3.5
When calling next(BllipParser.parse()) on Python 3.5, I receive the following error:
File "/home/andrew/.local/lib/python3.5/site-packages/nltk/parse/bllip.py", line 169, in parse
_ensure_ascii(sentence)
File "/home/andrew/.local/lib/python3.5/site-packages/nltk/parse/bllip.py", line 101, in _ensure_ascii
word.decode('ascii')
AttributeError: 'str' object has no attribute 'decode'
Related to https://github.com/nltk/nltk/issues/507
NLTK v3.2.5
Python v3.5.2
OS: Ubuntu (16.04.3) running on WSL (16299.125)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nltk/parse/bllip.py`
Content:
```
1 # Natural Language Toolkit: Interface to BLLIP Parser
2 #
3 # Author: David McClosky <[email protected]>
4 #
5 # Copyright (C) 2001-2021 NLTK Project
6 # URL: <https://www.nltk.org/>
7 # For license information, see LICENSE.TXT
8
9 from nltk.parse.api import ParserI
10 from nltk.tree import Tree
11
12 """
13 Interface for parsing with BLLIP Parser. Requires the Python
14 bllipparser module. BllipParser objects can be constructed with the
15 ``BllipParser.from_unified_model_dir`` class method or manually using the
16 ``BllipParser`` constructor. The former is generally easier if you have
17 a BLLIP Parser unified model directory -- a basic model can be obtained
18 from NLTK's downloader. More unified parsing models can be obtained with
19 BLLIP Parser's ModelFetcher (run ``python -m bllipparser.ModelFetcher``
20 or see docs for ``bllipparser.ModelFetcher.download_and_install_model``).
21
22 Basic usage::
23
24 # download and install a basic unified parsing model (Wall Street Journal)
25 # sudo python -m nltk.downloader bllip_wsj_no_aux
26
27 >>> from nltk.data import find
28 >>> model_dir = find('models/bllip_wsj_no_aux').path
29 >>> bllip = BllipParser.from_unified_model_dir(model_dir)
30
31 # 1-best parsing
32 >>> sentence1 = 'British left waffles on Falklands .'.split()
33 >>> top_parse = bllip.parse_one(sentence1)
34 >>> print(top_parse)
35 (S1
36 (S
37 (NP (JJ British) (NN left))
38 (VP (VBZ waffles) (PP (IN on) (NP (NNP Falklands))))
39 (. .)))
40
41 # n-best parsing
42 >>> sentence2 = 'Time flies'.split()
43 >>> all_parses = bllip.parse_all(sentence2)
44 >>> print(len(all_parses))
45 50
46 >>> print(all_parses[0])
47 (S1 (S (NP (NNP Time)) (VP (VBZ flies))))
48
49 # incorporating external tagging constraints (None means unconstrained tag)
50 >>> constrained1 = bllip.tagged_parse([('Time', 'VB'), ('flies', 'NNS')])
51 >>> print(next(constrained1))
52 (S1 (NP (VB Time) (NNS flies)))
53 >>> constrained2 = bllip.tagged_parse([('Time', 'NN'), ('flies', None)])
54 >>> print(next(constrained2))
55 (S1 (NP (NN Time) (VBZ flies)))
56
57 References
58 ----------
59
60 - Charniak, Eugene. "A maximum-entropy-inspired parser." Proceedings of
61 the 1st North American chapter of the Association for Computational
62 Linguistics conference. Association for Computational Linguistics,
63 2000.
64
65 - Charniak, Eugene, and Mark Johnson. "Coarse-to-fine n-best parsing
66 and MaxEnt discriminative reranking." Proceedings of the 43rd Annual
67 Meeting on Association for Computational Linguistics. Association
68 for Computational Linguistics, 2005.
69
70 Known issues
71 ------------
72
73 Note that BLLIP Parser is not currently threadsafe. Since this module
74 uses a SWIG interface, it is potentially unsafe to create multiple
75 ``BllipParser`` objects in the same process. BLLIP Parser currently
76 has issues with non-ASCII text and will raise an error if given any.
77
78 See https://pypi.python.org/pypi/bllipparser/ for more information
79 on BLLIP Parser's Python interface.
80 """
81
82 __all__ = ["BllipParser"]
83
84 # this block allows this module to be imported even if bllipparser isn't
85 # available
86 try:
87 from bllipparser import RerankingParser
88 from bllipparser.RerankingParser import get_unified_model_parameters
89
90 def _ensure_bllip_import_or_error():
91 pass
92
93
94 except ImportError as ie:
95
96 def _ensure_bllip_import_or_error(ie=ie):
97 raise ImportError("Couldn't import bllipparser module: %s" % ie)
98
99
100 def _ensure_ascii(words):
101 try:
102 for i, word in enumerate(words):
103 word.decode("ascii")
104 except UnicodeDecodeError as e:
105 raise ValueError(
106 "Token %d (%r) is non-ASCII. BLLIP Parser "
107 "currently doesn't support non-ASCII inputs." % (i, word)
108 ) from e
109
110
111 def _scored_parse_to_nltk_tree(scored_parse):
112 return Tree.fromstring(str(scored_parse.ptb_parse))
113
114
115 class BllipParser(ParserI):
116 """
117 Interface for parsing with BLLIP Parser. BllipParser objects can be
118 constructed with the ``BllipParser.from_unified_model_dir`` class
119 method or manually using the ``BllipParser`` constructor.
120 """
121
122 def __init__(
123 self,
124 parser_model=None,
125 reranker_features=None,
126 reranker_weights=None,
127 parser_options=None,
128 reranker_options=None,
129 ):
130 """
131 Load a BLLIP Parser model from scratch. You'll typically want to
132 use the ``from_unified_model_dir()`` class method to construct
133 this object.
134
135 :param parser_model: Path to parser model directory
136 :type parser_model: str
137
138 :param reranker_features: Path the reranker model's features file
139 :type reranker_features: str
140
141 :param reranker_weights: Path the reranker model's weights file
142 :type reranker_weights: str
143
144 :param parser_options: optional dictionary of parser options, see
145 ``bllipparser.RerankingParser.RerankingParser.load_parser_options()``
146 for more information.
147 :type parser_options: dict(str)
148
149 :param reranker_options: optional
150 dictionary of reranker options, see
151 ``bllipparser.RerankingParser.RerankingParser.load_reranker_model()``
152 for more information.
153 :type reranker_options: dict(str)
154 """
155 _ensure_bllip_import_or_error()
156
157 parser_options = parser_options or {}
158 reranker_options = reranker_options or {}
159
160 self.rrp = RerankingParser()
161 self.rrp.load_parser_model(parser_model, **parser_options)
162 if reranker_features and reranker_weights:
163 self.rrp.load_reranker_model(
164 features_filename=reranker_features,
165 weights_filename=reranker_weights,
166 **reranker_options
167 )
168
169 def parse(self, sentence):
170 """
171 Use BLLIP Parser to parse a sentence. Takes a sentence as a list
172 of words; it will be automatically tagged with this BLLIP Parser
173 instance's tagger.
174
175 :return: An iterator that generates parse trees for the sentence
176 from most likely to least likely.
177
178 :param sentence: The sentence to be parsed
179 :type sentence: list(str)
180 :rtype: iter(Tree)
181 """
182 _ensure_ascii(sentence)
183 nbest_list = self.rrp.parse(sentence)
184 for scored_parse in nbest_list:
185 yield _scored_parse_to_nltk_tree(scored_parse)
186
187 def tagged_parse(self, word_and_tag_pairs):
188 """
189 Use BLLIP to parse a sentence. Takes a sentence as a list of
190 (word, tag) tuples; the sentence must have already been tokenized
191 and tagged. BLLIP will attempt to use the tags provided but may
192 use others if it can't come up with a complete parse subject
193 to those constraints. You may also specify a tag as ``None``
194 to leave a token's tag unconstrained.
195
196 :return: An iterator that generates parse trees for the sentence
197 from most likely to least likely.
198
199 :param sentence: Input sentence to parse as (word, tag) pairs
200 :type sentence: list(tuple(str, str))
201 :rtype: iter(Tree)
202 """
203 words = []
204 tag_map = {}
205 for i, (word, tag) in enumerate(word_and_tag_pairs):
206 words.append(word)
207 if tag is not None:
208 tag_map[i] = tag
209
210 _ensure_ascii(words)
211 nbest_list = self.rrp.parse_tagged(words, tag_map)
212 for scored_parse in nbest_list:
213 yield _scored_parse_to_nltk_tree(scored_parse)
214
215 @classmethod
216 def from_unified_model_dir(
217 cls, model_dir, parser_options=None, reranker_options=None
218 ):
219 """
220 Create a ``BllipParser`` object from a unified parsing model
221 directory. Unified parsing model directories are a standardized
222 way of storing BLLIP parser and reranker models together on disk.
223 See ``bllipparser.RerankingParser.get_unified_model_parameters()``
224 for more information about unified model directories.
225
226 :return: A ``BllipParser`` object using the parser and reranker
227 models in the model directory.
228
229 :param model_dir: Path to the unified model directory.
230 :type model_dir: str
231 :param parser_options: optional dictionary of parser options, see
232 ``bllipparser.RerankingParser.RerankingParser.load_parser_options()``
233 for more information.
234 :type parser_options: dict(str)
235 :param reranker_options: optional dictionary of reranker options, see
236 ``bllipparser.RerankingParser.RerankingParser.load_reranker_model()``
237 for more information.
238 :type reranker_options: dict(str)
239 :rtype: BllipParser
240 """
241 (
242 parser_model_dir,
243 reranker_features_filename,
244 reranker_weights_filename,
245 ) = get_unified_model_parameters(model_dir)
246 return cls(
247 parser_model_dir,
248 reranker_features_filename,
249 reranker_weights_filename,
250 parser_options,
251 reranker_options,
252 )
253
254
255 def demo():
256 """This assumes the Python module bllipparser is installed."""
257
258 # download and install a basic unified parsing model (Wall Street Journal)
259 # sudo python -m nltk.downloader bllip_wsj_no_aux
260
261 from nltk.data import find
262
263 model_dir = find("models/bllip_wsj_no_aux").path
264
265 print("Loading BLLIP Parsing models...")
266 # the easiest way to get started is to use a unified model
267 bllip = BllipParser.from_unified_model_dir(model_dir)
268 print("Done.")
269
270 sentence1 = "British left waffles on Falklands .".split()
271 sentence2 = "I saw the man with the telescope .".split()
272 # this sentence is known to fail under the WSJ parsing model
273 fail1 = "# ! ? : -".split()
274 for sentence in (sentence1, sentence2, fail1):
275 print("Sentence: %r" % " ".join(sentence))
276 try:
277 tree = next(bllip.parse(sentence))
278 print(tree)
279 except StopIteration:
280 print("(parse failed)")
281
282 # n-best parsing demo
283 for i, parse in enumerate(bllip.parse(sentence1)):
284 print("parse %d:\n%s" % (i, parse))
285
286 # using external POS tag constraints
287 print(
288 "forcing 'tree' to be 'NN':",
289 next(bllip.tagged_parse([("A", None), ("tree", "NN")])),
290 )
291 print(
292 "forcing 'A' to be 'DT' and 'tree' to be 'NNP':",
293 next(bllip.tagged_parse([("A", "DT"), ("tree", "NNP")])),
294 )
295 # constraints don't have to make sense... (though on more complicated
296 # sentences, they may cause the parse to fail)
297 print(
298 "forcing 'A' to be 'NNP':",
299 next(bllip.tagged_parse([("A", "NNP"), ("tree", None)])),
300 )
301
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nltk/parse/bllip.py b/nltk/parse/bllip.py
--- a/nltk/parse/bllip.py
+++ b/nltk/parse/bllip.py
@@ -100,11 +100,11 @@
def _ensure_ascii(words):
try:
for i, word in enumerate(words):
- word.decode("ascii")
- except UnicodeDecodeError as e:
+ word.encode("ascii")
+ except UnicodeEncodeError as e:
raise ValueError(
- "Token %d (%r) is non-ASCII. BLLIP Parser "
- "currently doesn't support non-ASCII inputs." % (i, word)
+ f"Token {i} ({word!r}) is non-ASCII. BLLIP Parser "
+ "currently doesn't support non-ASCII inputs."
) from e
@@ -163,7 +163,7 @@
self.rrp.load_reranker_model(
features_filename=reranker_features,
weights_filename=reranker_weights,
- **reranker_options
+ **reranker_options,
)
def parse(self, sentence):
|
{"golden_diff": "diff --git a/nltk/parse/bllip.py b/nltk/parse/bllip.py\n--- a/nltk/parse/bllip.py\n+++ b/nltk/parse/bllip.py\n@@ -100,11 +100,11 @@\n def _ensure_ascii(words):\n try:\n for i, word in enumerate(words):\n- word.decode(\"ascii\")\n- except UnicodeDecodeError as e:\n+ word.encode(\"ascii\")\n+ except UnicodeEncodeError as e:\n raise ValueError(\n- \"Token %d (%r) is non-ASCII. BLLIP Parser \"\n- \"currently doesn't support non-ASCII inputs.\" % (i, word)\n+ f\"Token {i} ({word!r}) is non-ASCII. BLLIP Parser \"\n+ \"currently doesn't support non-ASCII inputs.\"\n ) from e\n \n \n@@ -163,7 +163,7 @@\n self.rrp.load_reranker_model(\n features_filename=reranker_features,\n weights_filename=reranker_weights,\n- **reranker_options\n+ **reranker_options,\n )\n \n def parse(self, sentence):\n", "issue": "BllipParser: AttributeError: 'str' object has no attribute 'decode' on Python 3.5\nWhen calling next(BllipParser.parse()) on Python 3.5, I receive the following error:\r\n\r\nFile \"/home/andrew/.local/lib/python3.5/site-packages/nltk/parse/bllip.py\", line 169, in parse\r\n _ensure_ascii(sentence)\r\n File \"/home/andrew/.local/lib/python3.5/site-packages/nltk/parse/bllip.py\", line 101, in _ensure_ascii\r\n word.decode('ascii')\r\nAttributeError: 'str' object has no attribute 'decode'\r\n\r\n\r\nRelated to https://github.com/nltk/nltk/issues/507\r\n\r\nNLTK v3.2.5\r\nPython v3.5.2\r\nOS: Ubuntu (16.04.3) running on WSL (16299.125)\n", "before_files": [{"content": "# Natural Language Toolkit: Interface to BLLIP Parser\n#\n# Author: David McClosky <[email protected]>\n#\n# Copyright (C) 2001-2021 NLTK Project\n# URL: <https://www.nltk.org/>\n# For license information, see LICENSE.TXT\n\nfrom nltk.parse.api import ParserI\nfrom nltk.tree import Tree\n\n\"\"\"\nInterface for parsing with BLLIP Parser. Requires the Python\nbllipparser module. BllipParser objects can be constructed with the\n``BllipParser.from_unified_model_dir`` class method or manually using the\n``BllipParser`` constructor. The former is generally easier if you have\na BLLIP Parser unified model directory -- a basic model can be obtained\nfrom NLTK's downloader. More unified parsing models can be obtained with\nBLLIP Parser's ModelFetcher (run ``python -m bllipparser.ModelFetcher``\nor see docs for ``bllipparser.ModelFetcher.download_and_install_model``).\n\nBasic usage::\n\n # download and install a basic unified parsing model (Wall Street Journal)\n # sudo python -m nltk.downloader bllip_wsj_no_aux\n\n >>> from nltk.data import find\n >>> model_dir = find('models/bllip_wsj_no_aux').path\n >>> bllip = BllipParser.from_unified_model_dir(model_dir)\n\n # 1-best parsing\n >>> sentence1 = 'British left waffles on Falklands .'.split()\n >>> top_parse = bllip.parse_one(sentence1)\n >>> print(top_parse)\n (S1\n (S\n (NP (JJ British) (NN left))\n (VP (VBZ waffles) (PP (IN on) (NP (NNP Falklands))))\n (. .)))\n\n # n-best parsing\n >>> sentence2 = 'Time flies'.split()\n >>> all_parses = bllip.parse_all(sentence2)\n >>> print(len(all_parses))\n 50\n >>> print(all_parses[0])\n (S1 (S (NP (NNP Time)) (VP (VBZ flies))))\n\n # incorporating external tagging constraints (None means unconstrained tag)\n >>> constrained1 = bllip.tagged_parse([('Time', 'VB'), ('flies', 'NNS')])\n >>> print(next(constrained1))\n (S1 (NP (VB Time) (NNS flies)))\n >>> constrained2 = bllip.tagged_parse([('Time', 'NN'), ('flies', None)])\n >>> print(next(constrained2))\n (S1 (NP (NN Time) (VBZ flies)))\n\nReferences\n----------\n\n- Charniak, Eugene. 
\"A maximum-entropy-inspired parser.\" Proceedings of\n the 1st North American chapter of the Association for Computational\n Linguistics conference. Association for Computational Linguistics,\n 2000.\n\n- Charniak, Eugene, and Mark Johnson. \"Coarse-to-fine n-best parsing\n and MaxEnt discriminative reranking.\" Proceedings of the 43rd Annual\n Meeting on Association for Computational Linguistics. Association\n for Computational Linguistics, 2005.\n\nKnown issues\n------------\n\nNote that BLLIP Parser is not currently threadsafe. Since this module\nuses a SWIG interface, it is potentially unsafe to create multiple\n``BllipParser`` objects in the same process. BLLIP Parser currently\nhas issues with non-ASCII text and will raise an error if given any.\n\nSee https://pypi.python.org/pypi/bllipparser/ for more information\non BLLIP Parser's Python interface.\n\"\"\"\n\n__all__ = [\"BllipParser\"]\n\n# this block allows this module to be imported even if bllipparser isn't\n# available\ntry:\n from bllipparser import RerankingParser\n from bllipparser.RerankingParser import get_unified_model_parameters\n\n def _ensure_bllip_import_or_error():\n pass\n\n\nexcept ImportError as ie:\n\n def _ensure_bllip_import_or_error(ie=ie):\n raise ImportError(\"Couldn't import bllipparser module: %s\" % ie)\n\n\ndef _ensure_ascii(words):\n try:\n for i, word in enumerate(words):\n word.decode(\"ascii\")\n except UnicodeDecodeError as e:\n raise ValueError(\n \"Token %d (%r) is non-ASCII. BLLIP Parser \"\n \"currently doesn't support non-ASCII inputs.\" % (i, word)\n ) from e\n\n\ndef _scored_parse_to_nltk_tree(scored_parse):\n return Tree.fromstring(str(scored_parse.ptb_parse))\n\n\nclass BllipParser(ParserI):\n \"\"\"\n Interface for parsing with BLLIP Parser. BllipParser objects can be\n constructed with the ``BllipParser.from_unified_model_dir`` class\n method or manually using the ``BllipParser`` constructor.\n \"\"\"\n\n def __init__(\n self,\n parser_model=None,\n reranker_features=None,\n reranker_weights=None,\n parser_options=None,\n reranker_options=None,\n ):\n \"\"\"\n Load a BLLIP Parser model from scratch. You'll typically want to\n use the ``from_unified_model_dir()`` class method to construct\n this object.\n\n :param parser_model: Path to parser model directory\n :type parser_model: str\n\n :param reranker_features: Path the reranker model's features file\n :type reranker_features: str\n\n :param reranker_weights: Path the reranker model's weights file\n :type reranker_weights: str\n\n :param parser_options: optional dictionary of parser options, see\n ``bllipparser.RerankingParser.RerankingParser.load_parser_options()``\n for more information.\n :type parser_options: dict(str)\n\n :param reranker_options: optional\n dictionary of reranker options, see\n ``bllipparser.RerankingParser.RerankingParser.load_reranker_model()``\n for more information.\n :type reranker_options: dict(str)\n \"\"\"\n _ensure_bllip_import_or_error()\n\n parser_options = parser_options or {}\n reranker_options = reranker_options or {}\n\n self.rrp = RerankingParser()\n self.rrp.load_parser_model(parser_model, **parser_options)\n if reranker_features and reranker_weights:\n self.rrp.load_reranker_model(\n features_filename=reranker_features,\n weights_filename=reranker_weights,\n **reranker_options\n )\n\n def parse(self, sentence):\n \"\"\"\n Use BLLIP Parser to parse a sentence. 
Takes a sentence as a list\n of words; it will be automatically tagged with this BLLIP Parser\n instance's tagger.\n\n :return: An iterator that generates parse trees for the sentence\n from most likely to least likely.\n\n :param sentence: The sentence to be parsed\n :type sentence: list(str)\n :rtype: iter(Tree)\n \"\"\"\n _ensure_ascii(sentence)\n nbest_list = self.rrp.parse(sentence)\n for scored_parse in nbest_list:\n yield _scored_parse_to_nltk_tree(scored_parse)\n\n def tagged_parse(self, word_and_tag_pairs):\n \"\"\"\n Use BLLIP to parse a sentence. Takes a sentence as a list of\n (word, tag) tuples; the sentence must have already been tokenized\n and tagged. BLLIP will attempt to use the tags provided but may\n use others if it can't come up with a complete parse subject\n to those constraints. You may also specify a tag as ``None``\n to leave a token's tag unconstrained.\n\n :return: An iterator that generates parse trees for the sentence\n from most likely to least likely.\n\n :param sentence: Input sentence to parse as (word, tag) pairs\n :type sentence: list(tuple(str, str))\n :rtype: iter(Tree)\n \"\"\"\n words = []\n tag_map = {}\n for i, (word, tag) in enumerate(word_and_tag_pairs):\n words.append(word)\n if tag is not None:\n tag_map[i] = tag\n\n _ensure_ascii(words)\n nbest_list = self.rrp.parse_tagged(words, tag_map)\n for scored_parse in nbest_list:\n yield _scored_parse_to_nltk_tree(scored_parse)\n\n @classmethod\n def from_unified_model_dir(\n cls, model_dir, parser_options=None, reranker_options=None\n ):\n \"\"\"\n Create a ``BllipParser`` object from a unified parsing model\n directory. Unified parsing model directories are a standardized\n way of storing BLLIP parser and reranker models together on disk.\n See ``bllipparser.RerankingParser.get_unified_model_parameters()``\n for more information about unified model directories.\n\n :return: A ``BllipParser`` object using the parser and reranker\n models in the model directory.\n\n :param model_dir: Path to the unified model directory.\n :type model_dir: str\n :param parser_options: optional dictionary of parser options, see\n ``bllipparser.RerankingParser.RerankingParser.load_parser_options()``\n for more information.\n :type parser_options: dict(str)\n :param reranker_options: optional dictionary of reranker options, see\n ``bllipparser.RerankingParser.RerankingParser.load_reranker_model()``\n for more information.\n :type reranker_options: dict(str)\n :rtype: BllipParser\n \"\"\"\n (\n parser_model_dir,\n reranker_features_filename,\n reranker_weights_filename,\n ) = get_unified_model_parameters(model_dir)\n return cls(\n parser_model_dir,\n reranker_features_filename,\n reranker_weights_filename,\n parser_options,\n reranker_options,\n )\n\n\ndef demo():\n \"\"\"This assumes the Python module bllipparser is installed.\"\"\"\n\n # download and install a basic unified parsing model (Wall Street Journal)\n # sudo python -m nltk.downloader bllip_wsj_no_aux\n\n from nltk.data import find\n\n model_dir = find(\"models/bllip_wsj_no_aux\").path\n\n print(\"Loading BLLIP Parsing models...\")\n # the easiest way to get started is to use a unified model\n bllip = BllipParser.from_unified_model_dir(model_dir)\n print(\"Done.\")\n\n sentence1 = \"British left waffles on Falklands .\".split()\n sentence2 = \"I saw the man with the telescope .\".split()\n # this sentence is known to fail under the WSJ parsing model\n fail1 = \"# ! ? 
: -\".split()\n for sentence in (sentence1, sentence2, fail1):\n print(\"Sentence: %r\" % \" \".join(sentence))\n try:\n tree = next(bllip.parse(sentence))\n print(tree)\n except StopIteration:\n print(\"(parse failed)\")\n\n # n-best parsing demo\n for i, parse in enumerate(bllip.parse(sentence1)):\n print(\"parse %d:\\n%s\" % (i, parse))\n\n # using external POS tag constraints\n print(\n \"forcing 'tree' to be 'NN':\",\n next(bllip.tagged_parse([(\"A\", None), (\"tree\", \"NN\")])),\n )\n print(\n \"forcing 'A' to be 'DT' and 'tree' to be 'NNP':\",\n next(bllip.tagged_parse([(\"A\", \"DT\"), (\"tree\", \"NNP\")])),\n )\n # constraints don't have to make sense... (though on more complicated\n # sentences, they may cause the parse to fail)\n print(\n \"forcing 'A' to be 'NNP':\",\n next(bllip.tagged_parse([(\"A\", \"NNP\"), (\"tree\", None)])),\n )\n", "path": "nltk/parse/bllip.py"}], "after_files": [{"content": "# Natural Language Toolkit: Interface to BLLIP Parser\n#\n# Author: David McClosky <[email protected]>\n#\n# Copyright (C) 2001-2021 NLTK Project\n# URL: <https://www.nltk.org/>\n# For license information, see LICENSE.TXT\n\nfrom nltk.parse.api import ParserI\nfrom nltk.tree import Tree\n\n\"\"\"\nInterface for parsing with BLLIP Parser. Requires the Python\nbllipparser module. BllipParser objects can be constructed with the\n``BllipParser.from_unified_model_dir`` class method or manually using the\n``BllipParser`` constructor. The former is generally easier if you have\na BLLIP Parser unified model directory -- a basic model can be obtained\nfrom NLTK's downloader. More unified parsing models can be obtained with\nBLLIP Parser's ModelFetcher (run ``python -m bllipparser.ModelFetcher``\nor see docs for ``bllipparser.ModelFetcher.download_and_install_model``).\n\nBasic usage::\n\n # download and install a basic unified parsing model (Wall Street Journal)\n # sudo python -m nltk.downloader bllip_wsj_no_aux\n\n >>> from nltk.data import find\n >>> model_dir = find('models/bllip_wsj_no_aux').path\n >>> bllip = BllipParser.from_unified_model_dir(model_dir)\n\n # 1-best parsing\n >>> sentence1 = 'British left waffles on Falklands .'.split()\n >>> top_parse = bllip.parse_one(sentence1)\n >>> print(top_parse)\n (S1\n (S\n (NP (JJ British) (NN left))\n (VP (VBZ waffles) (PP (IN on) (NP (NNP Falklands))))\n (. .)))\n\n # n-best parsing\n >>> sentence2 = 'Time flies'.split()\n >>> all_parses = bllip.parse_all(sentence2)\n >>> print(len(all_parses))\n 50\n >>> print(all_parses[0])\n (S1 (S (NP (NNP Time)) (VP (VBZ flies))))\n\n # incorporating external tagging constraints (None means unconstrained tag)\n >>> constrained1 = bllip.tagged_parse([('Time', 'VB'), ('flies', 'NNS')])\n >>> print(next(constrained1))\n (S1 (NP (VB Time) (NNS flies)))\n >>> constrained2 = bllip.tagged_parse([('Time', 'NN'), ('flies', None)])\n >>> print(next(constrained2))\n (S1 (NP (NN Time) (VBZ flies)))\n\nReferences\n----------\n\n- Charniak, Eugene. \"A maximum-entropy-inspired parser.\" Proceedings of\n the 1st North American chapter of the Association for Computational\n Linguistics conference. Association for Computational Linguistics,\n 2000.\n\n- Charniak, Eugene, and Mark Johnson. \"Coarse-to-fine n-best parsing\n and MaxEnt discriminative reranking.\" Proceedings of the 43rd Annual\n Meeting on Association for Computational Linguistics. Association\n for Computational Linguistics, 2005.\n\nKnown issues\n------------\n\nNote that BLLIP Parser is not currently threadsafe. 
Since this module\nuses a SWIG interface, it is potentially unsafe to create multiple\n``BllipParser`` objects in the same process. BLLIP Parser currently\nhas issues with non-ASCII text and will raise an error if given any.\n\nSee https://pypi.python.org/pypi/bllipparser/ for more information\non BLLIP Parser's Python interface.\n\"\"\"\n\n__all__ = [\"BllipParser\"]\n\n# this block allows this module to be imported even if bllipparser isn't\n# available\ntry:\n from bllipparser import RerankingParser\n from bllipparser.RerankingParser import get_unified_model_parameters\n\n def _ensure_bllip_import_or_error():\n pass\n\n\nexcept ImportError as ie:\n\n def _ensure_bllip_import_or_error(ie=ie):\n raise ImportError(\"Couldn't import bllipparser module: %s\" % ie)\n\n\ndef _ensure_ascii(words):\n try:\n for i, word in enumerate(words):\n word.encode(\"ascii\")\n except UnicodeEncodeError as e:\n raise ValueError(\n f\"Token {i} ({word!r}) is non-ASCII. BLLIP Parser \"\n \"currently doesn't support non-ASCII inputs.\"\n ) from e\n\n\ndef _scored_parse_to_nltk_tree(scored_parse):\n return Tree.fromstring(str(scored_parse.ptb_parse))\n\n\nclass BllipParser(ParserI):\n \"\"\"\n Interface for parsing with BLLIP Parser. BllipParser objects can be\n constructed with the ``BllipParser.from_unified_model_dir`` class\n method or manually using the ``BllipParser`` constructor.\n \"\"\"\n\n def __init__(\n self,\n parser_model=None,\n reranker_features=None,\n reranker_weights=None,\n parser_options=None,\n reranker_options=None,\n ):\n \"\"\"\n Load a BLLIP Parser model from scratch. You'll typically want to\n use the ``from_unified_model_dir()`` class method to construct\n this object.\n\n :param parser_model: Path to parser model directory\n :type parser_model: str\n\n :param reranker_features: Path the reranker model's features file\n :type reranker_features: str\n\n :param reranker_weights: Path the reranker model's weights file\n :type reranker_weights: str\n\n :param parser_options: optional dictionary of parser options, see\n ``bllipparser.RerankingParser.RerankingParser.load_parser_options()``\n for more information.\n :type parser_options: dict(str)\n\n :param reranker_options: optional\n dictionary of reranker options, see\n ``bllipparser.RerankingParser.RerankingParser.load_reranker_model()``\n for more information.\n :type reranker_options: dict(str)\n \"\"\"\n _ensure_bllip_import_or_error()\n\n parser_options = parser_options or {}\n reranker_options = reranker_options or {}\n\n self.rrp = RerankingParser()\n self.rrp.load_parser_model(parser_model, **parser_options)\n if reranker_features and reranker_weights:\n self.rrp.load_reranker_model(\n features_filename=reranker_features,\n weights_filename=reranker_weights,\n **reranker_options,\n )\n\n def parse(self, sentence):\n \"\"\"\n Use BLLIP Parser to parse a sentence. Takes a sentence as a list\n of words; it will be automatically tagged with this BLLIP Parser\n instance's tagger.\n\n :return: An iterator that generates parse trees for the sentence\n from most likely to least likely.\n\n :param sentence: The sentence to be parsed\n :type sentence: list(str)\n :rtype: iter(Tree)\n \"\"\"\n _ensure_ascii(sentence)\n nbest_list = self.rrp.parse(sentence)\n for scored_parse in nbest_list:\n yield _scored_parse_to_nltk_tree(scored_parse)\n\n def tagged_parse(self, word_and_tag_pairs):\n \"\"\"\n Use BLLIP to parse a sentence. Takes a sentence as a list of\n (word, tag) tuples; the sentence must have already been tokenized\n and tagged. 
BLLIP will attempt to use the tags provided but may\n use others if it can't come up with a complete parse subject\n to those constraints. You may also specify a tag as ``None``\n to leave a token's tag unconstrained.\n\n :return: An iterator that generates parse trees for the sentence\n from most likely to least likely.\n\n :param sentence: Input sentence to parse as (word, tag) pairs\n :type sentence: list(tuple(str, str))\n :rtype: iter(Tree)\n \"\"\"\n words = []\n tag_map = {}\n for i, (word, tag) in enumerate(word_and_tag_pairs):\n words.append(word)\n if tag is not None:\n tag_map[i] = tag\n\n _ensure_ascii(words)\n nbest_list = self.rrp.parse_tagged(words, tag_map)\n for scored_parse in nbest_list:\n yield _scored_parse_to_nltk_tree(scored_parse)\n\n @classmethod\n def from_unified_model_dir(\n cls, model_dir, parser_options=None, reranker_options=None\n ):\n \"\"\"\n Create a ``BllipParser`` object from a unified parsing model\n directory. Unified parsing model directories are a standardized\n way of storing BLLIP parser and reranker models together on disk.\n See ``bllipparser.RerankingParser.get_unified_model_parameters()``\n for more information about unified model directories.\n\n :return: A ``BllipParser`` object using the parser and reranker\n models in the model directory.\n\n :param model_dir: Path to the unified model directory.\n :type model_dir: str\n :param parser_options: optional dictionary of parser options, see\n ``bllipparser.RerankingParser.RerankingParser.load_parser_options()``\n for more information.\n :type parser_options: dict(str)\n :param reranker_options: optional dictionary of reranker options, see\n ``bllipparser.RerankingParser.RerankingParser.load_reranker_model()``\n for more information.\n :type reranker_options: dict(str)\n :rtype: BllipParser\n \"\"\"\n (\n parser_model_dir,\n reranker_features_filename,\n reranker_weights_filename,\n ) = get_unified_model_parameters(model_dir)\n return cls(\n parser_model_dir,\n reranker_features_filename,\n reranker_weights_filename,\n parser_options,\n reranker_options,\n )\n\n\ndef demo():\n \"\"\"This assumes the Python module bllipparser is installed.\"\"\"\n\n # download and install a basic unified parsing model (Wall Street Journal)\n # sudo python -m nltk.downloader bllip_wsj_no_aux\n\n from nltk.data import find\n\n model_dir = find(\"models/bllip_wsj_no_aux\").path\n\n print(\"Loading BLLIP Parsing models...\")\n # the easiest way to get started is to use a unified model\n bllip = BllipParser.from_unified_model_dir(model_dir)\n print(\"Done.\")\n\n sentence1 = \"British left waffles on Falklands .\".split()\n sentence2 = \"I saw the man with the telescope .\".split()\n # this sentence is known to fail under the WSJ parsing model\n fail1 = \"# ! ? : -\".split()\n for sentence in (sentence1, sentence2, fail1):\n print(\"Sentence: %r\" % \" \".join(sentence))\n try:\n tree = next(bllip.parse(sentence))\n print(tree)\n except StopIteration:\n print(\"(parse failed)\")\n\n # n-best parsing demo\n for i, parse in enumerate(bllip.parse(sentence1)):\n print(\"parse %d:\\n%s\" % (i, parse))\n\n # using external POS tag constraints\n print(\n \"forcing 'tree' to be 'NN':\",\n next(bllip.tagged_parse([(\"A\", None), (\"tree\", \"NN\")])),\n )\n print(\n \"forcing 'A' to be 'DT' and 'tree' to be 'NNP':\",\n next(bllip.tagged_parse([(\"A\", \"DT\"), (\"tree\", \"NNP\")])),\n )\n # constraints don't have to make sense... 
(though on more complicated\n # sentences, they may cause the parse to fail)\n print(\n \"forcing 'A' to be 'NNP':\",\n next(bllip.tagged_parse([(\"A\", \"NNP\"), (\"tree\", None)])),\n )\n", "path": "nltk/parse/bllip.py"}]}
| 3,851 | 255 |
gh_patches_debug_10830
|
rasdani/github-patches
|
git_diff
|
Mailu__Mailu-2177
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Manage user authentication and permissions
Currently no authentication is implemented. Multiple issues will have to be tackled:
- complete permission scheme or simple admin role plus admins per domain?
- how to store user passwords (shared format between Flask-admin and dovecot)?
- how should the initial user be created?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/admin/start.py`
Content:
```
1 #!/usr/bin/python3
2
3 import os
4 import logging as log
5 import sys
6
7 log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "INFO"))
8
9 os.system("flask mailu advertise")
10 os.system("flask db upgrade")
11
12 account = os.environ.get("INITIAL_ADMIN_ACCOUNT")
13 domain = os.environ.get("INITIAL_ADMIN_DOMAIN")
14 password = os.environ.get("INITIAL_ADMIN_PW")
15
16 if account is not None and domain is not None and password is not None:
17 mode = os.environ.get("INITIAL_ADMIN_MODE", default="ifmissing")
18 log.info("Creating initial admin accout %s@%s with mode %s",account,domain,mode)
19 os.system("flask mailu admin %s %s '%s' --mode %s" % (account, domain, password, mode))
20
21 def test_DNS():
22 import dns.resolver
23 import dns.exception
24 import dns.flags
25 import dns.rdtypes
26 import dns.rdatatype
27 import dns.rdataclass
28 import time
29 # DNS stub configured to do DNSSEC enabled queries
30 resolver = dns.resolver.Resolver()
31 resolver.use_edns(0, 0, 1232)
32 resolver.flags = dns.flags.AD | dns.flags.RD
33 nameservers = resolver.nameservers
34 for ns in nameservers:
35 resolver.nameservers=[ns]
36 while True:
37 try:
38 result = resolver.query('example.org', dns.rdatatype.A, dns.rdataclass.IN, lifetime=10)
39 except Exception as e:
40 log.critical("Your DNS resolver at %s is not working (%s). Please use another resolver or enable unbound via https://setup.mailu.io.", ns, e);
41 else:
42 if result.response.flags & dns.flags.AD:
43 break
44 log.critical("Your DNS resolver at %s isn't doing DNSSEC validation; Please use another resolver or enable unbound via https://setup.mailu.io.", ns)
45 time.sleep(5)
46
47 test_DNS()
48
49 start_command="".join([
50 "gunicorn --threads ", str(os.cpu_count()),
51 " -b :80 ",
52 "--access-logfile - " if (log.root.level<=log.INFO) else "",
53 "--error-logfile - ",
54 "--preload ",
55 "'mailu:create_app()'"])
56
57 os.system(start_command)
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/core/admin/start.py b/core/admin/start.py
--- a/core/admin/start.py
+++ b/core/admin/start.py
@@ -35,7 +35,7 @@
resolver.nameservers=[ns]
while True:
try:
- result = resolver.query('example.org', dns.rdatatype.A, dns.rdataclass.IN, lifetime=10)
+ result = resolver.resolve('example.org', dns.rdatatype.A, dns.rdataclass.IN, lifetime=10)
except Exception as e:
log.critical("Your DNS resolver at %s is not working (%s). Please use another resolver or enable unbound via https://setup.mailu.io.", ns, e);
else:
|
{"golden_diff": "diff --git a/core/admin/start.py b/core/admin/start.py\n--- a/core/admin/start.py\n+++ b/core/admin/start.py\n@@ -35,7 +35,7 @@\n resolver.nameservers=[ns]\n while True:\n try:\n- result = resolver.query('example.org', dns.rdatatype.A, dns.rdataclass.IN, lifetime=10)\n+ result = resolver.resolve('example.org', dns.rdatatype.A, dns.rdataclass.IN, lifetime=10)\n except Exception as e:\n log.critical(\"Your DNS resolver at %s is not working (%s). Please use another resolver or enable unbound via https://setup.mailu.io.\", ns, e);\n else:\n", "issue": "Manage user authentication and permissions\nCurrently no authentication is implemented. Multiple issues will have to be tackled:\n- complete permission scheme or simple admin role plus admins per domain?\n- how to store user passwords (shared format between Flask-admin and dovecot)?\n- how should the initial use be created?\n\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport logging as log\nimport sys\n\nlog.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"INFO\"))\n\nos.system(\"flask mailu advertise\")\nos.system(\"flask db upgrade\")\n\naccount = os.environ.get(\"INITIAL_ADMIN_ACCOUNT\")\ndomain = os.environ.get(\"INITIAL_ADMIN_DOMAIN\")\npassword = os.environ.get(\"INITIAL_ADMIN_PW\")\n\nif account is not None and domain is not None and password is not None:\n mode = os.environ.get(\"INITIAL_ADMIN_MODE\", default=\"ifmissing\")\n log.info(\"Creating initial admin accout %s@%s with mode %s\",account,domain,mode)\n os.system(\"flask mailu admin %s %s '%s' --mode %s\" % (account, domain, password, mode))\n\ndef test_DNS():\n import dns.resolver\n import dns.exception\n import dns.flags\n import dns.rdtypes\n import dns.rdatatype\n import dns.rdataclass\n import time\n # DNS stub configured to do DNSSEC enabled queries\n resolver = dns.resolver.Resolver()\n resolver.use_edns(0, 0, 1232)\n resolver.flags = dns.flags.AD | dns.flags.RD\n nameservers = resolver.nameservers\n for ns in nameservers:\n resolver.nameservers=[ns]\n while True:\n try:\n result = resolver.query('example.org', dns.rdatatype.A, dns.rdataclass.IN, lifetime=10)\n except Exception as e:\n log.critical(\"Your DNS resolver at %s is not working (%s). 
Please use another resolver or enable unbound via https://setup.mailu.io.\", ns, e);\n else:\n if result.response.flags & dns.flags.AD:\n break\n log.critical(\"Your DNS resolver at %s isn't doing DNSSEC validation; Please use another resolver or enable unbound via https://setup.mailu.io.\", ns)\n time.sleep(5)\n\ntest_DNS()\n\nstart_command=\"\".join([\n \"gunicorn --threads \", str(os.cpu_count()),\n \" -b :80 \",\n \"--access-logfile - \" if (log.root.level<=log.INFO) else \"\",\n \"--error-logfile - \",\n \"--preload \",\n \"'mailu:create_app()'\"])\n\nos.system(start_command)\n", "path": "core/admin/start.py"}], "after_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport logging as log\nimport sys\n\nlog.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"INFO\"))\n\nos.system(\"flask mailu advertise\")\nos.system(\"flask db upgrade\")\n\naccount = os.environ.get(\"INITIAL_ADMIN_ACCOUNT\")\ndomain = os.environ.get(\"INITIAL_ADMIN_DOMAIN\")\npassword = os.environ.get(\"INITIAL_ADMIN_PW\")\n\nif account is not None and domain is not None and password is not None:\n mode = os.environ.get(\"INITIAL_ADMIN_MODE\", default=\"ifmissing\")\n log.info(\"Creating initial admin accout %s@%s with mode %s\",account,domain,mode)\n os.system(\"flask mailu admin %s %s '%s' --mode %s\" % (account, domain, password, mode))\n\ndef test_DNS():\n import dns.resolver\n import dns.exception\n import dns.flags\n import dns.rdtypes\n import dns.rdatatype\n import dns.rdataclass\n import time\n # DNS stub configured to do DNSSEC enabled queries\n resolver = dns.resolver.Resolver()\n resolver.use_edns(0, 0, 1232)\n resolver.flags = dns.flags.AD | dns.flags.RD\n nameservers = resolver.nameservers\n for ns in nameservers:\n resolver.nameservers=[ns]\n while True:\n try:\n result = resolver.resolve('example.org', dns.rdatatype.A, dns.rdataclass.IN, lifetime=10)\n except Exception as e:\n log.critical(\"Your DNS resolver at %s is not working (%s). Please use another resolver or enable unbound via https://setup.mailu.io.\", ns, e);\n else:\n if result.response.flags & dns.flags.AD:\n break\n log.critical(\"Your DNS resolver at %s isn't doing DNSSEC validation; Please use another resolver or enable unbound via https://setup.mailu.io.\", ns)\n time.sleep(5)\n\ntest_DNS()\n\nstart_command=\"\".join([\n \"gunicorn --threads \", str(os.cpu_count()),\n \" -b :80 \",\n \"--access-logfile - \" if (log.root.level<=log.INFO) else \"\",\n \"--error-logfile - \",\n \"--preload \",\n \"'mailu:create_app()'\"])\n\nos.system(start_command)\n", "path": "core/admin/start.py"}]}
| 930 | 152 |
gh_patches_debug_1727
|
rasdani/github-patches
|
git_diff
|
mitmproxy__mitmproxy-1904
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ValueError: data must be bytes, but is str
Hi,
When I use 'e' to edit a form, sometimes I get this.
```
➜ ~ mitmproxy -b 192.168.1.2 -p 8080
Traceback (most recent call last):
File "mitmproxy/tools/console/master.py", line 292, in run
File "site-packages/urwid/main_loop.py", line 278, in run
File "site-packages/urwid/main_loop.py", line 376, in _run
File "site-packages/urwid/main_loop.py", line 682, in run
File "site-packages/urwid/main_loop.py", line 719, in _loop
File "site-packages/urwid/raw_display.py", line 393, in <lambda>
File "site-packages/urwid/raw_display.py", line 493, in parse_input
File "site-packages/urwid/main_loop.py", line 403, in _update
File "site-packages/urwid/main_loop.py", line 503, in process_input
File "mitmproxy/tools/console/window.py", line 84, in keypress
File "site-packages/urwid/container.py", line 1116, in keypress
File "mitmproxy/tools/console/statusbar.py", line 155, in keypress
File "mitmproxy/tools/console/statusbar.py", line 108, in keypress
File "mitmproxy/tools/console/statusbar.py", line 133, in prompt_execute
File "mitmproxy/tools/console/statusbar.py", line 31, in __call__
File "mitmproxy/tools/console/flowview.py", line 415, in edit
File "mitmproxy/tools/console/flowview.py", line 351, in edit_form
File "mitmproxy/tools/console/master.py", line 352, in view_grideditor
File "site-packages/blinker/base.py", line 267, in send
File "site-packages/blinker/base.py", line 267, in <listcomp>
File "mitmproxy/tools/console/master.py", line 144, in sig_push_view_state
File "site-packages/urwid/main_loop.py", line 578, in draw_screen
File "site-packages/urwid/widget.py", line 141, in cached_render
File "site-packages/urwid/container.py", line 1083, in render
File "site-packages/urwid/widget.py", line 141, in cached_render
File "site-packages/urwid/decoration.py", line 225, in render
File "site-packages/urwid/widget.py", line 141, in cached_render
File "site-packages/urwid/widget.py", line 1750, in render
File "site-packages/urwid/widget.py", line 141, in cached_render
File "site-packages/urwid/container.py", line 1083, in render
File "site-packages/urwid/widget.py", line 141, in cached_render
File "site-packages/urwid/listbox.py", line 455, in render
File "site-packages/urwid/listbox.py", line 337, in calculate_visible
File "site-packages/urwid/listbox.py", line 702, in _set_focus_complete
File "site-packages/urwid/listbox.py", line 672, in _set_focus_first_selectable
File "site-packages/urwid/listbox.py", line 340, in calculate_visible
File "mitmproxy/tools/console/grideditor/base.py", line 223, in get_focus
File "mitmproxy/tools/console/grideditor/base.py", line 77, in __init__
File "mitmproxy/tools/console/grideditor/col_bytes.py", line 33, in Display
File "mitmproxy/tools/console/grideditor/col_bytes.py", line 73, in __init__
File "mitmproxy/utils/strutils.py", line 72, in bytes_to_escaped_str
ValueError: data must be bytes, but is str
mitmproxy has crashed!
Please lodge a bug report at:
https://github.com/mitmproxy/mitmproxy
Shutting down...
```
systeminfo:
Mitmproxy version: 1.0.0
Python version: 3.5.2
Platform: Darwin-15.6.0-x86_64-i386-64bit
SSL version: OpenSSL 1.0.2j 26 Sep 2016
Mac version: 10.11.6 ('', '', '') x86_64
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/tools/console/grideditor/editors.py`
Content:
```
1 import re
2 import urwid
3 from mitmproxy import exceptions
4 from mitmproxy import flowfilter
5 from mitmproxy.addons import script
6 from mitmproxy.tools.console import common
7 from mitmproxy.tools.console.grideditor import base
8 from mitmproxy.tools.console.grideditor import col_text
9 from mitmproxy.tools.console.grideditor import col_bytes
10 from mitmproxy.tools.console.grideditor import col_subgrid
11 from mitmproxy.tools.console import signals
12 from mitmproxy.net.http import user_agents
13
14
15 class QueryEditor(base.GridEditor):
16 title = "Editing query"
17 columns = [
18 col_text.Column("Key"),
19 col_text.Column("Value")
20 ]
21
22
23 class HeaderEditor(base.GridEditor):
24 title = "Editing headers"
25 columns = [
26 col_bytes.Column("Key"),
27 col_bytes.Column("Value")
28 ]
29
30 def make_help(self):
31 h = super().make_help()
32 text = [
33 urwid.Text([("text", "Special keys:\n")])
34 ]
35 keys = [
36 ("U", "add User-Agent header"),
37 ]
38 text.extend(
39 common.format_keyvals(keys, key="key", val="text", indent=4)
40 )
41 text.append(urwid.Text([("text", "\n")]))
42 text.extend(h)
43 return text
44
45 def set_user_agent(self, k):
46 ua = user_agents.get_by_shortcut(k)
47 if ua:
48 self.walker.add_value(
49 [
50 b"User-Agent",
51 ua[2].encode()
52 ]
53 )
54
55 def handle_key(self, key):
56 if key == "U":
57 signals.status_prompt_onekey.send(
58 prompt="Add User-Agent header:",
59 keys=[(i[0], i[1]) for i in user_agents.UASTRINGS],
60 callback=self.set_user_agent,
61 )
62 return True
63
64
65 class URLEncodedFormEditor(base.GridEditor):
66 title = "Editing URL-encoded form"
67 columns = [
68 col_bytes.Column("Key"),
69 col_bytes.Column("Value")
70 ]
71
72
73 class ReplaceEditor(base.GridEditor):
74 title = "Editing replacement patterns"
75 columns = [
76 col_text.Column("Filter"),
77 col_text.Column("Regex"),
78 col_text.Column("Replacement"),
79 ]
80
81 def is_error(self, col, val):
82 if col == 0:
83 if not flowfilter.parse(val):
84 return "Invalid filter specification."
85 elif col == 1:
86 try:
87 re.compile(val)
88 except re.error:
89 return "Invalid regular expression."
90 return False
91
92
93 class SetHeadersEditor(base.GridEditor):
94 title = "Editing header set patterns"
95 columns = [
96 col_text.Column("Filter"),
97 col_text.Column("Header"),
98 col_text.Column("Value"),
99 ]
100
101 def is_error(self, col, val):
102 if col == 0:
103 if not flowfilter.parse(val):
104 return "Invalid filter specification"
105 return False
106
107 def make_help(self):
108 h = super().make_help()
109 text = [
110 urwid.Text([("text", "Special keys:\n")])
111 ]
112 keys = [
113 ("U", "add User-Agent header"),
114 ]
115 text.extend(
116 common.format_keyvals(keys, key="key", val="text", indent=4)
117 )
118 text.append(urwid.Text([("text", "\n")]))
119 text.extend(h)
120 return text
121
122 def set_user_agent(self, k):
123 ua = user_agents.get_by_shortcut(k)
124 if ua:
125 self.walker.add_value(
126 [
127 ".*",
128 b"User-Agent",
129 ua[2].encode()
130 ]
131 )
132
133 def handle_key(self, key):
134 if key == "U":
135 signals.status_prompt_onekey.send(
136 prompt="Add User-Agent header:",
137 keys=[(i[0], i[1]) for i in user_agents.UASTRINGS],
138 callback=self.set_user_agent,
139 )
140 return True
141
142
143 class PathEditor(base.GridEditor):
144 # TODO: Next row on enter?
145
146 title = "Editing URL path components"
147 columns = [
148 col_text.Column("Component"),
149 ]
150
151 def data_in(self, data):
152 return [[i] for i in data]
153
154 def data_out(self, data):
155 return [i[0] for i in data]
156
157
158 class ScriptEditor(base.GridEditor):
159 title = "Editing scripts"
160 columns = [
161 col_text.Column("Command"),
162 ]
163
164 def is_error(self, col, val):
165 try:
166 script.parse_command(val)
167 except exceptions.OptionsError as e:
168 return str(e)
169
170
171 class HostPatternEditor(base.GridEditor):
172 title = "Editing host patterns"
173 columns = [
174 col_text.Column("Regex (matched on hostname:port / ip:port)")
175 ]
176
177 def is_error(self, col, val):
178 try:
179 re.compile(val, re.IGNORECASE)
180 except re.error as e:
181 return "Invalid regex: %s" % str(e)
182
183 def data_in(self, data):
184 return [[i] for i in data]
185
186 def data_out(self, data):
187 return [i[0] for i in data]
188
189
190 class CookieEditor(base.GridEditor):
191 title = "Editing request Cookie header"
192 columns = [
193 col_text.Column("Name"),
194 col_text.Column("Value"),
195 ]
196
197
198 class CookieAttributeEditor(base.GridEditor):
199 title = "Editing Set-Cookie attributes"
200 columns = [
201 col_text.Column("Name"),
202 col_text.Column("Value"),
203 ]
204
205 def data_in(self, data):
206 return [(k, v or "") for k, v in data]
207
208 def data_out(self, data):
209 ret = []
210 for i in data:
211 if not i[1]:
212 ret.append([i[0], None])
213 else:
214 ret.append(i)
215 return ret
216
217
218 class SetCookieEditor(base.GridEditor):
219 title = "Editing response SetCookie header"
220 columns = [
221 col_text.Column("Name"),
222 col_text.Column("Value"),
223 col_subgrid.Column("Attributes", CookieAttributeEditor),
224 ]
225
226 def data_in(self, data):
227 flattened = []
228 for key, (value, attrs) in data:
229 flattened.append([key, value, attrs.items(multi=True)])
230 return flattened
231
232 def data_out(self, data):
233 vals = []
234 for key, value, attrs in data:
235 vals.append(
236 [
237 key,
238 (value, attrs)
239 ]
240 )
241 return vals
242
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mitmproxy/tools/console/grideditor/editors.py b/mitmproxy/tools/console/grideditor/editors.py
--- a/mitmproxy/tools/console/grideditor/editors.py
+++ b/mitmproxy/tools/console/grideditor/editors.py
@@ -65,8 +65,8 @@
class URLEncodedFormEditor(base.GridEditor):
title = "Editing URL-encoded form"
columns = [
- col_bytes.Column("Key"),
- col_bytes.Column("Value")
+ col_text.Column("Key"),
+ col_text.Column("Value")
]
|
{"golden_diff": "diff --git a/mitmproxy/tools/console/grideditor/editors.py b/mitmproxy/tools/console/grideditor/editors.py\n--- a/mitmproxy/tools/console/grideditor/editors.py\n+++ b/mitmproxy/tools/console/grideditor/editors.py\n@@ -65,8 +65,8 @@\n class URLEncodedFormEditor(base.GridEditor):\n title = \"Editing URL-encoded form\"\n columns = [\n- col_bytes.Column(\"Key\"),\n- col_bytes.Column(\"Value\")\n+ col_text.Column(\"Key\"),\n+ col_text.Column(\"Value\")\n ]\n", "issue": "ValueError: data must be bytes, but is str\nHi , \r\n When i use 'e' to edit form , sometimes i get this .\r\n\r\n```\r\n\u279c ~ mitmproxy -b 192.168.1.2 -p 8080\r\nTraceback (most recent call last):\r\n File \"mitmproxy/tools/console/master.py\", line 292, in run\r\n File \"site-packages/urwid/main_loop.py\", line 278, in run\r\n File \"site-packages/urwid/main_loop.py\", line 376, in _run\r\n File \"site-packages/urwid/main_loop.py\", line 682, in run\r\n File \"site-packages/urwid/main_loop.py\", line 719, in _loop\r\n File \"site-packages/urwid/raw_display.py\", line 393, in <lambda>\r\n File \"site-packages/urwid/raw_display.py\", line 493, in parse_input\r\n File \"site-packages/urwid/main_loop.py\", line 403, in _update\r\n File \"site-packages/urwid/main_loop.py\", line 503, in process_input\r\n File \"mitmproxy/tools/console/window.py\", line 84, in keypress\r\n File \"site-packages/urwid/container.py\", line 1116, in keypress\r\n File \"mitmproxy/tools/console/statusbar.py\", line 155, in keypress\r\n File \"mitmproxy/tools/console/statusbar.py\", line 108, in keypress\r\n File \"mitmproxy/tools/console/statusbar.py\", line 133, in prompt_execute\r\n File \"mitmproxy/tools/console/statusbar.py\", line 31, in __call__\r\n File \"mitmproxy/tools/console/flowview.py\", line 415, in edit\r\n File \"mitmproxy/tools/console/flowview.py\", line 351, in edit_form\r\n File \"mitmproxy/tools/console/master.py\", line 352, in view_grideditor\r\n File \"site-packages/blinker/base.py\", line 267, in send\r\n File \"site-packages/blinker/base.py\", line 267, in <listcomp>\r\n File \"mitmproxy/tools/console/master.py\", line 144, in sig_push_view_state\r\n File \"site-packages/urwid/main_loop.py\", line 578, in draw_screen\r\n File \"site-packages/urwid/widget.py\", line 141, in cached_render\r\n File \"site-packages/urwid/container.py\", line 1083, in render\r\n File \"site-packages/urwid/widget.py\", line 141, in cached_render\r\n File \"site-packages/urwid/decoration.py\", line 225, in render\r\n File \"site-packages/urwid/widget.py\", line 141, in cached_render\r\n File \"site-packages/urwid/widget.py\", line 1750, in render\r\n File \"site-packages/urwid/widget.py\", line 141, in cached_render\r\n File \"site-packages/urwid/container.py\", line 1083, in render\r\n File \"site-packages/urwid/widget.py\", line 141, in cached_render\r\n File \"site-packages/urwid/listbox.py\", line 455, in render\r\n File \"site-packages/urwid/listbox.py\", line 337, in calculate_visible\r\n File \"site-packages/urwid/listbox.py\", line 702, in _set_focus_complete\r\n File \"site-packages/urwid/listbox.py\", line 672, in _set_focus_first_selectable\r\n File \"site-packages/urwid/listbox.py\", line 340, in calculate_visible\r\n File \"mitmproxy/tools/console/grideditor/base.py\", line 223, in get_focus\r\n File \"mitmproxy/tools/console/grideditor/base.py\", line 77, in __init__\r\n File \"mitmproxy/tools/console/grideditor/col_bytes.py\", line 33, in Display\r\n File \"mitmproxy/tools/console/grideditor/col_bytes.py\", line 73, in 
__init__\r\n File \"mitmproxy/utils/strutils.py\", line 72, in bytes_to_escaped_str\r\nValueError: data must be bytes, but is str\r\n\r\nmitmproxy has crashed!\r\nPlease lodge a bug report at:\r\n\thttps://github.com/mitmproxy/mitmproxy\r\nShutting down...\r\n\r\n```\r\nsysteminfo:\r\nMitmproxy version: 1.0.0\r\nPython version: 3.5.2\r\nPlatform: Darwin-15.6.0-x86_64-i386-64bit\r\nSSL version: OpenSSL 1.0.2j 26 Sep 2016\r\nMac version: 10.11.6 ('', '', '') x86_64\r\n\n", "before_files": [{"content": "import re\nimport urwid\nfrom mitmproxy import exceptions\nfrom mitmproxy import flowfilter\nfrom mitmproxy.addons import script\nfrom mitmproxy.tools.console import common\nfrom mitmproxy.tools.console.grideditor import base\nfrom mitmproxy.tools.console.grideditor import col_text\nfrom mitmproxy.tools.console.grideditor import col_bytes\nfrom mitmproxy.tools.console.grideditor import col_subgrid\nfrom mitmproxy.tools.console import signals\nfrom mitmproxy.net.http import user_agents\n\n\nclass QueryEditor(base.GridEditor):\n title = \"Editing query\"\n columns = [\n col_text.Column(\"Key\"),\n col_text.Column(\"Value\")\n ]\n\n\nclass HeaderEditor(base.GridEditor):\n title = \"Editing headers\"\n columns = [\n col_bytes.Column(\"Key\"),\n col_bytes.Column(\"Value\")\n ]\n\n def make_help(self):\n h = super().make_help()\n text = [\n urwid.Text([(\"text\", \"Special keys:\\n\")])\n ]\n keys = [\n (\"U\", \"add User-Agent header\"),\n ]\n text.extend(\n common.format_keyvals(keys, key=\"key\", val=\"text\", indent=4)\n )\n text.append(urwid.Text([(\"text\", \"\\n\")]))\n text.extend(h)\n return text\n\n def set_user_agent(self, k):\n ua = user_agents.get_by_shortcut(k)\n if ua:\n self.walker.add_value(\n [\n b\"User-Agent\",\n ua[2].encode()\n ]\n )\n\n def handle_key(self, key):\n if key == \"U\":\n signals.status_prompt_onekey.send(\n prompt=\"Add User-Agent header:\",\n keys=[(i[0], i[1]) for i in user_agents.UASTRINGS],\n callback=self.set_user_agent,\n )\n return True\n\n\nclass URLEncodedFormEditor(base.GridEditor):\n title = \"Editing URL-encoded form\"\n columns = [\n col_bytes.Column(\"Key\"),\n col_bytes.Column(\"Value\")\n ]\n\n\nclass ReplaceEditor(base.GridEditor):\n title = \"Editing replacement patterns\"\n columns = [\n col_text.Column(\"Filter\"),\n col_text.Column(\"Regex\"),\n col_text.Column(\"Replacement\"),\n ]\n\n def is_error(self, col, val):\n if col == 0:\n if not flowfilter.parse(val):\n return \"Invalid filter specification.\"\n elif col == 1:\n try:\n re.compile(val)\n except re.error:\n return \"Invalid regular expression.\"\n return False\n\n\nclass SetHeadersEditor(base.GridEditor):\n title = \"Editing header set patterns\"\n columns = [\n col_text.Column(\"Filter\"),\n col_text.Column(\"Header\"),\n col_text.Column(\"Value\"),\n ]\n\n def is_error(self, col, val):\n if col == 0:\n if not flowfilter.parse(val):\n return \"Invalid filter specification\"\n return False\n\n def make_help(self):\n h = super().make_help()\n text = [\n urwid.Text([(\"text\", \"Special keys:\\n\")])\n ]\n keys = [\n (\"U\", \"add User-Agent header\"),\n ]\n text.extend(\n common.format_keyvals(keys, key=\"key\", val=\"text\", indent=4)\n )\n text.append(urwid.Text([(\"text\", \"\\n\")]))\n text.extend(h)\n return text\n\n def set_user_agent(self, k):\n ua = user_agents.get_by_shortcut(k)\n if ua:\n self.walker.add_value(\n [\n \".*\",\n b\"User-Agent\",\n ua[2].encode()\n ]\n )\n\n def handle_key(self, key):\n if key == \"U\":\n signals.status_prompt_onekey.send(\n prompt=\"Add 
User-Agent header:\",\n keys=[(i[0], i[1]) for i in user_agents.UASTRINGS],\n callback=self.set_user_agent,\n )\n return True\n\n\nclass PathEditor(base.GridEditor):\n # TODO: Next row on enter?\n\n title = \"Editing URL path components\"\n columns = [\n col_text.Column(\"Component\"),\n ]\n\n def data_in(self, data):\n return [[i] for i in data]\n\n def data_out(self, data):\n return [i[0] for i in data]\n\n\nclass ScriptEditor(base.GridEditor):\n title = \"Editing scripts\"\n columns = [\n col_text.Column(\"Command\"),\n ]\n\n def is_error(self, col, val):\n try:\n script.parse_command(val)\n except exceptions.OptionsError as e:\n return str(e)\n\n\nclass HostPatternEditor(base.GridEditor):\n title = \"Editing host patterns\"\n columns = [\n col_text.Column(\"Regex (matched on hostname:port / ip:port)\")\n ]\n\n def is_error(self, col, val):\n try:\n re.compile(val, re.IGNORECASE)\n except re.error as e:\n return \"Invalid regex: %s\" % str(e)\n\n def data_in(self, data):\n return [[i] for i in data]\n\n def data_out(self, data):\n return [i[0] for i in data]\n\n\nclass CookieEditor(base.GridEditor):\n title = \"Editing request Cookie header\"\n columns = [\n col_text.Column(\"Name\"),\n col_text.Column(\"Value\"),\n ]\n\n\nclass CookieAttributeEditor(base.GridEditor):\n title = \"Editing Set-Cookie attributes\"\n columns = [\n col_text.Column(\"Name\"),\n col_text.Column(\"Value\"),\n ]\n\n def data_in(self, data):\n return [(k, v or \"\") for k, v in data]\n\n def data_out(self, data):\n ret = []\n for i in data:\n if not i[1]:\n ret.append([i[0], None])\n else:\n ret.append(i)\n return ret\n\n\nclass SetCookieEditor(base.GridEditor):\n title = \"Editing response SetCookie header\"\n columns = [\n col_text.Column(\"Name\"),\n col_text.Column(\"Value\"),\n col_subgrid.Column(\"Attributes\", CookieAttributeEditor),\n ]\n\n def data_in(self, data):\n flattened = []\n for key, (value, attrs) in data:\n flattened.append([key, value, attrs.items(multi=True)])\n return flattened\n\n def data_out(self, data):\n vals = []\n for key, value, attrs in data:\n vals.append(\n [\n key,\n (value, attrs)\n ]\n )\n return vals\n", "path": "mitmproxy/tools/console/grideditor/editors.py"}], "after_files": [{"content": "import re\nimport urwid\nfrom mitmproxy import exceptions\nfrom mitmproxy import flowfilter\nfrom mitmproxy.addons import script\nfrom mitmproxy.tools.console import common\nfrom mitmproxy.tools.console.grideditor import base\nfrom mitmproxy.tools.console.grideditor import col_text\nfrom mitmproxy.tools.console.grideditor import col_bytes\nfrom mitmproxy.tools.console.grideditor import col_subgrid\nfrom mitmproxy.tools.console import signals\nfrom mitmproxy.net.http import user_agents\n\n\nclass QueryEditor(base.GridEditor):\n title = \"Editing query\"\n columns = [\n col_text.Column(\"Key\"),\n col_text.Column(\"Value\")\n ]\n\n\nclass HeaderEditor(base.GridEditor):\n title = \"Editing headers\"\n columns = [\n col_bytes.Column(\"Key\"),\n col_bytes.Column(\"Value\")\n ]\n\n def make_help(self):\n h = super().make_help()\n text = [\n urwid.Text([(\"text\", \"Special keys:\\n\")])\n ]\n keys = [\n (\"U\", \"add User-Agent header\"),\n ]\n text.extend(\n common.format_keyvals(keys, key=\"key\", val=\"text\", indent=4)\n )\n text.append(urwid.Text([(\"text\", \"\\n\")]))\n text.extend(h)\n return text\n\n def set_user_agent(self, k):\n ua = user_agents.get_by_shortcut(k)\n if ua:\n self.walker.add_value(\n [\n b\"User-Agent\",\n ua[2].encode()\n ]\n )\n\n def handle_key(self, key):\n if key 
== \"U\":\n signals.status_prompt_onekey.send(\n prompt=\"Add User-Agent header:\",\n keys=[(i[0], i[1]) for i in user_agents.UASTRINGS],\n callback=self.set_user_agent,\n )\n return True\n\n\nclass URLEncodedFormEditor(base.GridEditor):\n title = \"Editing URL-encoded form\"\n columns = [\n col_text.Column(\"Key\"),\n col_text.Column(\"Value\")\n ]\n\n\nclass ReplaceEditor(base.GridEditor):\n title = \"Editing replacement patterns\"\n columns = [\n col_text.Column(\"Filter\"),\n col_text.Column(\"Regex\"),\n col_text.Column(\"Replacement\"),\n ]\n\n def is_error(self, col, val):\n if col == 0:\n if not flowfilter.parse(val):\n return \"Invalid filter specification.\"\n elif col == 1:\n try:\n re.compile(val)\n except re.error:\n return \"Invalid regular expression.\"\n return False\n\n\nclass SetHeadersEditor(base.GridEditor):\n title = \"Editing header set patterns\"\n columns = [\n col_text.Column(\"Filter\"),\n col_text.Column(\"Header\"),\n col_text.Column(\"Value\"),\n ]\n\n def is_error(self, col, val):\n if col == 0:\n if not flowfilter.parse(val):\n return \"Invalid filter specification\"\n return False\n\n def make_help(self):\n h = super().make_help()\n text = [\n urwid.Text([(\"text\", \"Special keys:\\n\")])\n ]\n keys = [\n (\"U\", \"add User-Agent header\"),\n ]\n text.extend(\n common.format_keyvals(keys, key=\"key\", val=\"text\", indent=4)\n )\n text.append(urwid.Text([(\"text\", \"\\n\")]))\n text.extend(h)\n return text\n\n def set_user_agent(self, k):\n ua = user_agents.get_by_shortcut(k)\n if ua:\n self.walker.add_value(\n [\n \".*\",\n b\"User-Agent\",\n ua[2].encode()\n ]\n )\n\n def handle_key(self, key):\n if key == \"U\":\n signals.status_prompt_onekey.send(\n prompt=\"Add User-Agent header:\",\n keys=[(i[0], i[1]) for i in user_agents.UASTRINGS],\n callback=self.set_user_agent,\n )\n return True\n\n\nclass PathEditor(base.GridEditor):\n # TODO: Next row on enter?\n\n title = \"Editing URL path components\"\n columns = [\n col_text.Column(\"Component\"),\n ]\n\n def data_in(self, data):\n return [[i] for i in data]\n\n def data_out(self, data):\n return [i[0] for i in data]\n\n\nclass ScriptEditor(base.GridEditor):\n title = \"Editing scripts\"\n columns = [\n col_text.Column(\"Command\"),\n ]\n\n def is_error(self, col, val):\n try:\n script.parse_command(val)\n except exceptions.OptionsError as e:\n return str(e)\n\n\nclass HostPatternEditor(base.GridEditor):\n title = \"Editing host patterns\"\n columns = [\n col_text.Column(\"Regex (matched on hostname:port / ip:port)\")\n ]\n\n def is_error(self, col, val):\n try:\n re.compile(val, re.IGNORECASE)\n except re.error as e:\n return \"Invalid regex: %s\" % str(e)\n\n def data_in(self, data):\n return [[i] for i in data]\n\n def data_out(self, data):\n return [i[0] for i in data]\n\n\nclass CookieEditor(base.GridEditor):\n title = \"Editing request Cookie header\"\n columns = [\n col_text.Column(\"Name\"),\n col_text.Column(\"Value\"),\n ]\n\n\nclass CookieAttributeEditor(base.GridEditor):\n title = \"Editing Set-Cookie attributes\"\n columns = [\n col_text.Column(\"Name\"),\n col_text.Column(\"Value\"),\n ]\n\n def data_in(self, data):\n return [(k, v or \"\") for k, v in data]\n\n def data_out(self, data):\n ret = []\n for i in data:\n if not i[1]:\n ret.append([i[0], None])\n else:\n ret.append(i)\n return ret\n\n\nclass SetCookieEditor(base.GridEditor):\n title = \"Editing response SetCookie header\"\n columns = [\n col_text.Column(\"Name\"),\n col_text.Column(\"Value\"),\n col_subgrid.Column(\"Attributes\", 
CookieAttributeEditor),\n ]\n\n def data_in(self, data):\n flattened = []\n for key, (value, attrs) in data:\n flattened.append([key, value, attrs.items(multi=True)])\n return flattened\n\n def data_out(self, data):\n vals = []\n for key, value, attrs in data:\n vals.append(\n [\n key,\n (value, attrs)\n ]\n )\n return vals\n", "path": "mitmproxy/tools/console/grideditor/editors.py"}]}
| 3,397 | 122 |
gh_patches_debug_40592
|
rasdani/github-patches
|
git_diff
|
hpcaitech__ColossalAI-5731
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `colossalai/zero/gemini/placement_policy.py`
Content:
```
1 import functools
2 import warnings
3 from abc import ABC, abstractmethod
4 from time import time
5 from typing import Dict, List, Optional, Tuple, Type
6
7 import torch
8
9 from colossalai.accelerator import get_accelerator
10 from colossalai.legacy.utils.memory import colo_device_memory_capacity
11 from colossalai.zero.gemini.chunk import Chunk
12
13 from .chunk import Chunk, ChunkManager
14 from .memory_tracer import ChunkMemStatsCollector
15
16
17 class PlacementPolicy(ABC):
18 need_mem_stats: bool = False
19
20 def __init__(
21 self,
22 gemini_manager: "GeminiManager",
23 chunk_manager: ChunkManager,
24 mem_stats_collector: Optional[ChunkMemStatsCollector] = None,
25 max_prefetch: int = 0,
26 **kwargs,
27 ) -> None:
28 self.gemini_manager = gemini_manager
29 self.chunk_manager = chunk_manager
30 self.mem_stats_collector: Optional[ChunkMemStatsCollector] = mem_stats_collector
31 self.max_prefetch = max_prefetch
32
33 @abstractmethod
34 def evict_tensors(self, can_evict_chunks: List[Chunk], **kwargs) -> Tuple[int, float]:
35 raise NotImplementedError
36
37 @abstractmethod
38 def setup_grads_device(
39 self, params: List[torch.Tensor], grads_device_map: Dict[torch.Tensor, torch.device]
40 ) -> None:
41 raise NotImplementedError
42
43 @abstractmethod
44 def get_prefetch_chunks(self) -> List[Chunk]:
45 raise NotImplementedError
46
47
48 class StaticPlacementPolicy(PlacementPolicy):
49 def __init__(
50 self,
51 gemini_manager: "GeminiManager",
52 chunk_manager: ChunkManager,
53 mem_stats_collector: Optional[ChunkMemStatsCollector] = None,
54 max_prefetch: int = 0,
55 shard_param_frac: float = 1.0,
56 offload_optim_frac: float = 0.0,
57 offload_param_frac: float = 0.0,
58 **kwargs,
59 ) -> None:
60 super().__init__(
61 gemini_manager, chunk_manager, mem_stats_collector=mem_stats_collector, max_prefetch=max_prefetch
62 )
63 if offload_param_frac > 0.0 and (shard_param_frac != 1.0 or offload_optim_frac != 1.0):
64 warnings.warn("offload_param_frac is ignored when shard_param_frac != 1.0 or offload_optim_frac != 1.0")
65 offload_param_frac = 0.0
66 self.shard_param_frac = shard_param_frac
67 self.offload_optim_frac = offload_optim_frac
68 self.offload_param_frac = offload_param_frac
69 # these should be initialized in setup_grads_device
70 self.keep_gathered_chunk_mem = 0.0
71 self.keep_cuda_chunk_mem = 0.0
72
73 def evict_tensors(self, can_evict_chunks: List[Chunk], **kwargs) -> Tuple[int, float]:
74 can_shard_chunk_mem = sum(chunk.chunk_mem for chunk in can_evict_chunks)
75 can_offload_chunk_mem = can_shard_chunk_mem
76 for chunk in can_evict_chunks:
77 if can_shard_chunk_mem <= self.keep_gathered_chunk_mem:
78 break
79 self.chunk_manager.release_chunk(chunk)
80 # real saved mem is chunk_mem - shard_mem, for simplicity we use chunk_mem
81 can_shard_chunk_mem -= chunk.chunk_mem
82 for chunk in can_evict_chunks:
83 if can_offload_chunk_mem <= self.keep_cuda_chunk_mem:
84 break
85 self.chunk_manager.move_chunk(chunk, torch.device("cpu"))
86 # real saved mem is shard_mem, for simplicity we use chunk_mem
87 can_offload_chunk_mem -= chunk.chunk_mem
88 return 0, 0.0
89
90 def setup_grads_device(
91 self, params: List[torch.Tensor], grads_device_map: Dict[torch.Tensor, torch.device]
92 ) -> None:
93 total_chunk_mem = sum(self.chunk_manager.get_chunk(p).chunk_mem for p in params)
94
95 offload_optim_chunk_mem = total_chunk_mem * self.offload_optim_frac
96 offloaded_optim_chunk_mem = 0
97 chunks = set(self.chunk_manager.get_chunk(p) for p in params)
98 for chunk in chunks:
99 params = chunk.get_tensors()
100 # init offload optim settings
101 # keep gathered chunks are in CUDA
102 if chunk.keep_gathered or offloaded_optim_chunk_mem >= offload_optim_chunk_mem:
103 device = get_accelerator().get_current_device()
104 else:
105 device = torch.device("cpu")
106 # real offloaded mem is chunk.shard_mem, for simplicity we use chunk mem here
107 offloaded_optim_chunk_mem += chunk.chunk_mem
108 for p in params:
109 grads_device_map[p] = device
110 self.keep_gathered_chunk_mem = total_chunk_mem * (1 - self.shard_param_frac)
111 self.keep_cuda_chunk_mem = total_chunk_mem * (1 - self.offload_param_frac)
112
113 def get_prefetch_chunks(self) -> List[Chunk]:
114 if self.gemini_manager.is_warmup(): # no prefetch during warmup since we need compute_list
115 return []
116 can_prefetch = self.max_prefetch - len(self.gemini_manager._async_works)
117 prefetch = []
118 for i in range(self.gemini_manager.compute_idx + 1, len(self.gemini_manager.compute_list)):
119 for chunk in self.gemini_manager.compute_list[i]:
120 if len(prefetch) >= can_prefetch:
121 break
122 if chunk not in prefetch and chunk not in self.chunk_manager.accessed_chunks:
123 prefetch.append(chunk)
124 if len(prefetch) >= can_prefetch:
125 break
126 return prefetch
127
128
129 class AutoPlacementPolicy(PlacementPolicy):
130 need_mem_stats: bool = True
131
132 def __init__(
133 self,
134 gemini_manager: "GeminiManager",
135 chunk_manager: ChunkManager,
136 mem_stats_collector: Optional[ChunkMemStatsCollector] = None,
137 max_prefetch: int = 0,
138 warmup_non_model_data_ratio: float = 0.8,
139 steady_cuda_cap_ratio: float = 0.9,
140 **kwargs,
141 ) -> None:
142 super().__init__(
143 gemini_manager, chunk_manager, mem_stats_collector=mem_stats_collector, max_prefetch=max_prefetch
144 )
145 # model data will use 1-_warmup_non_model_data_ratio CUDA memory in warmup phase
146 # you can set them by AutoPlacementPolicy.set_warmup_non_model_data_ratio()
147 # and AutoPlacementPolicy.set_steady_cuda_cap_ratio()
148 self._warmup_non_model_data_ratio = warmup_non_model_data_ratio
149 self._steady_cuda_cap_ratio = steady_cuda_cap_ratio
150
151 def evict_tensors(
152 self,
153 can_evict_chunks: List[Chunk],
154 cuda_demand: int = 0,
155 warmup: bool = True,
156 compute_list: Optional[List[Tuple[Chunk, ...]]] = None,
157 compute_idx: int = 0,
158 **kwargs,
159 ) -> Tuple[int, float]:
160 """
161 Evict tensors from CUDA device.
162
163 Args:
164 can_evict_chunks (List[StatefulTensor]): the list of tensors that can be evicted.
165 cuda_demand (int, optional): the volume of data needed on cuda device. Defaults to 0.
166 warmup (bool, optional): a flag indicates whether in the phase of warmup. Defaults to True.
167 compute_list (List[StatefulTensor], optional): TODO. Defaults to [].
168 compute_idx (int, optional): the idx of computing device. Defaults to 0.
169
170 Raises:
171 RuntimeError:
172
173 Returns:
174 int: the volume of memory that is evicted
175 """
176 start = time()
177 cuda_capacity = colo_device_memory_capacity(get_accelerator().get_current_device())
178 used_cuda_model_data = self.chunk_manager.total_mem["cuda"]
179 if warmup:
180 # We designate a part of CUDA memory for model data in warmup iterations.
181 max_cuda_non_model_data_per_period = cuda_capacity * self._warmup_non_model_data_ratio
182 else:
183 # max non-model-data cuda memory consumption of this sampling moment and the next sampling moment.
184 max_cuda_non_model_data_per_period = self.mem_stats_collector.next_period_non_model_data_usage("cuda")
185 cuda_capacity *= self._steady_cuda_cap_ratio
186 total_cuda_model_data = cuda_capacity - max_cuda_non_model_data_per_period
187 avail_cuda_model_data = total_cuda_model_data - used_cuda_model_data
188 freed_cuda_model_data = 0
189
190 if avail_cuda_model_data < cuda_demand:
191 # Move cuda_demand - avail_cuda_model_data volume of tensors
192 # to_free_cuda_model_data = cuda_demand - avail_cuda_model_data
193 to_free_cuda_model_data = cuda_demand - avail_cuda_model_data
194 to_free_chunks = can_evict_chunks
195 if not warmup:
196 to_free_chunks = self._sort_can_evict_chunks(tuple(to_free_chunks), compute_idx, tuple(compute_list))
197 # print(self._sort_can_evict_chunks.cache_info())
198 for chunk in to_free_chunks:
199 if freed_cuda_model_data >= to_free_cuda_model_data:
200 break
201
202 self.chunk_manager.release_chunk(chunk)
203 self.chunk_manager.move_chunk(chunk, torch.device("cpu"))
204 freed_cuda_model_data += chunk.chunk_mem
205 if freed_cuda_model_data < to_free_cuda_model_data:
206 raise RuntimeError(
207 f"Adjust layout failed! No enough CUDA memory! "
208 f"Need {to_free_cuda_model_data}, freed {freed_cuda_model_data}"
209 )
210 return freed_cuda_model_data, time() - start
211
212 @staticmethod
213 @functools.lru_cache(maxsize=None)
214 def _sort_can_evict_chunks(can_evict_chunks: tuple, compute_idx: int, compute_list: tuple) -> list:
215 next_compute_idx = {chunk: len(compute_list) for chunk in can_evict_chunks}
216 for i in range(len(compute_list) - 1, compute_idx, -1):
217 for chunk in compute_list[i]:
218 if chunk in next_compute_idx:
219 next_compute_idx[chunk] = i
220 next_compute_idx = sorted(next_compute_idx.items(), key=lambda pair: pair[1], reverse=True)
221 return [t for (t, idx) in next_compute_idx]
222
223 def setup_grads_device(
224 self, params: List[torch.Tensor], grads_device_map: Dict[torch.Tensor, torch.device]
225 ) -> None:
226 for p in params:
227 chunk = self.chunk_manager.get_chunk(p)
228 # init offload optim settings
229 # keep gathered chunks are in CUDA
230 if chunk.keep_gathered:
231 grads_device_map[p] = get_accelerator().get_current_device()
232 else:
233 grads_device_map[p] = torch.device("cpu")
234
235 def get_prefetch_chunks(self, max_prefetch: int) -> List[Chunk]:
236 return [] # TODO @botbw: implement prefetching for auto
237
238
239 class PlacementPolicyFactory:
240 policies: Dict[str, Type[PlacementPolicy]] = {
241 "auto": AutoPlacementPolicy,
242 "static": StaticPlacementPolicy,
243 }
244
245 @staticmethod
246 def create(policy_name: str) -> Type[PlacementPolicy]:
247 if policy_name not in PlacementPolicyFactory.policies:
248 raise TypeError(f"Unknown tensor placement policy {policy_name}")
249 return PlacementPolicyFactory.policies[policy_name]
250
251 @staticmethod
252 def get_policy_names():
253 return tuple(PlacementPolicyFactory.policies.keys())
254
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/colossalai/zero/gemini/placement_policy.py b/colossalai/zero/gemini/placement_policy.py
--- a/colossalai/zero/gemini/placement_policy.py
+++ b/colossalai/zero/gemini/placement_policy.py
@@ -19,7 +19,7 @@
def __init__(
self,
- gemini_manager: "GeminiManager",
+ gemini_manager: "GeminiManager", # TODO @botbw: solve circular import
chunk_manager: ChunkManager,
mem_stats_collector: Optional[ChunkMemStatsCollector] = None,
max_prefetch: int = 0,
@@ -40,9 +40,8 @@
) -> None:
raise NotImplementedError
- @abstractmethod
def get_prefetch_chunks(self) -> List[Chunk]:
- raise NotImplementedError
+ return [] # no prefetch by default
class StaticPlacementPolicy(PlacementPolicy):
@@ -116,12 +115,14 @@
can_prefetch = self.max_prefetch - len(self.gemini_manager._async_works)
prefetch = []
for i in range(self.gemini_manager.compute_idx + 1, len(self.gemini_manager.compute_list)):
+ break_flag = False
for chunk in self.gemini_manager.compute_list[i]:
if len(prefetch) >= can_prefetch:
+ break_flag = True
break
if chunk not in prefetch and chunk not in self.chunk_manager.accessed_chunks:
prefetch.append(chunk)
- if len(prefetch) >= can_prefetch:
+ if break_flag:
break
return prefetch
@@ -232,8 +233,33 @@
else:
grads_device_map[p] = torch.device("cpu")
- def get_prefetch_chunks(self, max_prefetch: int) -> List[Chunk]:
- return [] # TODO @botbw: implement prefetching for auto
+ def get_prefetch_chunks(self) -> List[Chunk]:
+ if self.gemini_manager.is_warmup(): # no prefetch during warmup since we need compute_list
+ return []
+ # modified from self.evict_tensors
+ cuda_capacity = self._steady_cuda_cap_ratio * colo_device_memory_capacity(
+ get_accelerator().get_current_device()
+ )
+ max_cuda_non_model_data_per_period = self.mem_stats_collector.next_period_non_model_data_usage("cuda")
+ used_cuda_model_data = self.chunk_manager.total_mem["cuda"]
+ total_cuda_model_data = cuda_capacity - max_cuda_non_model_data_per_period
+ avail_cuda_model_data = total_cuda_model_data - used_cuda_model_data
+
+ prefetch_chunk_memory = 0
+ can_prefetch = self.max_prefetch - len(self.gemini_manager._async_works)
+ prefetch = []
+ for i in range(self.gemini_manager.compute_idx + 1, len(self.gemini_manager.compute_list)):
+ break_flag = False
+ for chunk in self.gemini_manager.compute_list[i]:
+ chunk: Chunk
+ if len(prefetch) >= can_prefetch or prefetch_chunk_memory + chunk.chunk_mem > avail_cuda_model_data:
+ break_flag = True
+ break
+ if chunk not in prefetch and chunk not in self.chunk_manager.accessed_chunks:
+ prefetch.append(chunk)
+ if break_flag:
+ break
+ return prefetch
class PlacementPolicyFactory:
|
{"golden_diff": "diff --git a/colossalai/zero/gemini/placement_policy.py b/colossalai/zero/gemini/placement_policy.py\n--- a/colossalai/zero/gemini/placement_policy.py\n+++ b/colossalai/zero/gemini/placement_policy.py\n@@ -19,7 +19,7 @@\n \n def __init__(\n self,\n- gemini_manager: \"GeminiManager\",\n+ gemini_manager: \"GeminiManager\", # TODO @botbw: solve circular import\n chunk_manager: ChunkManager,\n mem_stats_collector: Optional[ChunkMemStatsCollector] = None,\n max_prefetch: int = 0,\n@@ -40,9 +40,8 @@\n ) -> None:\n raise NotImplementedError\n \n- @abstractmethod\n def get_prefetch_chunks(self) -> List[Chunk]:\n- raise NotImplementedError\n+ return [] # no prefetch by default\n \n \n class StaticPlacementPolicy(PlacementPolicy):\n@@ -116,12 +115,14 @@\n can_prefetch = self.max_prefetch - len(self.gemini_manager._async_works)\n prefetch = []\n for i in range(self.gemini_manager.compute_idx + 1, len(self.gemini_manager.compute_list)):\n+ break_flag = False\n for chunk in self.gemini_manager.compute_list[i]:\n if len(prefetch) >= can_prefetch:\n+ break_flag = True\n break\n if chunk not in prefetch and chunk not in self.chunk_manager.accessed_chunks:\n prefetch.append(chunk)\n- if len(prefetch) >= can_prefetch:\n+ if break_flag:\n break\n return prefetch\n \n@@ -232,8 +233,33 @@\n else:\n grads_device_map[p] = torch.device(\"cpu\")\n \n- def get_prefetch_chunks(self, max_prefetch: int) -> List[Chunk]:\n- return [] # TODO @botbw: implement prefetching for auto\n+ def get_prefetch_chunks(self) -> List[Chunk]:\n+ if self.gemini_manager.is_warmup(): # no prefetch during warmup since we need compute_list\n+ return []\n+ # modified from self.evict_tensors\n+ cuda_capacity = self._steady_cuda_cap_ratio * colo_device_memory_capacity(\n+ get_accelerator().get_current_device()\n+ )\n+ max_cuda_non_model_data_per_period = self.mem_stats_collector.next_period_non_model_data_usage(\"cuda\")\n+ used_cuda_model_data = self.chunk_manager.total_mem[\"cuda\"]\n+ total_cuda_model_data = cuda_capacity - max_cuda_non_model_data_per_period\n+ avail_cuda_model_data = total_cuda_model_data - used_cuda_model_data\n+\n+ prefetch_chunk_memory = 0\n+ can_prefetch = self.max_prefetch - len(self.gemini_manager._async_works)\n+ prefetch = []\n+ for i in range(self.gemini_manager.compute_idx + 1, len(self.gemini_manager.compute_list)):\n+ break_flag = False\n+ for chunk in self.gemini_manager.compute_list[i]:\n+ chunk: Chunk\n+ if len(prefetch) >= can_prefetch or prefetch_chunk_memory + chunk.chunk_mem > avail_cuda_model_data:\n+ break_flag = True\n+ break\n+ if chunk not in prefetch and chunk not in self.chunk_manager.accessed_chunks:\n+ prefetch.append(chunk)\n+ if break_flag:\n+ break\n+ return prefetch\n \n \n class PlacementPolicyFactory:\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import functools\nimport warnings\nfrom abc import ABC, abstractmethod\nfrom time import time\nfrom typing import Dict, List, Optional, Tuple, Type\n\nimport torch\n\nfrom colossalai.accelerator import get_accelerator\nfrom colossalai.legacy.utils.memory import colo_device_memory_capacity\nfrom colossalai.zero.gemini.chunk import Chunk\n\nfrom .chunk import Chunk, ChunkManager\nfrom .memory_tracer import ChunkMemStatsCollector\n\n\nclass PlacementPolicy(ABC):\n need_mem_stats: bool = False\n\n def __init__(\n self,\n gemini_manager: \"GeminiManager\",\n chunk_manager: ChunkManager,\n mem_stats_collector: Optional[ChunkMemStatsCollector] = 
None,\n max_prefetch: int = 0,\n **kwargs,\n ) -> None:\n self.gemini_manager = gemini_manager\n self.chunk_manager = chunk_manager\n self.mem_stats_collector: Optional[ChunkMemStatsCollector] = mem_stats_collector\n self.max_prefetch = max_prefetch\n\n @abstractmethod\n def evict_tensors(self, can_evict_chunks: List[Chunk], **kwargs) -> Tuple[int, float]:\n raise NotImplementedError\n\n @abstractmethod\n def setup_grads_device(\n self, params: List[torch.Tensor], grads_device_map: Dict[torch.Tensor, torch.device]\n ) -> None:\n raise NotImplementedError\n\n @abstractmethod\n def get_prefetch_chunks(self) -> List[Chunk]:\n raise NotImplementedError\n\n\nclass StaticPlacementPolicy(PlacementPolicy):\n def __init__(\n self,\n gemini_manager: \"GeminiManager\",\n chunk_manager: ChunkManager,\n mem_stats_collector: Optional[ChunkMemStatsCollector] = None,\n max_prefetch: int = 0,\n shard_param_frac: float = 1.0,\n offload_optim_frac: float = 0.0,\n offload_param_frac: float = 0.0,\n **kwargs,\n ) -> None:\n super().__init__(\n gemini_manager, chunk_manager, mem_stats_collector=mem_stats_collector, max_prefetch=max_prefetch\n )\n if offload_param_frac > 0.0 and (shard_param_frac != 1.0 or offload_optim_frac != 1.0):\n warnings.warn(\"offload_param_frac is ignored when shard_param_frac != 1.0 or offload_optim_frac != 1.0\")\n offload_param_frac = 0.0\n self.shard_param_frac = shard_param_frac\n self.offload_optim_frac = offload_optim_frac\n self.offload_param_frac = offload_param_frac\n # these should be initialized in setup_grads_device\n self.keep_gathered_chunk_mem = 0.0\n self.keep_cuda_chunk_mem = 0.0\n\n def evict_tensors(self, can_evict_chunks: List[Chunk], **kwargs) -> Tuple[int, float]:\n can_shard_chunk_mem = sum(chunk.chunk_mem for chunk in can_evict_chunks)\n can_offload_chunk_mem = can_shard_chunk_mem\n for chunk in can_evict_chunks:\n if can_shard_chunk_mem <= self.keep_gathered_chunk_mem:\n break\n self.chunk_manager.release_chunk(chunk)\n # real saved mem is chunk_mem - shard_mem, for simplicity we use chunk_mem\n can_shard_chunk_mem -= chunk.chunk_mem\n for chunk in can_evict_chunks:\n if can_offload_chunk_mem <= self.keep_cuda_chunk_mem:\n break\n self.chunk_manager.move_chunk(chunk, torch.device(\"cpu\"))\n # real saved mem is shard_mem, for simplicity we use chunk_mem\n can_offload_chunk_mem -= chunk.chunk_mem\n return 0, 0.0\n\n def setup_grads_device(\n self, params: List[torch.Tensor], grads_device_map: Dict[torch.Tensor, torch.device]\n ) -> None:\n total_chunk_mem = sum(self.chunk_manager.get_chunk(p).chunk_mem for p in params)\n\n offload_optim_chunk_mem = total_chunk_mem * self.offload_optim_frac\n offloaded_optim_chunk_mem = 0\n chunks = set(self.chunk_manager.get_chunk(p) for p in params)\n for chunk in chunks:\n params = chunk.get_tensors()\n # init offload optim settings\n # keep gathered chunks are in CUDA\n if chunk.keep_gathered or offloaded_optim_chunk_mem >= offload_optim_chunk_mem:\n device = get_accelerator().get_current_device()\n else:\n device = torch.device(\"cpu\")\n # real offloaded mem is chunk.shard_mem, for simplicity we use chunk mem here\n offloaded_optim_chunk_mem += chunk.chunk_mem\n for p in params:\n grads_device_map[p] = device\n self.keep_gathered_chunk_mem = total_chunk_mem * (1 - self.shard_param_frac)\n self.keep_cuda_chunk_mem = total_chunk_mem * (1 - self.offload_param_frac)\n\n def get_prefetch_chunks(self) -> List[Chunk]:\n if self.gemini_manager.is_warmup(): # no prefetch during warmup since we need compute_list\n return []\n 
can_prefetch = self.max_prefetch - len(self.gemini_manager._async_works)\n prefetch = []\n for i in range(self.gemini_manager.compute_idx + 1, len(self.gemini_manager.compute_list)):\n for chunk in self.gemini_manager.compute_list[i]:\n if len(prefetch) >= can_prefetch:\n break\n if chunk not in prefetch and chunk not in self.chunk_manager.accessed_chunks:\n prefetch.append(chunk)\n if len(prefetch) >= can_prefetch:\n break\n return prefetch\n\n\nclass AutoPlacementPolicy(PlacementPolicy):\n need_mem_stats: bool = True\n\n def __init__(\n self,\n gemini_manager: \"GeminiManager\",\n chunk_manager: ChunkManager,\n mem_stats_collector: Optional[ChunkMemStatsCollector] = None,\n max_prefetch: int = 0,\n warmup_non_model_data_ratio: float = 0.8,\n steady_cuda_cap_ratio: float = 0.9,\n **kwargs,\n ) -> None:\n super().__init__(\n gemini_manager, chunk_manager, mem_stats_collector=mem_stats_collector, max_prefetch=max_prefetch\n )\n # model data will use 1-_warmup_non_model_data_ratio CUDA memory in warmup phase\n # you can set them by AutoPlacementPolicy.set_warmup_non_model_data_ratio()\n # and AutoPlacementPolicy.set_steady_cuda_cap_ratio()\n self._warmup_non_model_data_ratio = warmup_non_model_data_ratio\n self._steady_cuda_cap_ratio = steady_cuda_cap_ratio\n\n def evict_tensors(\n self,\n can_evict_chunks: List[Chunk],\n cuda_demand: int = 0,\n warmup: bool = True,\n compute_list: Optional[List[Tuple[Chunk, ...]]] = None,\n compute_idx: int = 0,\n **kwargs,\n ) -> Tuple[int, float]:\n \"\"\"\n Evict tensors from CUDA device.\n\n Args:\n can_evict_chunks (List[StatefulTensor]): the list of tensors that can be evicted.\n cuda_demand (int, optional): the volume of data needed on cuda device. Defaults to 0.\n warmup (bool, optional): a flag indicates whether in the phase of warmup. Defaults to True.\n compute_list (List[StatefulTensor], optional): TODO. Defaults to [].\n compute_idx (int, optional): the idx of computing device. 
Defaults to 0.\n\n Raises:\n RuntimeError:\n\n Returns:\n int: the volume of memory that is evicted\n \"\"\"\n start = time()\n cuda_capacity = colo_device_memory_capacity(get_accelerator().get_current_device())\n used_cuda_model_data = self.chunk_manager.total_mem[\"cuda\"]\n if warmup:\n # We designate a part of CUDA memory for model data in warmup iterations.\n max_cuda_non_model_data_per_period = cuda_capacity * self._warmup_non_model_data_ratio\n else:\n # max non-model-data cuda memory consumption of this sampling moment and the next sampling moment.\n max_cuda_non_model_data_per_period = self.mem_stats_collector.next_period_non_model_data_usage(\"cuda\")\n cuda_capacity *= self._steady_cuda_cap_ratio\n total_cuda_model_data = cuda_capacity - max_cuda_non_model_data_per_period\n avail_cuda_model_data = total_cuda_model_data - used_cuda_model_data\n freed_cuda_model_data = 0\n\n if avail_cuda_model_data < cuda_demand:\n # Move cuda_demand - avail_cuda_model_data volume of tensors\n # to_free_cuda_model_data = cuda_demand - avail_cuda_model_data\n to_free_cuda_model_data = cuda_demand - avail_cuda_model_data\n to_free_chunks = can_evict_chunks\n if not warmup:\n to_free_chunks = self._sort_can_evict_chunks(tuple(to_free_chunks), compute_idx, tuple(compute_list))\n # print(self._sort_can_evict_chunks.cache_info())\n for chunk in to_free_chunks:\n if freed_cuda_model_data >= to_free_cuda_model_data:\n break\n\n self.chunk_manager.release_chunk(chunk)\n self.chunk_manager.move_chunk(chunk, torch.device(\"cpu\"))\n freed_cuda_model_data += chunk.chunk_mem\n if freed_cuda_model_data < to_free_cuda_model_data:\n raise RuntimeError(\n f\"Adjust layout failed! No enough CUDA memory! \"\n f\"Need {to_free_cuda_model_data}, freed {freed_cuda_model_data}\"\n )\n return freed_cuda_model_data, time() - start\n\n @staticmethod\n @functools.lru_cache(maxsize=None)\n def _sort_can_evict_chunks(can_evict_chunks: tuple, compute_idx: int, compute_list: tuple) -> list:\n next_compute_idx = {chunk: len(compute_list) for chunk in can_evict_chunks}\n for i in range(len(compute_list) - 1, compute_idx, -1):\n for chunk in compute_list[i]:\n if chunk in next_compute_idx:\n next_compute_idx[chunk] = i\n next_compute_idx = sorted(next_compute_idx.items(), key=lambda pair: pair[1], reverse=True)\n return [t for (t, idx) in next_compute_idx]\n\n def setup_grads_device(\n self, params: List[torch.Tensor], grads_device_map: Dict[torch.Tensor, torch.device]\n ) -> None:\n for p in params:\n chunk = self.chunk_manager.get_chunk(p)\n # init offload optim settings\n # keep gathered chunks are in CUDA\n if chunk.keep_gathered:\n grads_device_map[p] = get_accelerator().get_current_device()\n else:\n grads_device_map[p] = torch.device(\"cpu\")\n\n def get_prefetch_chunks(self, max_prefetch: int) -> List[Chunk]:\n return [] # TODO @botbw: implement prefetching for auto\n\n\nclass PlacementPolicyFactory:\n policies: Dict[str, Type[PlacementPolicy]] = {\n \"auto\": AutoPlacementPolicy,\n \"static\": StaticPlacementPolicy,\n }\n\n @staticmethod\n def create(policy_name: str) -> Type[PlacementPolicy]:\n if policy_name not in PlacementPolicyFactory.policies:\n raise TypeError(f\"Unknown tensor placement policy {policy_name}\")\n return PlacementPolicyFactory.policies[policy_name]\n\n @staticmethod\n def get_policy_names():\n return tuple(PlacementPolicyFactory.policies.keys())\n", "path": "colossalai/zero/gemini/placement_policy.py"}], "after_files": [{"content": "import functools\nimport warnings\nfrom abc import ABC, 
abstractmethod\nfrom time import time\nfrom typing import Dict, List, Optional, Tuple, Type\n\nimport torch\n\nfrom colossalai.accelerator import get_accelerator\nfrom colossalai.legacy.utils.memory import colo_device_memory_capacity\nfrom colossalai.zero.gemini.chunk import Chunk\n\nfrom .chunk import Chunk, ChunkManager\nfrom .memory_tracer import ChunkMemStatsCollector\n\n\nclass PlacementPolicy(ABC):\n need_mem_stats: bool = False\n\n def __init__(\n self,\n gemini_manager: \"GeminiManager\", # TODO @botbw: solve circular import\n chunk_manager: ChunkManager,\n mem_stats_collector: Optional[ChunkMemStatsCollector] = None,\n max_prefetch: int = 0,\n **kwargs,\n ) -> None:\n self.gemini_manager = gemini_manager\n self.chunk_manager = chunk_manager\n self.mem_stats_collector: Optional[ChunkMemStatsCollector] = mem_stats_collector\n self.max_prefetch = max_prefetch\n\n @abstractmethod\n def evict_tensors(self, can_evict_chunks: List[Chunk], **kwargs) -> Tuple[int, float]:\n raise NotImplementedError\n\n @abstractmethod\n def setup_grads_device(\n self, params: List[torch.Tensor], grads_device_map: Dict[torch.Tensor, torch.device]\n ) -> None:\n raise NotImplementedError\n\n def get_prefetch_chunks(self) -> List[Chunk]:\n return [] # no prefetch by default\n\n\nclass StaticPlacementPolicy(PlacementPolicy):\n def __init__(\n self,\n gemini_manager: \"GeminiManager\",\n chunk_manager: ChunkManager,\n mem_stats_collector: Optional[ChunkMemStatsCollector] = None,\n max_prefetch: int = 0,\n shard_param_frac: float = 1.0,\n offload_optim_frac: float = 0.0,\n offload_param_frac: float = 0.0,\n **kwargs,\n ) -> None:\n super().__init__(\n gemini_manager, chunk_manager, mem_stats_collector=mem_stats_collector, max_prefetch=max_prefetch\n )\n if offload_param_frac > 0.0 and (shard_param_frac != 1.0 or offload_optim_frac != 1.0):\n warnings.warn(\"offload_param_frac is ignored when shard_param_frac != 1.0 or offload_optim_frac != 1.0\")\n offload_param_frac = 0.0\n self.shard_param_frac = shard_param_frac\n self.offload_optim_frac = offload_optim_frac\n self.offload_param_frac = offload_param_frac\n # these should be initialized in setup_grads_device\n self.keep_gathered_chunk_mem = 0.0\n self.keep_cuda_chunk_mem = 0.0\n\n def evict_tensors(self, can_evict_chunks: List[Chunk], **kwargs) -> Tuple[int, float]:\n can_shard_chunk_mem = sum(chunk.chunk_mem for chunk in can_evict_chunks)\n can_offload_chunk_mem = can_shard_chunk_mem\n for chunk in can_evict_chunks:\n if can_shard_chunk_mem <= self.keep_gathered_chunk_mem:\n break\n self.chunk_manager.release_chunk(chunk)\n # real saved mem is chunk_mem - shard_mem, for simplicity we use chunk_mem\n can_shard_chunk_mem -= chunk.chunk_mem\n for chunk in can_evict_chunks:\n if can_offload_chunk_mem <= self.keep_cuda_chunk_mem:\n break\n self.chunk_manager.move_chunk(chunk, torch.device(\"cpu\"))\n # real saved mem is shard_mem, for simplicity we use chunk_mem\n can_offload_chunk_mem -= chunk.chunk_mem\n return 0, 0.0\n\n def setup_grads_device(\n self, params: List[torch.Tensor], grads_device_map: Dict[torch.Tensor, torch.device]\n ) -> None:\n total_chunk_mem = sum(self.chunk_manager.get_chunk(p).chunk_mem for p in params)\n\n offload_optim_chunk_mem = total_chunk_mem * self.offload_optim_frac\n offloaded_optim_chunk_mem = 0\n chunks = set(self.chunk_manager.get_chunk(p) for p in params)\n for chunk in chunks:\n params = chunk.get_tensors()\n # init offload optim settings\n # keep gathered chunks are in CUDA\n if chunk.keep_gathered or 
offloaded_optim_chunk_mem >= offload_optim_chunk_mem:\n device = get_accelerator().get_current_device()\n else:\n device = torch.device(\"cpu\")\n # real offloaded mem is chunk.shard_mem, for simplicity we use chunk mem here\n offloaded_optim_chunk_mem += chunk.chunk_mem\n for p in params:\n grads_device_map[p] = device\n self.keep_gathered_chunk_mem = total_chunk_mem * (1 - self.shard_param_frac)\n self.keep_cuda_chunk_mem = total_chunk_mem * (1 - self.offload_param_frac)\n\n def get_prefetch_chunks(self) -> List[Chunk]:\n if self.gemini_manager.is_warmup(): # no prefetch during warmup since we need compute_list\n return []\n can_prefetch = self.max_prefetch - len(self.gemini_manager._async_works)\n prefetch = []\n for i in range(self.gemini_manager.compute_idx + 1, len(self.gemini_manager.compute_list)):\n break_flag = False\n for chunk in self.gemini_manager.compute_list[i]:\n if len(prefetch) >= can_prefetch:\n break_flag = True\n break\n if chunk not in prefetch and chunk not in self.chunk_manager.accessed_chunks:\n prefetch.append(chunk)\n if break_flag:\n break\n return prefetch\n\n\nclass AutoPlacementPolicy(PlacementPolicy):\n need_mem_stats: bool = True\n\n def __init__(\n self,\n gemini_manager: \"GeminiManager\",\n chunk_manager: ChunkManager,\n mem_stats_collector: Optional[ChunkMemStatsCollector] = None,\n max_prefetch: int = 0,\n warmup_non_model_data_ratio: float = 0.8,\n steady_cuda_cap_ratio: float = 0.9,\n **kwargs,\n ) -> None:\n super().__init__(\n gemini_manager, chunk_manager, mem_stats_collector=mem_stats_collector, max_prefetch=max_prefetch\n )\n # model data will use 1-_warmup_non_model_data_ratio CUDA memory in warmup phase\n # you can set them by AutoPlacementPolicy.set_warmup_non_model_data_ratio()\n # and AutoPlacementPolicy.set_steady_cuda_cap_ratio()\n self._warmup_non_model_data_ratio = warmup_non_model_data_ratio\n self._steady_cuda_cap_ratio = steady_cuda_cap_ratio\n\n def evict_tensors(\n self,\n can_evict_chunks: List[Chunk],\n cuda_demand: int = 0,\n warmup: bool = True,\n compute_list: Optional[List[Tuple[Chunk, ...]]] = None,\n compute_idx: int = 0,\n **kwargs,\n ) -> Tuple[int, float]:\n \"\"\"\n Evict tensors from CUDA device.\n\n Args:\n can_evict_chunks (List[StatefulTensor]): the list of tensors that can be evicted.\n cuda_demand (int, optional): the volume of data needed on cuda device. Defaults to 0.\n warmup (bool, optional): a flag indicates whether in the phase of warmup. Defaults to True.\n compute_list (List[StatefulTensor], optional): TODO. Defaults to [].\n compute_idx (int, optional): the idx of computing device. 
Defaults to 0.\n\n Raises:\n RuntimeError:\n\n Returns:\n int: the volume of memory that is evicted\n \"\"\"\n start = time()\n cuda_capacity = colo_device_memory_capacity(get_accelerator().get_current_device())\n used_cuda_model_data = self.chunk_manager.total_mem[\"cuda\"]\n if warmup:\n # We designate a part of CUDA memory for model data in warmup iterations.\n max_cuda_non_model_data_per_period = cuda_capacity * self._warmup_non_model_data_ratio\n else:\n # max non-model-data cuda memory consumption of this sampling moment and the next sampling moment.\n max_cuda_non_model_data_per_period = self.mem_stats_collector.next_period_non_model_data_usage(\"cuda\")\n cuda_capacity *= self._steady_cuda_cap_ratio\n total_cuda_model_data = cuda_capacity - max_cuda_non_model_data_per_period\n avail_cuda_model_data = total_cuda_model_data - used_cuda_model_data\n freed_cuda_model_data = 0\n\n if avail_cuda_model_data < cuda_demand:\n # Move cuda_demand - avail_cuda_model_data volume of tensors\n # to_free_cuda_model_data = cuda_demand - avail_cuda_model_data\n to_free_cuda_model_data = cuda_demand - avail_cuda_model_data\n to_free_chunks = can_evict_chunks\n if not warmup:\n to_free_chunks = self._sort_can_evict_chunks(tuple(to_free_chunks), compute_idx, tuple(compute_list))\n # print(self._sort_can_evict_chunks.cache_info())\n for chunk in to_free_chunks:\n if freed_cuda_model_data >= to_free_cuda_model_data:\n break\n\n self.chunk_manager.release_chunk(chunk)\n self.chunk_manager.move_chunk(chunk, torch.device(\"cpu\"))\n freed_cuda_model_data += chunk.chunk_mem\n if freed_cuda_model_data < to_free_cuda_model_data:\n raise RuntimeError(\n f\"Adjust layout failed! No enough CUDA memory! \"\n f\"Need {to_free_cuda_model_data}, freed {freed_cuda_model_data}\"\n )\n return freed_cuda_model_data, time() - start\n\n @staticmethod\n @functools.lru_cache(maxsize=None)\n def _sort_can_evict_chunks(can_evict_chunks: tuple, compute_idx: int, compute_list: tuple) -> list:\n next_compute_idx = {chunk: len(compute_list) for chunk in can_evict_chunks}\n for i in range(len(compute_list) - 1, compute_idx, -1):\n for chunk in compute_list[i]:\n if chunk in next_compute_idx:\n next_compute_idx[chunk] = i\n next_compute_idx = sorted(next_compute_idx.items(), key=lambda pair: pair[1], reverse=True)\n return [t for (t, idx) in next_compute_idx]\n\n def setup_grads_device(\n self, params: List[torch.Tensor], grads_device_map: Dict[torch.Tensor, torch.device]\n ) -> None:\n for p in params:\n chunk = self.chunk_manager.get_chunk(p)\n # init offload optim settings\n # keep gathered chunks are in CUDA\n if chunk.keep_gathered:\n grads_device_map[p] = get_accelerator().get_current_device()\n else:\n grads_device_map[p] = torch.device(\"cpu\")\n\n def get_prefetch_chunks(self) -> List[Chunk]:\n if self.gemini_manager.is_warmup(): # no prefetch during warmup since we need compute_list\n return []\n # modified from self.evict_tensors\n cuda_capacity = self._steady_cuda_cap_ratio * colo_device_memory_capacity(\n get_accelerator().get_current_device()\n )\n max_cuda_non_model_data_per_period = self.mem_stats_collector.next_period_non_model_data_usage(\"cuda\")\n used_cuda_model_data = self.chunk_manager.total_mem[\"cuda\"]\n total_cuda_model_data = cuda_capacity - max_cuda_non_model_data_per_period\n avail_cuda_model_data = total_cuda_model_data - used_cuda_model_data\n\n prefetch_chunk_memory = 0\n can_prefetch = self.max_prefetch - len(self.gemini_manager._async_works)\n prefetch = []\n for i in 
range(self.gemini_manager.compute_idx + 1, len(self.gemini_manager.compute_list)):\n break_flag = False\n for chunk in self.gemini_manager.compute_list[i]:\n chunk: Chunk\n if len(prefetch) >= can_prefetch or prefetch_chunk_memory + chunk.chunk_mem > avail_cuda_model_data:\n break_flag = True\n break\n if chunk not in prefetch and chunk not in self.chunk_manager.accessed_chunks:\n prefetch.append(chunk)\n if break_flag:\n break\n return prefetch\n\n\nclass PlacementPolicyFactory:\n policies: Dict[str, Type[PlacementPolicy]] = {\n \"auto\": AutoPlacementPolicy,\n \"static\": StaticPlacementPolicy,\n }\n\n @staticmethod\n def create(policy_name: str) -> Type[PlacementPolicy]:\n if policy_name not in PlacementPolicyFactory.policies:\n raise TypeError(f\"Unknown tensor placement policy {policy_name}\")\n return PlacementPolicyFactory.policies[policy_name]\n\n @staticmethod\n def get_policy_names():\n return tuple(PlacementPolicyFactory.policies.keys())\n", "path": "colossalai/zero/gemini/placement_policy.py"}]}
| 3,434 | 767 |
gh_patches_debug_30075 | rasdani/github-patches | git_diff | ansible-collections__community.general-7090 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
lookup bitwarden field=notes resulting in KeyError
### Summary
When using the community.general.bitwarden lookup there is an error trying to look up a secure note.
Playbook:
```yaml
- debug:
msg: >-
"{{ lookup('community.general.bitwarden', 'secure note 2023', field='notes') }}"
```
Error:
```
fatal: [my.hostname.net]: FAILED! => {
"msg": "An unhandled exception occurred while running the lookup plugin 'community.general.bitwarden'. Error was a <class 'KeyError'>, original message: 'fields'. 'fields'"
}
```
With debug:
```console
exception during Jinja2 execution: Traceback (most recent call last):
File "/opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible/template/__init__.py", line 831, in _lookup
ran = instance.run(loop_terms, variables=self._available_variables, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible_collections/community/general/plugins/lookup/bitwarden.py", line 159, in run
return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible_collections/community/general/plugins/lookup/bitwarden.py", line 159, in <listcomp>
return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible_collections/community/general/plugins/lookup/bitwarden.py", line 141, in get_field
for custom_field in match['fields']:
~~~~~^^^^^^^^^^
KeyError: 'fields'
fatal: [my.hostname.net]: FAILED! => {
"msg": "An unhandled exception occurred while running the lookup plugin 'community.general.bitwarden'. Error was a <class 'KeyError'>, original message: 'fields'. 'fields'"
}
```
Same thing when trying to do the lookup by id:
```yaml
- debug:
msg: >-
"{{ lookup('community.general.bitwarden', 'ba9f1125-e52a-4d4f-9b2b-af1c43a00bd6', search='id', field='notes') }}"
```
### Issue Type
Bug Report
### Component Name
bitwarden
### Ansible Version
```console (paste below)
$ ansible --version
ansible [core 2.15.0]
config file = /Users/sjoerd/.ansible.cfg
configured module search path = ['/Users/name/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible
ansible collection location = /Users/name/.ansible/collections:/usr/share/ansible/collections
executable location = /opt/homebrew/bin/ansible
python version = 3.11.3 (main, Apr 7 2023, 20:13:31) [Clang 14.0.0 (clang-1400.0.29.202)] (/opt/homebrew/Cellar/ansible/8.0.0/libexec/bin/python3.11)
jinja version = 3.1.2
libyaml = True
```
### Community.general Version
```console (paste below)
$ ansible-galaxy collection list community.general
# /opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible_collections
Collection Version
----------------- -------
community.general 7.0.1
```
### Configuration
```console (paste below)
$ ansible-config dump --only-changed
```
### OS / Environment
Source: MacOS
Target: CentOS Stream 9
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
- debug:
msg: >-
"{{ lookup('community.general.bitwarden', 'secure note 2023', field='notes') }}"
```
### Expected Results
Expected the contents of the secure note to be printed to the terminal
### Actual Results
```console (paste below)
fatal: [my.hostname.net]: FAILED! => {
"msg": "An unhandled exception occurred while running the lookup plugin 'community.general.bitwarden'. Error was a <class 'KeyError'>, original message: 'fields'. 'fields'"
}
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/lookup/bitwarden.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright (c) 2022, Jonathan Lung <[email protected]>
3 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
4 # SPDX-License-Identifier: GPL-3.0-or-later
5 from __future__ import (absolute_import, division, print_function)
6 __metaclass__ = type
7
8 DOCUMENTATION = """
9 name: bitwarden
10 author:
11 - Jonathan Lung (@lungj) <[email protected]>
12 requirements:
13 - bw (command line utility)
14 - be logged into bitwarden
15 - bitwarden vault unlocked
16 - E(BW_SESSION) environment variable set
17 short_description: Retrieve secrets from Bitwarden
18 version_added: 5.4.0
19 description:
20 - Retrieve secrets from Bitwarden.
21 options:
22 _terms:
23 description: Key(s) to fetch values for from login info.
24 required: true
25 type: list
26 elements: str
27 search:
28 description: Field to retrieve, for example V(name) or V(id).
29 type: str
30 default: name
31 version_added: 5.7.0
32 field:
33 description: Field to fetch. Leave unset to fetch whole response.
34 type: str
35 collection_id:
36 description: Collection ID to filter results by collection. Leave unset to skip filtering.
37 type: str
38 version_added: 6.3.0
39 """
40
41 EXAMPLES = """
42 - name: "Get 'password' from Bitwarden record named 'a_test'"
43 ansible.builtin.debug:
44 msg: >-
45 {{ lookup('community.general.bitwarden', 'a_test', field='password') }}
46
47 - name: "Get 'password' from Bitwarden record with id 'bafba515-af11-47e6-abe3-af1200cd18b2'"
48 ansible.builtin.debug:
49 msg: >-
50 {{ lookup('community.general.bitwarden', 'bafba515-af11-47e6-abe3-af1200cd18b2', search='id', field='password') }}
51
52 - name: "Get 'password' from Bitwarden record named 'a_test' from collection"
53 ansible.builtin.debug:
54 msg: >-
55 {{ lookup('community.general.bitwarden', 'a_test', field='password', collection_id='bafba515-af11-47e6-abe3-af1200cd18b2') }}
56
57 - name: "Get full Bitwarden record named 'a_test'"
58 ansible.builtin.debug:
59 msg: >-
60 {{ lookup('community.general.bitwarden', 'a_test') }}
61
62 - name: "Get custom field 'api_key' from Bitwarden record named 'a_test'"
63 ansible.builtin.debug:
64 msg: >-
65 {{ lookup('community.general.bitwarden', 'a_test', field='api_key') }}
66 """
67
68 RETURN = """
69 _raw:
70 description: List of requested field or JSON object of list of matches.
71 type: list
72 elements: raw
73 """
74
75 from subprocess import Popen, PIPE
76
77 from ansible.errors import AnsibleError
78 from ansible.module_utils.common.text.converters import to_bytes, to_text
79 from ansible.parsing.ajson import AnsibleJSONDecoder
80 from ansible.plugins.lookup import LookupBase
81
82
83 class BitwardenException(AnsibleError):
84 pass
85
86
87 class Bitwarden(object):
88
89 def __init__(self, path='bw'):
90 self._cli_path = path
91
92 @property
93 def cli_path(self):
94 return self._cli_path
95
96 @property
97 def unlocked(self):
98 out, err = self._run(['status'], stdin="")
99 decoded = AnsibleJSONDecoder().raw_decode(out)[0]
100 return decoded['status'] == 'unlocked'
101
102 def _run(self, args, stdin=None, expected_rc=0):
103 p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE)
104 out, err = p.communicate(to_bytes(stdin))
105 rc = p.wait()
106 if rc != expected_rc:
107 raise BitwardenException(err)
108 return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict')
109
110 def _get_matches(self, search_value, search_field, collection_id):
111 """Return matching records whose search_field is equal to key.
112 """
113
114 # Prepare set of params for Bitwarden CLI
115 params = ['list', 'items', '--search', search_value]
116
117 if collection_id:
118 params.extend(['--collectionid', collection_id])
119
120 out, err = self._run(params)
121
122 # This includes things that matched in different fields.
123 initial_matches = AnsibleJSONDecoder().raw_decode(out)[0]
124
125 # Filter to only include results from the right field.
126 return [item for item in initial_matches if item[search_field] == search_value]
127
128 def get_field(self, field, search_value, search_field="name", collection_id=None):
129 """Return a list of the specified field for records whose search_field match search_value
130 and filtered by collection if collection has been provided.
131
132 If field is None, return the whole record for each match.
133 """
134 matches = self._get_matches(search_value, search_field, collection_id)
135
136 if field in ['autofillOnPageLoad', 'password', 'passwordRevisionDate', 'totp', 'uris', 'username']:
137 return [match['login'][field] for match in matches]
138 elif not field:
139 return matches
140 else:
141 custom_field_matches = []
142 for match in matches:
143 for custom_field in match['fields']:
144 if custom_field['name'] == field:
145 custom_field_matches.append(custom_field['value'])
146 if matches and not custom_field_matches:
147 raise AnsibleError("Custom field {field} does not exist in {search_value}".format(field=field, search_value=search_value))
148 return custom_field_matches
149
150
151 class LookupModule(LookupBase):
152
153 def run(self, terms, variables=None, **kwargs):
154 self.set_options(var_options=variables, direct=kwargs)
155 field = self.get_option('field')
156 search_field = self.get_option('search')
157 collection_id = self.get_option('collection_id')
158 if not _bitwarden.unlocked:
159 raise AnsibleError("Bitwarden Vault locked. Run 'bw unlock'.")
160
161 return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms]
162
163
164 _bitwarden = Bitwarden()
165
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/plugins/lookup/bitwarden.py b/plugins/lookup/bitwarden.py
--- a/plugins/lookup/bitwarden.py
+++ b/plugins/lookup/bitwarden.py
@@ -132,20 +132,29 @@
If field is None, return the whole record for each match.
"""
matches = self._get_matches(search_value, search_field, collection_id)
-
- if field in ['autofillOnPageLoad', 'password', 'passwordRevisionDate', 'totp', 'uris', 'username']:
- return [match['login'][field] for match in matches]
- elif not field:
+ if not field:
return matches
- else:
- custom_field_matches = []
- for match in matches:
+ field_matches = []
+ for match in matches:
+ # if there are no custom fields, then `match` has no key 'fields'
+ if 'fields' in match:
+ custom_field_found = False
for custom_field in match['fields']:
- if custom_field['name'] == field:
- custom_field_matches.append(custom_field['value'])
- if matches and not custom_field_matches:
- raise AnsibleError("Custom field {field} does not exist in {search_value}".format(field=field, search_value=search_value))
- return custom_field_matches
+ if field == custom_field['name']:
+ field_matches.append(custom_field['value'])
+ custom_field_found = True
+ break
+ if custom_field_found:
+ continue
+ if 'login' in match and field in match['login']:
+ field_matches.append(match['login'][field])
+ continue
+ if field in match:
+ field_matches.append(match[field])
+ continue
+ if matches and not field_matches:
+ raise AnsibleError("field {field} does not exist in {search_value}".format(field=field, search_value=search_value))
+ return field_matches
class LookupModule(LookupBase):
|
{"golden_diff": "diff --git a/plugins/lookup/bitwarden.py b/plugins/lookup/bitwarden.py\n--- a/plugins/lookup/bitwarden.py\n+++ b/plugins/lookup/bitwarden.py\n@@ -132,20 +132,29 @@\n If field is None, return the whole record for each match.\n \"\"\"\n matches = self._get_matches(search_value, search_field, collection_id)\n-\n- if field in ['autofillOnPageLoad', 'password', 'passwordRevisionDate', 'totp', 'uris', 'username']:\n- return [match['login'][field] for match in matches]\n- elif not field:\n+ if not field:\n return matches\n- else:\n- custom_field_matches = []\n- for match in matches:\n+ field_matches = []\n+ for match in matches:\n+ # if there are no custom fields, then `match` has no key 'fields'\n+ if 'fields' in match:\n+ custom_field_found = False\n for custom_field in match['fields']:\n- if custom_field['name'] == field:\n- custom_field_matches.append(custom_field['value'])\n- if matches and not custom_field_matches:\n- raise AnsibleError(\"Custom field {field} does not exist in {search_value}\".format(field=field, search_value=search_value))\n- return custom_field_matches\n+ if field == custom_field['name']:\n+ field_matches.append(custom_field['value'])\n+ custom_field_found = True\n+ break\n+ if custom_field_found:\n+ continue\n+ if 'login' in match and field in match['login']:\n+ field_matches.append(match['login'][field])\n+ continue\n+ if field in match:\n+ field_matches.append(match[field])\n+ continue\n+ if matches and not field_matches:\n+ raise AnsibleError(\"field {field} does not exist in {search_value}\".format(field=field, search_value=search_value))\n+ return field_matches\n \n \n class LookupModule(LookupBase):\n", "issue": "lookup bitwarden field=notes resulting in KeyError\n### Summary\n\nWhen using the community.general.bitwarden lookup there is an error trying to lookup a secure note.\r\n\r\nPlaybook:\r\n```yaml\r\n- debug:\r\n msg: >-\r\n \"{{ lookup('community.general.bitwarden', 'secure note 2023', field='notes') }}\"\r\n```\r\n\r\nError:\r\n```\r\nfatal: [my.hostname.net]: FAILED! => {\r\n \"msg\": \"An unhandled exception occurred while running the lookup plugin 'community.general.bitwarden'. Error was a <class 'KeyError'>, original message: 'fields'. 
'fields'\"\r\n}\r\n```\r\nWith debug:\r\n```console\r\nexception during Jinja2 execution: Traceback (most recent call last):\r\n File \"/opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible/template/__init__.py\", line 831, in _lookup\r\n ran = instance.run(loop_terms, variables=self._available_variables, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible_collections/community/general/plugins/lookup/bitwarden.py\", line 159, in run\r\n return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms]\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible_collections/community/general/plugins/lookup/bitwarden.py\", line 159, in <listcomp>\r\n return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms]\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible_collections/community/general/plugins/lookup/bitwarden.py\", line 141, in get_field\r\n for custom_field in match['fields']:\r\n ~~~~~^^^^^^^^^^\r\nKeyError: 'fields'\r\nfatal: [my.hostname.net]: FAILED! => {\r\n \"msg\": \"An unhandled exception occurred while running the lookup plugin 'community.general.bitwarden'. Error was a <class 'KeyError'>, original message: 'fields'. 'fields'\"\r\n}\r\n```\r\n\r\nSame thing when trying to do the lookup by id:\r\n```yaml\r\n- debug:\r\n msg: >-\r\n \"{{ lookup('community.general.bitwarden', 'ba9f1125-e52a-4d4f-9b2b-af1c43a00bd6', search='id', field='notes') }}\"\r\n```\n\n### Issue Type\n\nBug Report\n\n### Component Name\n\nbitwarden\n\n### Ansible Version\n\n```console (paste below)\r\n$ ansible --version\r\nansible [core 2.15.0]\r\n config file = /Users/sjoerd/.ansible.cfg\r\n configured module search path = ['/Users/name/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible\r\n ansible collection location = /Users/name/.ansible/collections:/usr/share/ansible/collections\r\n executable location = /opt/homebrew/bin/ansible\r\n python version = 3.11.3 (main, Apr 7 2023, 20:13:31) [Clang 14.0.0 (clang-1400.0.29.202)] (/opt/homebrew/Cellar/ansible/8.0.0/libexec/bin/python3.11)\r\n jinja version = 3.1.2\r\n libyaml = True\r\n```\r\n\n\n### Community.general Version\n\n```console (paste below)\r\n$ ansible-galaxy collection list community.general\r\n\r\n# /opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible_collections\r\nCollection Version\r\n----------------- -------\r\ncommunity.general 7.0.1\r\n```\r\n\n\n### Configuration\n\n```console (paste below)\r\n$ ansible-config dump --only-changed\r\n\r\n```\r\n\n\n### OS / Environment\n\nSource: MacOS\r\nTarget: CentOS Stream 9\r\n\n\n### Steps to Reproduce\n\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml (paste below)\r\n- debug:\r\n msg: >-\r\n \"{{ lookup('community.general.bitwarden', 'secure note 2023', field='notes') }}\"\r\n\r\n```\r\n\n\n### Expected Results\n\nExpected the contents of the secure not to be printed to the terminal\n\n### Actual Results\n\n```console (paste below)\r\nfatal: [my.hostname.net]: FAILED! 
=> {\r\n \"msg\": \"An unhandled exception occurred while running the lookup plugin 'community.general.bitwarden'. Error was a <class 'KeyError'>, original message: 'fields'. 'fields'\"\r\n}\r\n```\r\n\n\n### Code of Conduct\n\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2022, Jonathan Lung <[email protected]>\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMENTATION = \"\"\"\n name: bitwarden\n author:\n - Jonathan Lung (@lungj) <[email protected]>\n requirements:\n - bw (command line utility)\n - be logged into bitwarden\n - bitwarden vault unlocked\n - E(BW_SESSION) environment variable set\n short_description: Retrieve secrets from Bitwarden\n version_added: 5.4.0\n description:\n - Retrieve secrets from Bitwarden.\n options:\n _terms:\n description: Key(s) to fetch values for from login info.\n required: true\n type: list\n elements: str\n search:\n description: Field to retrieve, for example V(name) or V(id).\n type: str\n default: name\n version_added: 5.7.0\n field:\n description: Field to fetch. Leave unset to fetch whole response.\n type: str\n collection_id:\n description: Collection ID to filter results by collection. Leave unset to skip filtering.\n type: str\n version_added: 6.3.0\n\"\"\"\n\nEXAMPLES = \"\"\"\n- name: \"Get 'password' from Bitwarden record named 'a_test'\"\n ansible.builtin.debug:\n msg: >-\n {{ lookup('community.general.bitwarden', 'a_test', field='password') }}\n\n- name: \"Get 'password' from Bitwarden record with id 'bafba515-af11-47e6-abe3-af1200cd18b2'\"\n ansible.builtin.debug:\n msg: >-\n {{ lookup('community.general.bitwarden', 'bafba515-af11-47e6-abe3-af1200cd18b2', search='id', field='password') }}\n\n- name: \"Get 'password' from Bitwarden record named 'a_test' from collection\"\n ansible.builtin.debug:\n msg: >-\n {{ lookup('community.general.bitwarden', 'a_test', field='password', collection_id='bafba515-af11-47e6-abe3-af1200cd18b2') }}\n\n- name: \"Get full Bitwarden record named 'a_test'\"\n ansible.builtin.debug:\n msg: >-\n {{ lookup('community.general.bitwarden', 'a_test') }}\n\n- name: \"Get custom field 'api_key' from Bitwarden record named 'a_test'\"\n ansible.builtin.debug:\n msg: >-\n {{ lookup('community.general.bitwarden', 'a_test', field='api_key') }}\n\"\"\"\n\nRETURN = \"\"\"\n _raw:\n description: List of requested field or JSON object of list of matches.\n type: list\n elements: raw\n\"\"\"\n\nfrom subprocess import Popen, PIPE\n\nfrom ansible.errors import AnsibleError\nfrom ansible.module_utils.common.text.converters import to_bytes, to_text\nfrom ansible.parsing.ajson import AnsibleJSONDecoder\nfrom ansible.plugins.lookup import LookupBase\n\n\nclass BitwardenException(AnsibleError):\n pass\n\n\nclass Bitwarden(object):\n\n def __init__(self, path='bw'):\n self._cli_path = path\n\n @property\n def cli_path(self):\n return self._cli_path\n\n @property\n def unlocked(self):\n out, err = self._run(['status'], stdin=\"\")\n decoded = AnsibleJSONDecoder().raw_decode(out)[0]\n return decoded['status'] == 'unlocked'\n\n def _run(self, args, stdin=None, expected_rc=0):\n p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE)\n out, err = p.communicate(to_bytes(stdin))\n rc = p.wait()\n if rc != expected_rc:\n raise 
BitwardenException(err)\n return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict')\n\n def _get_matches(self, search_value, search_field, collection_id):\n \"\"\"Return matching records whose search_field is equal to key.\n \"\"\"\n\n # Prepare set of params for Bitwarden CLI\n params = ['list', 'items', '--search', search_value]\n\n if collection_id:\n params.extend(['--collectionid', collection_id])\n\n out, err = self._run(params)\n\n # This includes things that matched in different fields.\n initial_matches = AnsibleJSONDecoder().raw_decode(out)[0]\n\n # Filter to only include results from the right field.\n return [item for item in initial_matches if item[search_field] == search_value]\n\n def get_field(self, field, search_value, search_field=\"name\", collection_id=None):\n \"\"\"Return a list of the specified field for records whose search_field match search_value\n and filtered by collection if collection has been provided.\n\n If field is None, return the whole record for each match.\n \"\"\"\n matches = self._get_matches(search_value, search_field, collection_id)\n\n if field in ['autofillOnPageLoad', 'password', 'passwordRevisionDate', 'totp', 'uris', 'username']:\n return [match['login'][field] for match in matches]\n elif not field:\n return matches\n else:\n custom_field_matches = []\n for match in matches:\n for custom_field in match['fields']:\n if custom_field['name'] == field:\n custom_field_matches.append(custom_field['value'])\n if matches and not custom_field_matches:\n raise AnsibleError(\"Custom field {field} does not exist in {search_value}\".format(field=field, search_value=search_value))\n return custom_field_matches\n\n\nclass LookupModule(LookupBase):\n\n def run(self, terms, variables=None, **kwargs):\n self.set_options(var_options=variables, direct=kwargs)\n field = self.get_option('field')\n search_field = self.get_option('search')\n collection_id = self.get_option('collection_id')\n if not _bitwarden.unlocked:\n raise AnsibleError(\"Bitwarden Vault locked. Run 'bw unlock'.\")\n\n return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms]\n\n\n_bitwarden = Bitwarden()\n", "path": "plugins/lookup/bitwarden.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2022, Jonathan Lung <[email protected]>\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMENTATION = \"\"\"\n name: bitwarden\n author:\n - Jonathan Lung (@lungj) <[email protected]>\n requirements:\n - bw (command line utility)\n - be logged into bitwarden\n - bitwarden vault unlocked\n - E(BW_SESSION) environment variable set\n short_description: Retrieve secrets from Bitwarden\n version_added: 5.4.0\n description:\n - Retrieve secrets from Bitwarden.\n options:\n _terms:\n description: Key(s) to fetch values for from login info.\n required: true\n type: list\n elements: str\n search:\n description: Field to retrieve, for example V(name) or V(id).\n type: str\n default: name\n version_added: 5.7.0\n field:\n description: Field to fetch. Leave unset to fetch whole response.\n type: str\n collection_id:\n description: Collection ID to filter results by collection. 
Leave unset to skip filtering.\n type: str\n version_added: 6.3.0\n\"\"\"\n\nEXAMPLES = \"\"\"\n- name: \"Get 'password' from Bitwarden record named 'a_test'\"\n ansible.builtin.debug:\n msg: >-\n {{ lookup('community.general.bitwarden', 'a_test', field='password') }}\n\n- name: \"Get 'password' from Bitwarden record with id 'bafba515-af11-47e6-abe3-af1200cd18b2'\"\n ansible.builtin.debug:\n msg: >-\n {{ lookup('community.general.bitwarden', 'bafba515-af11-47e6-abe3-af1200cd18b2', search='id', field='password') }}\n\n- name: \"Get 'password' from Bitwarden record named 'a_test' from collection\"\n ansible.builtin.debug:\n msg: >-\n {{ lookup('community.general.bitwarden', 'a_test', field='password', collection_id='bafba515-af11-47e6-abe3-af1200cd18b2') }}\n\n- name: \"Get full Bitwarden record named 'a_test'\"\n ansible.builtin.debug:\n msg: >-\n {{ lookup('community.general.bitwarden', 'a_test') }}\n\n- name: \"Get custom field 'api_key' from Bitwarden record named 'a_test'\"\n ansible.builtin.debug:\n msg: >-\n {{ lookup('community.general.bitwarden', 'a_test', field='api_key') }}\n\"\"\"\n\nRETURN = \"\"\"\n _raw:\n description: List of requested field or JSON object of list of matches.\n type: list\n elements: raw\n\"\"\"\n\nfrom subprocess import Popen, PIPE\n\nfrom ansible.errors import AnsibleError\nfrom ansible.module_utils.common.text.converters import to_bytes, to_text\nfrom ansible.parsing.ajson import AnsibleJSONDecoder\nfrom ansible.plugins.lookup import LookupBase\n\n\nclass BitwardenException(AnsibleError):\n pass\n\n\nclass Bitwarden(object):\n\n def __init__(self, path='bw'):\n self._cli_path = path\n\n @property\n def cli_path(self):\n return self._cli_path\n\n @property\n def unlocked(self):\n out, err = self._run(['status'], stdin=\"\")\n decoded = AnsibleJSONDecoder().raw_decode(out)[0]\n return decoded['status'] == 'unlocked'\n\n def _run(self, args, stdin=None, expected_rc=0):\n p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE)\n out, err = p.communicate(to_bytes(stdin))\n rc = p.wait()\n if rc != expected_rc:\n raise BitwardenException(err)\n return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict')\n\n def _get_matches(self, search_value, search_field, collection_id):\n \"\"\"Return matching records whose search_field is equal to key.\n \"\"\"\n\n # Prepare set of params for Bitwarden CLI\n params = ['list', 'items', '--search', search_value]\n\n if collection_id:\n params.extend(['--collectionid', collection_id])\n\n out, err = self._run(params)\n\n # This includes things that matched in different fields.\n initial_matches = AnsibleJSONDecoder().raw_decode(out)[0]\n\n # Filter to only include results from the right field.\n return [item for item in initial_matches if item[search_field] == search_value]\n\n def get_field(self, field, search_value, search_field=\"name\", collection_id=None):\n \"\"\"Return a list of the specified field for records whose search_field match search_value\n and filtered by collection if collection has been provided.\n\n If field is None, return the whole record for each match.\n \"\"\"\n matches = self._get_matches(search_value, search_field, collection_id)\n if not field:\n return matches\n field_matches = []\n for match in matches:\n # if there are no custom fields, then `match` has no key 'fields'\n if 'fields' in match:\n custom_field_found = False\n for custom_field in match['fields']:\n if field == custom_field['name']:\n field_matches.append(custom_field['value'])\n 
custom_field_found = True\n break\n if custom_field_found:\n continue\n if 'login' in match and field in match['login']:\n field_matches.append(match['login'][field])\n continue\n if field in match:\n field_matches.append(match[field])\n continue\n if matches and not field_matches:\n raise AnsibleError(\"field {field} does not exist in {search_value}\".format(field=field, search_value=search_value))\n return field_matches\n\n\nclass LookupModule(LookupBase):\n\n def run(self, terms, variables=None, **kwargs):\n self.set_options(var_options=variables, direct=kwargs)\n field = self.get_option('field')\n search_field = self.get_option('search')\n collection_id = self.get_option('collection_id')\n if not _bitwarden.unlocked:\n raise AnsibleError(\"Bitwarden Vault locked. Run 'bw unlock'.\")\n\n return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms]\n\n\n_bitwarden = Bitwarden()\n", "path": "plugins/lookup/bitwarden.py"}]}
| 3,294 | 443 |
gh_patches_debug_20282 | rasdani/github-patches | git_diff | PaddlePaddle__models-449 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Need to set the version of CTC decoders formally
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `deep_speech_2/decoders/swig/setup.py`
Content:
```
1 """Script to build and install decoder package."""
2 from __future__ import absolute_import
3 from __future__ import division
4 from __future__ import print_function
5
6 from setuptools import setup, Extension, distutils
7 import glob
8 import platform
9 import os, sys
10 import multiprocessing.pool
11 import argparse
12
13 parser = argparse.ArgumentParser(description=__doc__)
14 parser.add_argument(
15 "--num_processes",
16 default=1,
17 type=int,
18 help="Number of cpu processes to build package. (default: %(default)d)")
19 args = parser.parse_known_args()
20
21 # reconstruct sys.argv to pass to setup below
22 sys.argv = [sys.argv[0]] + args[1]
23
24
25 # monkey-patch for parallel compilation
26 # See: https://stackoverflow.com/a/13176803
27 def parallelCCompile(self,
28 sources,
29 output_dir=None,
30 macros=None,
31 include_dirs=None,
32 debug=0,
33 extra_preargs=None,
34 extra_postargs=None,
35 depends=None):
36 # those lines are copied from distutils.ccompiler.CCompiler directly
37 macros, objects, extra_postargs, pp_opts, build = self._setup_compile(
38 output_dir, macros, include_dirs, sources, depends, extra_postargs)
39 cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
40
41 # parallel code
42 def _single_compile(obj):
43 try:
44 src, ext = build[obj]
45 except KeyError:
46 return
47 self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
48
49 # convert to list, imap is evaluated on-demand
50 thread_pool = multiprocessing.pool.ThreadPool(args[0].num_processes)
51 list(thread_pool.imap(_single_compile, objects))
52 return objects
53
54
55 def compile_test(header, library):
56 dummy_path = os.path.join(os.path.dirname(__file__), "dummy")
57 command = "bash -c \"g++ -include " + header \
58 + " -l" + library + " -x c++ - <<<'int main() {}' -o " \
59 + dummy_path + " >/dev/null 2>/dev/null && rm " \
60 + dummy_path + " 2>/dev/null\""
61 return os.system(command) == 0
62
63
64 # hack compile to support parallel compiling
65 distutils.ccompiler.CCompiler.compile = parallelCCompile
66
67 FILES = glob.glob('kenlm/util/*.cc') \
68 + glob.glob('kenlm/lm/*.cc') \
69 + glob.glob('kenlm/util/double-conversion/*.cc')
70
71 FILES += glob.glob('openfst-1.6.3/src/lib/*.cc')
72
73 # FILES + glob.glob('glog/src/*.cc')
74 FILES = [
75 fn for fn in FILES
76 if not (fn.endswith('main.cc') or fn.endswith('test.cc') or fn.endswith(
77 'unittest.cc'))
78 ]
79
80 LIBS = ['stdc++']
81 if platform.system() != 'Darwin':
82 LIBS.append('rt')
83
84 ARGS = ['-O3', '-DNDEBUG', '-DKENLM_MAX_ORDER=6', '-std=c++11']
85
86 if compile_test('zlib.h', 'z'):
87 ARGS.append('-DHAVE_ZLIB')
88 LIBS.append('z')
89
90 if compile_test('bzlib.h', 'bz2'):
91 ARGS.append('-DHAVE_BZLIB')
92 LIBS.append('bz2')
93
94 if compile_test('lzma.h', 'lzma'):
95 ARGS.append('-DHAVE_XZLIB')
96 LIBS.append('lzma')
97
98 os.system('swig -python -c++ ./decoders.i')
99
100 decoders_module = [
101 Extension(
102 name='_swig_decoders',
103 sources=FILES + glob.glob('*.cxx') + glob.glob('*.cpp'),
104 language='c++',
105 include_dirs=[
106 '.',
107 'kenlm',
108 'openfst-1.6.3/src/include',
109 'ThreadPool',
110 #'glog/src'
111 ],
112 libraries=LIBS,
113 extra_compile_args=ARGS)
114 ]
115
116 setup(
117 name='swig_decoders',
118 version='0.1',
119 description="""CTC decoders""",
120 ext_modules=decoders_module,
121 py_modules=['swig_decoders'], )
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/deep_speech_2/decoders/swig/setup.py b/deep_speech_2/decoders/swig/setup.py
--- a/deep_speech_2/decoders/swig/setup.py
+++ b/deep_speech_2/decoders/swig/setup.py
@@ -70,7 +70,6 @@
FILES += glob.glob('openfst-1.6.3/src/lib/*.cc')
-# FILES + glob.glob('glog/src/*.cc')
FILES = [
fn for fn in FILES
if not (fn.endswith('main.cc') or fn.endswith('test.cc') or fn.endswith(
@@ -107,7 +106,6 @@
'kenlm',
'openfst-1.6.3/src/include',
'ThreadPool',
- #'glog/src'
],
libraries=LIBS,
extra_compile_args=ARGS)
@@ -115,7 +113,7 @@
setup(
name='swig_decoders',
- version='0.1',
+ version='1.0',
description="""CTC decoders""",
ext_modules=decoders_module,
py_modules=['swig_decoders'], )
|
{"golden_diff": "diff --git a/deep_speech_2/decoders/swig/setup.py b/deep_speech_2/decoders/swig/setup.py\n--- a/deep_speech_2/decoders/swig/setup.py\n+++ b/deep_speech_2/decoders/swig/setup.py\n@@ -70,7 +70,6 @@\n \n FILES += glob.glob('openfst-1.6.3/src/lib/*.cc')\n \n-# FILES + glob.glob('glog/src/*.cc')\n FILES = [\n fn for fn in FILES\n if not (fn.endswith('main.cc') or fn.endswith('test.cc') or fn.endswith(\n@@ -107,7 +106,6 @@\n 'kenlm',\n 'openfst-1.6.3/src/include',\n 'ThreadPool',\n- #'glog/src'\n ],\n libraries=LIBS,\n extra_compile_args=ARGS)\n@@ -115,7 +113,7 @@\n \n setup(\n name='swig_decoders',\n- version='0.1',\n+ version='1.0',\n description=\"\"\"CTC decoders\"\"\",\n ext_modules=decoders_module,\n py_modules=['swig_decoders'], )\n", "issue": "Need to set the version of CTC decoders formally\n\n", "before_files": [{"content": "\"\"\"Script to build and install decoder package.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom setuptools import setup, Extension, distutils\nimport glob\nimport platform\nimport os, sys\nimport multiprocessing.pool\nimport argparse\n\nparser = argparse.ArgumentParser(description=__doc__)\nparser.add_argument(\n \"--num_processes\",\n default=1,\n type=int,\n help=\"Number of cpu processes to build package. (default: %(default)d)\")\nargs = parser.parse_known_args()\n\n# reconstruct sys.argv to pass to setup below\nsys.argv = [sys.argv[0]] + args[1]\n\n\n# monkey-patch for parallel compilation\n# See: https://stackoverflow.com/a/13176803\ndef parallelCCompile(self,\n sources,\n output_dir=None,\n macros=None,\n include_dirs=None,\n debug=0,\n extra_preargs=None,\n extra_postargs=None,\n depends=None):\n # those lines are copied from distutils.ccompiler.CCompiler directly\n macros, objects, extra_postargs, pp_opts, build = self._setup_compile(\n output_dir, macros, include_dirs, sources, depends, extra_postargs)\n cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)\n\n # parallel code\n def _single_compile(obj):\n try:\n src, ext = build[obj]\n except KeyError:\n return\n self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)\n\n # convert to list, imap is evaluated on-demand\n thread_pool = multiprocessing.pool.ThreadPool(args[0].num_processes)\n list(thread_pool.imap(_single_compile, objects))\n return objects\n\n\ndef compile_test(header, library):\n dummy_path = os.path.join(os.path.dirname(__file__), \"dummy\")\n command = \"bash -c \\\"g++ -include \" + header \\\n + \" -l\" + library + \" -x c++ - <<<'int main() {}' -o \" \\\n + dummy_path + \" >/dev/null 2>/dev/null && rm \" \\\n + dummy_path + \" 2>/dev/null\\\"\"\n return os.system(command) == 0\n\n\n# hack compile to support parallel compiling\ndistutils.ccompiler.CCompiler.compile = parallelCCompile\n\nFILES = glob.glob('kenlm/util/*.cc') \\\n + glob.glob('kenlm/lm/*.cc') \\\n + glob.glob('kenlm/util/double-conversion/*.cc')\n\nFILES += glob.glob('openfst-1.6.3/src/lib/*.cc')\n\n# FILES + glob.glob('glog/src/*.cc')\nFILES = [\n fn for fn in FILES\n if not (fn.endswith('main.cc') or fn.endswith('test.cc') or fn.endswith(\n 'unittest.cc'))\n]\n\nLIBS = ['stdc++']\nif platform.system() != 'Darwin':\n LIBS.append('rt')\n\nARGS = ['-O3', '-DNDEBUG', '-DKENLM_MAX_ORDER=6', '-std=c++11']\n\nif compile_test('zlib.h', 'z'):\n ARGS.append('-DHAVE_ZLIB')\n LIBS.append('z')\n\nif compile_test('bzlib.h', 'bz2'):\n ARGS.append('-DHAVE_BZLIB')\n LIBS.append('bz2')\n\nif compile_test('lzma.h', 'lzma'):\n 
ARGS.append('-DHAVE_XZLIB')\n LIBS.append('lzma')\n\nos.system('swig -python -c++ ./decoders.i')\n\ndecoders_module = [\n Extension(\n name='_swig_decoders',\n sources=FILES + glob.glob('*.cxx') + glob.glob('*.cpp'),\n language='c++',\n include_dirs=[\n '.',\n 'kenlm',\n 'openfst-1.6.3/src/include',\n 'ThreadPool',\n #'glog/src'\n ],\n libraries=LIBS,\n extra_compile_args=ARGS)\n]\n\nsetup(\n name='swig_decoders',\n version='0.1',\n description=\"\"\"CTC decoders\"\"\",\n ext_modules=decoders_module,\n py_modules=['swig_decoders'], )\n", "path": "deep_speech_2/decoders/swig/setup.py"}], "after_files": [{"content": "\"\"\"Script to build and install decoder package.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom setuptools import setup, Extension, distutils\nimport glob\nimport platform\nimport os, sys\nimport multiprocessing.pool\nimport argparse\n\nparser = argparse.ArgumentParser(description=__doc__)\nparser.add_argument(\n \"--num_processes\",\n default=1,\n type=int,\n help=\"Number of cpu processes to build package. (default: %(default)d)\")\nargs = parser.parse_known_args()\n\n# reconstruct sys.argv to pass to setup below\nsys.argv = [sys.argv[0]] + args[1]\n\n\n# monkey-patch for parallel compilation\n# See: https://stackoverflow.com/a/13176803\ndef parallelCCompile(self,\n sources,\n output_dir=None,\n macros=None,\n include_dirs=None,\n debug=0,\n extra_preargs=None,\n extra_postargs=None,\n depends=None):\n # those lines are copied from distutils.ccompiler.CCompiler directly\n macros, objects, extra_postargs, pp_opts, build = self._setup_compile(\n output_dir, macros, include_dirs, sources, depends, extra_postargs)\n cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)\n\n # parallel code\n def _single_compile(obj):\n try:\n src, ext = build[obj]\n except KeyError:\n return\n self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)\n\n # convert to list, imap is evaluated on-demand\n thread_pool = multiprocessing.pool.ThreadPool(args[0].num_processes)\n list(thread_pool.imap(_single_compile, objects))\n return objects\n\n\ndef compile_test(header, library):\n dummy_path = os.path.join(os.path.dirname(__file__), \"dummy\")\n command = \"bash -c \\\"g++ -include \" + header \\\n + \" -l\" + library + \" -x c++ - <<<'int main() {}' -o \" \\\n + dummy_path + \" >/dev/null 2>/dev/null && rm \" \\\n + dummy_path + \" 2>/dev/null\\\"\"\n return os.system(command) == 0\n\n\n# hack compile to support parallel compiling\ndistutils.ccompiler.CCompiler.compile = parallelCCompile\n\nFILES = glob.glob('kenlm/util/*.cc') \\\n + glob.glob('kenlm/lm/*.cc') \\\n + glob.glob('kenlm/util/double-conversion/*.cc')\n\nFILES += glob.glob('openfst-1.6.3/src/lib/*.cc')\n\nFILES = [\n fn for fn in FILES\n if not (fn.endswith('main.cc') or fn.endswith('test.cc') or fn.endswith(\n 'unittest.cc'))\n]\n\nLIBS = ['stdc++']\nif platform.system() != 'Darwin':\n LIBS.append('rt')\n\nARGS = ['-O3', '-DNDEBUG', '-DKENLM_MAX_ORDER=6', '-std=c++11']\n\nif compile_test('zlib.h', 'z'):\n ARGS.append('-DHAVE_ZLIB')\n LIBS.append('z')\n\nif compile_test('bzlib.h', 'bz2'):\n ARGS.append('-DHAVE_BZLIB')\n LIBS.append('bz2')\n\nif compile_test('lzma.h', 'lzma'):\n ARGS.append('-DHAVE_XZLIB')\n LIBS.append('lzma')\n\nos.system('swig -python -c++ ./decoders.i')\n\ndecoders_module = [\n Extension(\n name='_swig_decoders',\n sources=FILES + glob.glob('*.cxx') + glob.glob('*.cpp'),\n language='c++',\n include_dirs=[\n '.',\n 'kenlm',\n 
'openfst-1.6.3/src/include',\n 'ThreadPool',\n ],\n libraries=LIBS,\n extra_compile_args=ARGS)\n]\n\nsetup(\n name='swig_decoders',\n version='1.0',\n description=\"\"\"CTC decoders\"\"\",\n ext_modules=decoders_module,\n py_modules=['swig_decoders'], )\n", "path": "deep_speech_2/decoders/swig/setup.py"}]}
| 1,444 | 265 |
gh_patches_debug_23631 | rasdani/github-patches | git_diff | e-valuation__EvaP-762 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Test management commands
Because in three years, run_tasks will silently fail on the production system and nobody will notice.
- [x] **run_tasks** - shouldn't be too hard and is rather important
- [x] **anonymize** - might be a bit of work to cover it properly, but should be straightforward.
- [x] **refresh_results_cache** - should be easy
- [x] **dump_testdata** - don't know how not to overwrite the file during testing, but should be possible
the other commands are already tested or rather unsuitable for testing
- [x] **merge_users** - already has a test (#703) and is shown to be pretty broken.
- [x] **run** - don't know how to test this and there isn't really anything that could break. still, somehow running it to check that it doesn't crash right away on e.g. imports would be cool
- [x] **reload_testdata** - don't know whether it's possible at all to test that, i mean it drops the whole database...
- [ ] **import_ad** - we never used it and i don't know whether it's feasible to mock ldap
use `self.stdout.write` instead of `print` and `call_command("command_name", stdout=StringIO())` to avoid console output during tests. don't know what to do about calls to `input`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evap/evaluation/management/commands/import_ad.py`
Content:
```
1 import getpass
2 import ldap
3 import sys
4
5 from django.core.management.base import BaseCommand
6
7 from evap.evaluation.models import UserProfile
8
9
10 class Command(BaseCommand):
11 args = '<ldap server> <username>'
12 help = 'Imports user data from Active Directory. The username should be specified with realm.'
13
14 def handle(self, *args, **options):
15 try:
16 # connect
17 l = ldap.initialize(args[0])
18
19 # bind
20 l.bind_s(args[1], getpass.getpass("AD Password: "))
21
22 # find all users
23 result = l.search_s("OU=INSTITUT,DC=hpi,DC=uni-potsdam,DC=de", ldap.SCOPE_SUBTREE, filterstr="(&(&(objectClass=user)(!(objectClass=computer)))(givenName=*)(sn=*)(mail=*))")
24 for _, attrs in result:
25 try:
26 user = UserProfile.objects.get(username__iexact=attrs['sAMAccountName'][0])
27 user.first_name = attrs['givenName'][0]
28 user.last_name = attrs['sn'][0]
29 user.email = attrs['mail'][0]
30 user.save()
31
32 print("Successfully updated: '{0}'".format(user.username))
33 except UserProfile.DoesNotExist:
34 pass
35 except Exception as e:
36 print(e)
37
38 l.unbind_s()
39
40 except KeyboardInterrupt:
41 sys.stderr.write("\nOperation cancelled.\n")
42 sys.exit(1)
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/evap/evaluation/management/commands/import_ad.py b/evap/evaluation/management/commands/import_ad.py
deleted file mode 100644
--- a/evap/evaluation/management/commands/import_ad.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import getpass
-import ldap
-import sys
-
-from django.core.management.base import BaseCommand
-
-from evap.evaluation.models import UserProfile
-
-
-class Command(BaseCommand):
- args = '<ldap server> <username>'
- help = 'Imports user data from Active Directory. The username should be specified with realm.'
-
- def handle(self, *args, **options):
- try:
- # connect
- l = ldap.initialize(args[0])
-
- # bind
- l.bind_s(args[1], getpass.getpass("AD Password: "))
-
- # find all users
- result = l.search_s("OU=INSTITUT,DC=hpi,DC=uni-potsdam,DC=de", ldap.SCOPE_SUBTREE, filterstr="(&(&(objectClass=user)(!(objectClass=computer)))(givenName=*)(sn=*)(mail=*))")
- for _, attrs in result:
- try:
- user = UserProfile.objects.get(username__iexact=attrs['sAMAccountName'][0])
- user.first_name = attrs['givenName'][0]
- user.last_name = attrs['sn'][0]
- user.email = attrs['mail'][0]
- user.save()
-
- print("Successfully updated: '{0}'".format(user.username))
- except UserProfile.DoesNotExist:
- pass
- except Exception as e:
- print(e)
-
- l.unbind_s()
-
- except KeyboardInterrupt:
- sys.stderr.write("\nOperation cancelled.\n")
- sys.exit(1)
|
{"golden_diff": "diff --git a/evap/evaluation/management/commands/import_ad.py b/evap/evaluation/management/commands/import_ad.py\ndeleted file mode 100644\n--- a/evap/evaluation/management/commands/import_ad.py\n+++ /dev/null\n@@ -1,42 +0,0 @@\n-import getpass\n-import ldap\n-import sys\n-\n-from django.core.management.base import BaseCommand\n-\n-from evap.evaluation.models import UserProfile\n-\n-\n-class Command(BaseCommand):\n- args = '<ldap server> <username>'\n- help = 'Imports user data from Active Directory. The username should be specified with realm.'\n-\n- def handle(self, *args, **options):\n- try:\n- # connect\n- l = ldap.initialize(args[0])\n-\n- # bind\n- l.bind_s(args[1], getpass.getpass(\"AD Password: \"))\n-\n- # find all users\n- result = l.search_s(\"OU=INSTITUT,DC=hpi,DC=uni-potsdam,DC=de\", ldap.SCOPE_SUBTREE, filterstr=\"(&(&(objectClass=user)(!(objectClass=computer)))(givenName=*)(sn=*)(mail=*))\")\n- for _, attrs in result:\n- try:\n- user = UserProfile.objects.get(username__iexact=attrs['sAMAccountName'][0])\n- user.first_name = attrs['givenName'][0]\n- user.last_name = attrs['sn'][0]\n- user.email = attrs['mail'][0]\n- user.save()\n-\n- print(\"Successfully updated: '{0}'\".format(user.username))\n- except UserProfile.DoesNotExist:\n- pass\n- except Exception as e:\n- print(e)\n-\n- l.unbind_s()\n-\n- except KeyboardInterrupt:\n- sys.stderr.write(\"\\nOperation cancelled.\\n\")\n- sys.exit(1)\n", "issue": "Test management commands\nBecause in three years, run_tasks will silently fail on the production system and nobody will notice.\n- [x] **run_tasks** - shouldn't be too hard and is rather important\n- [x] **anonymize** - might be a bit of work to cover it properly, but should be straightforward.\n- [x] **refresh_results_cache** - should be easy\n- [x] **dump_testdata** - don't know how not to overwrite the file during testing, but should be possible\n\nthe other commands are already tested or rather unsuitable for testing\n- [x] **merge_users** - already has a test (#703) and is shown to be pretty broken.\n- [x] **run** - don't know how to test this and there isn't really anything that could break. still, somehow running it to check that it doesn't crash right away on e.g. imports would be cool\n- [x] **reload_testdata** - don't know whether it's possible at all to test that, i mean it drops the whole database...\n- [ ] **import_ad** - we never used it and i don't know whether it's feasible to mock ldap\n\nuse `self.stdout.write` instead of `print` and `call_command(\"command_name\", stdout=StringIO())` to avoid console output during tests. don't know what to do about calls to `input`.\n\n", "before_files": [{"content": "import getpass\nimport ldap\nimport sys\n\nfrom django.core.management.base import BaseCommand\n\nfrom evap.evaluation.models import UserProfile\n\n\nclass Command(BaseCommand):\n args = '<ldap server> <username>'\n help = 'Imports user data from Active Directory. 
The username should be specified with realm.'\n\n def handle(self, *args, **options):\n try:\n # connect\n l = ldap.initialize(args[0])\n\n # bind\n l.bind_s(args[1], getpass.getpass(\"AD Password: \"))\n\n # find all users\n result = l.search_s(\"OU=INSTITUT,DC=hpi,DC=uni-potsdam,DC=de\", ldap.SCOPE_SUBTREE, filterstr=\"(&(&(objectClass=user)(!(objectClass=computer)))(givenName=*)(sn=*)(mail=*))\")\n for _, attrs in result:\n try:\n user = UserProfile.objects.get(username__iexact=attrs['sAMAccountName'][0])\n user.first_name = attrs['givenName'][0]\n user.last_name = attrs['sn'][0]\n user.email = attrs['mail'][0]\n user.save()\n\n print(\"Successfully updated: '{0}'\".format(user.username))\n except UserProfile.DoesNotExist:\n pass\n except Exception as e:\n print(e)\n\n l.unbind_s()\n\n except KeyboardInterrupt:\n sys.stderr.write(\"\\nOperation cancelled.\\n\")\n sys.exit(1)\n", "path": "evap/evaluation/management/commands/import_ad.py"}], "after_files": [{"content": null, "path": "evap/evaluation/management/commands/import_ad.py"}]}
| 951 | 410 |
gh_patches_debug_11447
|
rasdani/github-patches
|
git_diff
|
aws-cloudformation__cfn-lint-2236
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support for the docker lambda runtime
cfn-lint 0.43.0
This is a feature request for supporting Docker (container image) Lambda SAM templates.
Please provide as much information as possible:
* SAM templates produced by aws-sam-cli with the docker deploy option don't pass validation
Running on the template.yaml in the base directory outputs the following:
```
% cfn-lint template.yaml
E0001 Error transforming template: Resource with id [HelloWorldFunction] is invalid. 'ImageUri' must be set.
template.yaml:1:1
```
Running on the packaged template at `.aws-sam/build/template.yaml` produces the following:
```
E3002 Invalid Property Resources/HelloWorldFunction/Properties/Code/ImageUri
.aws-sam/build/template.yaml:12:3
E3002 Invalid Property Resources/HelloWorldFunction/Properties/PackageType
.aws-sam/build/template.yaml:12:3
E3003 Property Handler missing at Resources/HelloWorldFunction/Properties
.aws-sam/build/template.yaml:12:3
E3003 Property Runtime missing at Resources/HelloWorldFunction/Properties
.aws-sam/build/template.yaml:12:3
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/transform.py`
Content:
```
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 import os
6 import logging
7 import six
8 import samtranslator
9 from samtranslator.parser import parser
10 from samtranslator.translator.translator import Translator
11 from samtranslator.public.exceptions import InvalidDocumentException
12
13 from cfnlint.helpers import load_resource, convert_dict, format_json_string
14 from cfnlint.data import Serverless
15 from cfnlint.rules import Match, TransformError
16 LOGGER = logging.getLogger('cfnlint')
17
18 samtranslator_logger = logging.getLogger('samtranslator')
19 samtranslator_logger.setLevel(logging.CRITICAL)
20
21 class Transform(object):
22 """
23 Application Serverless Module tranform Wrapper.
24 Based on code from AWS SAM CLI:
25 https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/commands/validate/lib/sam_template_validator.py
26 """
27
28 def __init__(self, filename, template, region):
29 """
30 Initialize Transform class
31 """
32 self._filename = filename
33 self._template = template
34 self._region = region
35 self._parameters = {}
36
37 self._managed_policy_map = self.load_managed_policies()
38 self._sam_parser = parser.Parser()
39
40 def template(self):
41 """Get the template"""
42 return self._template
43
44 def load_managed_policies(self):
45 """
46 Load the ManagedPolicies locally, based on the AWS-CLI:
47 https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/lib/samlib/default_managed_policies.json
48 """
49 return load_resource(Serverless, 'ManagedPolicies.json')
50
51 def _replace_local_codeuri(self):
52 """
53 Replaces the CodeUri in AWS::Serverless::Function and DefinitionUri in
54 AWS::Serverless::Api to a fake S3 Uri. This is to support running the
55 SAM Translator with valid values for these fields. If this is not done,
56 the template is invalid in the eyes of SAM Translator (the translator
57 does not support local paths)
58 """
59
60 all_resources = self._template.get('Resources', {})
61
62 template_globals = self._template.get('Globals', {})
63 auto_publish_alias = template_globals.get('Function', {}).get('AutoPublishAlias')
64 if isinstance(auto_publish_alias, dict):
65 if len(auto_publish_alias) == 1:
66 for k, v in auto_publish_alias.items():
67 if k == 'Ref':
68 if v in self._template.get('Parameters'):
69 self._parameters[v] = 'Alias'
70
71
72 for _, resource in all_resources.items():
73
74 resource_type = resource.get('Type')
75 resource_dict = resource.get('Properties')
76
77 if resource_type == 'AWS::Serverless::Function':
78
79 Transform._update_to_s3_uri('CodeUri', resource_dict)
80 auto_publish_alias = resource_dict.get('AutoPublishAlias')
81 if isinstance(auto_publish_alias, dict):
82 if len(auto_publish_alias) == 1:
83 for k, v in auto_publish_alias.items():
84 if k == 'Ref':
85 if v in self._template.get('Parameters'):
86 self._parameters[v] = 'Alias'
87 if resource_type in ['AWS::Serverless::LayerVersion']:
88 if resource_dict.get('ContentUri'):
89 Transform._update_to_s3_uri('ContentUri', resource_dict)
90 if resource_type == 'AWS::Serverless::Application':
91 if resource_dict.get('Location'):
92 resource_dict['Location'] = ''
93 Transform._update_to_s3_uri('Location', resource_dict)
94 if resource_type == 'AWS::Serverless::Api':
95 if ('DefinitionBody' not in resource_dict and
96 'Auth' not in resource_dict and 'Cors' not in resource_dict):
97 Transform._update_to_s3_uri('DefinitionUri', resource_dict)
98 else:
99 resource_dict['DefinitionBody'] = ''
100 if resource_type == 'AWS::Serverless::StateMachine' and resource_dict.get('DefinitionUri'):
101 Transform._update_to_s3_uri('DefinitionUri', resource_dict)
102
103 def transform_template(self):
104 """
105 Transform the Template using the Serverless Application Model.
106 """
107 matches = []
108
109 try:
110 # Output the SAM Translator version in debug mode
111 LOGGER.info('SAM Translator: %s', samtranslator.__version__)
112
113 sam_translator = Translator(
114 managed_policy_map=self._managed_policy_map,
115 sam_parser=self._sam_parser)
116
117 self._replace_local_codeuri()
118
119 # Tell SAM to use the region we're linting in, this has to be
120 # controlled using the default AWS mechanisms, see also:
121 # https://github.com/awslabs/serverless-application-model/blob/master/samtranslator/translator/arn_generator.py
122 LOGGER.info('Setting AWS_DEFAULT_REGION to %s', self._region)
123 os.environ['AWS_DEFAULT_REGION'] = self._region
124
125 self._template = convert_dict(
126 sam_translator.translate(sam_template=self._template,
127 parameter_values=self._parameters))
128
129 LOGGER.info('Transformed template: \n%s',
130 format_json_string(self._template))
131 except InvalidDocumentException as e:
132 message = 'Error transforming template: {0}'
133 for cause in e.causes:
134 matches.append(Match(
135 1, 1,
136 1, 1,
137 self._filename,
138 TransformError(), message.format(cause.message)))
139 except Exception as e: # pylint: disable=W0703
140 LOGGER.debug('Error transforming template: %s', str(e))
141 LOGGER.debug('Stack trace: %s', e, exc_info=True)
142 message = 'Error transforming template: {0}'
143 matches.append(Match(
144 1, 1,
145 1, 1,
146 self._filename,
147 TransformError(), message.format(str(e))))
148
149 return matches
150
151 @staticmethod
152 def is_s3_uri(uri):
153 """
154 Checks the uri and determines if it is a valid S3 Uri
155 Parameters
156 ----------
157 uri str, required
158 Uri to check
159 Returns
160 -------
161 bool
162 Returns True if the uri given is an S3 uri, otherwise False
163 """
164 return isinstance(uri, six.string_types) and uri.startswith('s3://')
165
166 @staticmethod
167 def _update_to_s3_uri(
168 property_key, resource_property_dict,
169 s3_uri_value='s3://bucket/value'):
170 """
171 Updates the 'property_key' in the 'resource_property_dict' to the
172 value of 's3_uri_value'
173 Note: The function will mutate the resource_property_dict that is pass
174 in Parameters
175 ----------
176 property_key str, required
177 Key in the resource_property_dict
178 resource_property_dict dict, required
179 Property dictionary of a Resource in the template to replace
180 s3_uri_value str, optional
181 Value to update the value of the property_key to
182 """
183 uri_property = resource_property_dict.get(property_key, '.')
184
185 # ignore if dict or already an S3 Uri
186 if isinstance(uri_property, dict):
187 if len(uri_property) == 1:
188 for k in uri_property.keys():
189 if k == 'Ref':
190 resource_property_dict[property_key] = s3_uri_value
191 return
192 if Transform.is_s3_uri(uri_property):
193 return
194
195 resource_property_dict[property_key] = s3_uri_value
196
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cfnlint/transform.py b/src/cfnlint/transform.py
--- a/src/cfnlint/transform.py
+++ b/src/cfnlint/transform.py
@@ -76,7 +76,10 @@
if resource_type == 'AWS::Serverless::Function':
- Transform._update_to_s3_uri('CodeUri', resource_dict)
+ if resource_dict.get('PackageType') == 'Image':
+ Transform._update_to_s3_uri('ImageUri', resource_dict)
+ else:
+ Transform._update_to_s3_uri('CodeUri', resource_dict)
auto_publish_alias = resource_dict.get('AutoPublishAlias')
if isinstance(auto_publish_alias, dict):
if len(auto_publish_alias) == 1:
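The behaviour the patch above introduces can be shown with a small, self-contained sketch. It is a simplified illustration only, not cfn-lint code; the real `_update_to_s3_uri` helper also tolerates `Ref` dicts and values that are already `s3://` URIs.

```python
# Image-packaged Serverless functions get a placeholder ImageUri; everything
# else keeps the placeholder CodeUri, so the SAM translator accepts templates
# built locally for docker/image-based Lambdas.
_PLACEHOLDER = "s3://bucket/value"


def fake_uri_for_function(resource_properties: dict) -> dict:
    if resource_properties.get("PackageType") == "Image":
        resource_properties["ImageUri"] = _PLACEHOLDER
    else:
        resource_properties["CodeUri"] = _PLACEHOLDER
    return resource_properties


print(fake_uri_for_function({"PackageType": "Image"}))
# {'PackageType': 'Image', 'ImageUri': 's3://bucket/value'}
print(fake_uri_for_function({"Handler": "app.handler", "Runtime": "python3.9"}))
# {'Handler': 'app.handler', 'Runtime': 'python3.9', 'CodeUri': 's3://bucket/value'}
```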
|
{"golden_diff": "diff --git a/src/cfnlint/transform.py b/src/cfnlint/transform.py\n--- a/src/cfnlint/transform.py\n+++ b/src/cfnlint/transform.py\n@@ -76,7 +76,10 @@\n \n if resource_type == 'AWS::Serverless::Function':\n \n- Transform._update_to_s3_uri('CodeUri', resource_dict)\n+ if resource_dict.get('PackageType') == 'Image':\n+ Transform._update_to_s3_uri('ImageUri', resource_dict)\n+ else:\n+ Transform._update_to_s3_uri('CodeUri', resource_dict)\n auto_publish_alias = resource_dict.get('AutoPublishAlias')\n if isinstance(auto_publish_alias, dict):\n if len(auto_publish_alias) == 1:\n", "issue": "Support for the docker lambda runtime\ncfn-lint 0.43.0\r\n\r\nThis is a feature request for supporting docker lambda sam templates.\r\n\r\nPlease provide as much information as possible:\r\n* SAM templates produced by aws-sam-cli with the docker deploy option don't pass validation\r\n\r\nRunning on the template.yaml in the base directory outputs the following:\r\n```\r\n% cfn-lint template.yaml \r\nE0001 Error transforming template: Resource with id [HelloWorldFunction] is invalid. 'ImageUri' must be set.\r\ntemplate.yaml:1:1\r\n\r\n```\r\n\r\nRunning on the packaged template at `.aws-sam/build/template.yaml` produces the following:\r\n```\r\nE3002 Invalid Property Resources/HelloWorldFunction/Properties/Code/ImageUri\r\n.aws-sam/build/template.yaml:12:3\r\n\r\nE3002 Invalid Property Resources/HelloWorldFunction/Properties/PackageType\r\n.aws-sam/build/template.yaml:12:3\r\n\r\nE3003 Property Handler missing at Resources/HelloWorldFunction/Properties\r\n.aws-sam/build/template.yaml:12:3\r\n\r\nE3003 Property Runtime missing at Resources/HelloWorldFunction/Properties\r\n.aws-sam/build/template.yaml:12:3\r\n\r\n```\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport os\nimport logging\nimport six\nimport samtranslator\nfrom samtranslator.parser import parser\nfrom samtranslator.translator.translator import Translator\nfrom samtranslator.public.exceptions import InvalidDocumentException\n\nfrom cfnlint.helpers import load_resource, convert_dict, format_json_string\nfrom cfnlint.data import Serverless\nfrom cfnlint.rules import Match, TransformError\nLOGGER = logging.getLogger('cfnlint')\n\nsamtranslator_logger = logging.getLogger('samtranslator')\nsamtranslator_logger.setLevel(logging.CRITICAL)\n\nclass Transform(object):\n \"\"\"\n Application Serverless Module tranform Wrapper.\n Based on code from AWS SAM CLI:\n https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/commands/validate/lib/sam_template_validator.py\n \"\"\"\n\n def __init__(self, filename, template, region):\n \"\"\"\n Initialize Transform class\n \"\"\"\n self._filename = filename\n self._template = template\n self._region = region\n self._parameters = {}\n\n self._managed_policy_map = self.load_managed_policies()\n self._sam_parser = parser.Parser()\n\n def template(self):\n \"\"\"Get the template\"\"\"\n return self._template\n\n def load_managed_policies(self):\n \"\"\"\n Load the ManagedPolicies locally, based on the AWS-CLI:\n https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/lib/samlib/default_managed_policies.json\n \"\"\"\n return load_resource(Serverless, 'ManagedPolicies.json')\n\n def _replace_local_codeuri(self):\n \"\"\"\n Replaces the CodeUri in AWS::Serverless::Function and DefinitionUri in\n AWS::Serverless::Api to a fake S3 Uri. 
This is to support running the\n SAM Translator with valid values for these fields. If this is not done,\n the template is invalid in the eyes of SAM Translator (the translator\n does not support local paths)\n \"\"\"\n\n all_resources = self._template.get('Resources', {})\n\n template_globals = self._template.get('Globals', {})\n auto_publish_alias = template_globals.get('Function', {}).get('AutoPublishAlias')\n if isinstance(auto_publish_alias, dict):\n if len(auto_publish_alias) == 1:\n for k, v in auto_publish_alias.items():\n if k == 'Ref':\n if v in self._template.get('Parameters'):\n self._parameters[v] = 'Alias'\n\n\n for _, resource in all_resources.items():\n\n resource_type = resource.get('Type')\n resource_dict = resource.get('Properties')\n\n if resource_type == 'AWS::Serverless::Function':\n\n Transform._update_to_s3_uri('CodeUri', resource_dict)\n auto_publish_alias = resource_dict.get('AutoPublishAlias')\n if isinstance(auto_publish_alias, dict):\n if len(auto_publish_alias) == 1:\n for k, v in auto_publish_alias.items():\n if k == 'Ref':\n if v in self._template.get('Parameters'):\n self._parameters[v] = 'Alias'\n if resource_type in ['AWS::Serverless::LayerVersion']:\n if resource_dict.get('ContentUri'):\n Transform._update_to_s3_uri('ContentUri', resource_dict)\n if resource_type == 'AWS::Serverless::Application':\n if resource_dict.get('Location'):\n resource_dict['Location'] = ''\n Transform._update_to_s3_uri('Location', resource_dict)\n if resource_type == 'AWS::Serverless::Api':\n if ('DefinitionBody' not in resource_dict and\n 'Auth' not in resource_dict and 'Cors' not in resource_dict):\n Transform._update_to_s3_uri('DefinitionUri', resource_dict)\n else:\n resource_dict['DefinitionBody'] = ''\n if resource_type == 'AWS::Serverless::StateMachine' and resource_dict.get('DefinitionUri'):\n Transform._update_to_s3_uri('DefinitionUri', resource_dict)\n\n def transform_template(self):\n \"\"\"\n Transform the Template using the Serverless Application Model.\n \"\"\"\n matches = []\n\n try:\n # Output the SAM Translator version in debug mode\n LOGGER.info('SAM Translator: %s', samtranslator.__version__)\n\n sam_translator = Translator(\n managed_policy_map=self._managed_policy_map,\n sam_parser=self._sam_parser)\n\n self._replace_local_codeuri()\n\n # Tell SAM to use the region we're linting in, this has to be\n # controlled using the default AWS mechanisms, see also:\n # https://github.com/awslabs/serverless-application-model/blob/master/samtranslator/translator/arn_generator.py\n LOGGER.info('Setting AWS_DEFAULT_REGION to %s', self._region)\n os.environ['AWS_DEFAULT_REGION'] = self._region\n\n self._template = convert_dict(\n sam_translator.translate(sam_template=self._template,\n parameter_values=self._parameters))\n\n LOGGER.info('Transformed template: \\n%s',\n format_json_string(self._template))\n except InvalidDocumentException as e:\n message = 'Error transforming template: {0}'\n for cause in e.causes:\n matches.append(Match(\n 1, 1,\n 1, 1,\n self._filename,\n TransformError(), message.format(cause.message)))\n except Exception as e: # pylint: disable=W0703\n LOGGER.debug('Error transforming template: %s', str(e))\n LOGGER.debug('Stack trace: %s', e, exc_info=True)\n message = 'Error transforming template: {0}'\n matches.append(Match(\n 1, 1,\n 1, 1,\n self._filename,\n TransformError(), message.format(str(e))))\n\n return matches\n\n @staticmethod\n def is_s3_uri(uri):\n \"\"\"\n Checks the uri and determines if it is a valid S3 Uri\n Parameters\n ----------\n 
uri str, required\n Uri to check\n Returns\n -------\n bool\n Returns True if the uri given is an S3 uri, otherwise False\n \"\"\"\n return isinstance(uri, six.string_types) and uri.startswith('s3://')\n\n @staticmethod\n def _update_to_s3_uri(\n property_key, resource_property_dict,\n s3_uri_value='s3://bucket/value'):\n \"\"\"\n Updates the 'property_key' in the 'resource_property_dict' to the\n value of 's3_uri_value'\n Note: The function will mutate the resource_property_dict that is pass\n in Parameters\n ----------\n property_key str, required\n Key in the resource_property_dict\n resource_property_dict dict, required\n Property dictionary of a Resource in the template to replace\n s3_uri_value str, optional\n Value to update the value of the property_key to\n \"\"\"\n uri_property = resource_property_dict.get(property_key, '.')\n\n # ignore if dict or already an S3 Uri\n if isinstance(uri_property, dict):\n if len(uri_property) == 1:\n for k in uri_property.keys():\n if k == 'Ref':\n resource_property_dict[property_key] = s3_uri_value\n return\n if Transform.is_s3_uri(uri_property):\n return\n\n resource_property_dict[property_key] = s3_uri_value\n", "path": "src/cfnlint/transform.py"}], "after_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport os\nimport logging\nimport six\nimport samtranslator\nfrom samtranslator.parser import parser\nfrom samtranslator.translator.translator import Translator\nfrom samtranslator.public.exceptions import InvalidDocumentException\n\nfrom cfnlint.helpers import load_resource, convert_dict, format_json_string\nfrom cfnlint.data import Serverless\nfrom cfnlint.rules import Match, TransformError\nLOGGER = logging.getLogger('cfnlint')\n\nsamtranslator_logger = logging.getLogger('samtranslator')\nsamtranslator_logger.setLevel(logging.CRITICAL)\n\nclass Transform(object):\n \"\"\"\n Application Serverless Module tranform Wrapper.\n Based on code from AWS SAM CLI:\n https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/commands/validate/lib/sam_template_validator.py\n \"\"\"\n\n def __init__(self, filename, template, region):\n \"\"\"\n Initialize Transform class\n \"\"\"\n self._filename = filename\n self._template = template\n self._region = region\n self._parameters = {}\n\n self._managed_policy_map = self.load_managed_policies()\n self._sam_parser = parser.Parser()\n\n def template(self):\n \"\"\"Get the template\"\"\"\n return self._template\n\n def load_managed_policies(self):\n \"\"\"\n Load the ManagedPolicies locally, based on the AWS-CLI:\n https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/lib/samlib/default_managed_policies.json\n \"\"\"\n return load_resource(Serverless, 'ManagedPolicies.json')\n\n def _replace_local_codeuri(self):\n \"\"\"\n Replaces the CodeUri in AWS::Serverless::Function and DefinitionUri in\n AWS::Serverless::Api to a fake S3 Uri. This is to support running the\n SAM Translator with valid values for these fields. 
If this is not done,\n the template is invalid in the eyes of SAM Translator (the translator\n does not support local paths)\n \"\"\"\n\n all_resources = self._template.get('Resources', {})\n\n template_globals = self._template.get('Globals', {})\n auto_publish_alias = template_globals.get('Function', {}).get('AutoPublishAlias')\n if isinstance(auto_publish_alias, dict):\n if len(auto_publish_alias) == 1:\n for k, v in auto_publish_alias.items():\n if k == 'Ref':\n if v in self._template.get('Parameters'):\n self._parameters[v] = 'Alias'\n\n\n for _, resource in all_resources.items():\n\n resource_type = resource.get('Type')\n resource_dict = resource.get('Properties')\n\n if resource_type == 'AWS::Serverless::Function':\n\n if resource_dict.get('PackageType') == 'Image':\n Transform._update_to_s3_uri('ImageUri', resource_dict)\n else:\n Transform._update_to_s3_uri('CodeUri', resource_dict)\n auto_publish_alias = resource_dict.get('AutoPublishAlias')\n if isinstance(auto_publish_alias, dict):\n if len(auto_publish_alias) == 1:\n for k, v in auto_publish_alias.items():\n if k == 'Ref':\n if v in self._template.get('Parameters'):\n self._parameters[v] = 'Alias'\n if resource_type in ['AWS::Serverless::LayerVersion']:\n if resource_dict.get('ContentUri'):\n Transform._update_to_s3_uri('ContentUri', resource_dict)\n if resource_type == 'AWS::Serverless::Application':\n if resource_dict.get('Location'):\n resource_dict['Location'] = ''\n Transform._update_to_s3_uri('Location', resource_dict)\n if resource_type == 'AWS::Serverless::Api':\n if ('DefinitionBody' not in resource_dict and\n 'Auth' not in resource_dict and 'Cors' not in resource_dict):\n Transform._update_to_s3_uri('DefinitionUri', resource_dict)\n else:\n resource_dict['DefinitionBody'] = ''\n if resource_type == 'AWS::Serverless::StateMachine' and resource_dict.get('DefinitionUri'):\n Transform._update_to_s3_uri('DefinitionUri', resource_dict)\n\n def transform_template(self):\n \"\"\"\n Transform the Template using the Serverless Application Model.\n \"\"\"\n matches = []\n\n try:\n # Output the SAM Translator version in debug mode\n LOGGER.info('SAM Translator: %s', samtranslator.__version__)\n\n sam_translator = Translator(\n managed_policy_map=self._managed_policy_map,\n sam_parser=self._sam_parser)\n\n self._replace_local_codeuri()\n\n # Tell SAM to use the region we're linting in, this has to be\n # controlled using the default AWS mechanisms, see also:\n # https://github.com/awslabs/serverless-application-model/blob/master/samtranslator/translator/arn_generator.py\n LOGGER.info('Setting AWS_DEFAULT_REGION to %s', self._region)\n os.environ['AWS_DEFAULT_REGION'] = self._region\n\n self._template = convert_dict(\n sam_translator.translate(sam_template=self._template,\n parameter_values=self._parameters))\n\n LOGGER.info('Transformed template: \\n%s',\n format_json_string(self._template))\n except InvalidDocumentException as e:\n message = 'Error transforming template: {0}'\n for cause in e.causes:\n matches.append(Match(\n 1, 1,\n 1, 1,\n self._filename,\n TransformError(), message.format(cause.message)))\n except Exception as e: # pylint: disable=W0703\n LOGGER.debug('Error transforming template: %s', str(e))\n LOGGER.debug('Stack trace: %s', e, exc_info=True)\n message = 'Error transforming template: {0}'\n matches.append(Match(\n 1, 1,\n 1, 1,\n self._filename,\n TransformError(), message.format(str(e))))\n\n return matches\n\n @staticmethod\n def is_s3_uri(uri):\n \"\"\"\n Checks the uri and determines if it is a valid S3 
Uri\n Parameters\n ----------\n uri str, required\n Uri to check\n Returns\n -------\n bool\n Returns True if the uri given is an S3 uri, otherwise False\n \"\"\"\n return isinstance(uri, six.string_types) and uri.startswith('s3://')\n\n @staticmethod\n def _update_to_s3_uri(\n property_key, resource_property_dict,\n s3_uri_value='s3://bucket/value'):\n \"\"\"\n Updates the 'property_key' in the 'resource_property_dict' to the\n value of 's3_uri_value'\n Note: The function will mutate the resource_property_dict that is pass\n in Parameters\n ----------\n property_key str, required\n Key in the resource_property_dict\n resource_property_dict dict, required\n Property dictionary of a Resource in the template to replace\n s3_uri_value str, optional\n Value to update the value of the property_key to\n \"\"\"\n uri_property = resource_property_dict.get(property_key, '.')\n\n # ignore if dict or already an S3 Uri\n if isinstance(uri_property, dict):\n if len(uri_property) == 1:\n for k in uri_property.keys():\n if k == 'Ref':\n resource_property_dict[property_key] = s3_uri_value\n return\n if Transform.is_s3_uri(uri_property):\n return\n\n resource_property_dict[property_key] = s3_uri_value\n", "path": "src/cfnlint/transform.py"}]}
| 2,578 | 167 |
gh_patches_debug_19945
|
rasdani/github-patches
|
git_diff
|
onnx__onnx-5736
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Refine docs for check_model
Current version:
> Check the consistency of a model. An exception is raised if the test fails.
It would be good to document the kinds of checks performed and the type of exception raised, so users know what to catch, and to clarify that it also runs shape inference when `strict` is True. (Right now it only says `if True, the function checks shapes can be inferred`.)
Should we default `strict` to `True`? @jcwchen
--- END ISSUE ---
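For reference, the exception callers need to catch is `onnx.checker.ValidationError` (re-exported from the C extension at the bottom of `checker.py`, shown below). A short, hypothetical usage sketch follows; `"model.onnx"` is a placeholder path, and shape-inference failures triggered by `full_check=True` may surface as a separate inference error type rather than `ValidationError`.

```python
import onnx

try:
    # full_check=True additionally runs shape inference over the model.
    onnx.checker.check_model("model.onnx", full_check=True)
except onnx.checker.ValidationError as exc:
    print(f"Model failed the consistency check: {exc}")
```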
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `onnx/checker.py`
Content:
```
1 # Copyright (c) ONNX Project Contributors
2 #
3 # SPDX-License-Identifier: Apache-2.0
4 """Graph utilities for checking whether an ONNX proto message is legal."""
5
6 from __future__ import annotations
7
8 __all__ = [
9 "check_attribute",
10 "check_function",
11 "check_graph",
12 "check_model",
13 "check_node",
14 "check_sparse_tensor",
15 "check_tensor",
16 "check_value_info",
17 "DEFAULT_CONTEXT",
18 "ValidationError",
19 "C",
20 "MAXIMUM_PROTOBUF",
21 ]
22
23 import os
24 import sys
25 from typing import Any, Callable, TypeVar
26
27 from google.protobuf.message import Message
28
29 import onnx.defs
30 import onnx.onnx_cpp2py_export.checker as C # noqa: N812
31 import onnx.shape_inference
32 from onnx import (
33 IR_VERSION,
34 AttributeProto,
35 FunctionProto,
36 GraphProto,
37 ModelProto,
38 NodeProto,
39 SparseTensorProto,
40 TensorProto,
41 ValueInfoProto,
42 helper,
43 )
44
45 # Limitation of single protobuf file is 2GB
46 MAXIMUM_PROTOBUF = 2000000000
47
48 # TODO: This thing where we reserialize the protobuf back into the
49 # string, only to deserialize it at the call site, is really goofy.
50 # Stop doing that.
51
52
53 # NB: Please don't edit this context!
54 DEFAULT_CONTEXT = C.CheckerContext()
55 DEFAULT_CONTEXT.ir_version = IR_VERSION
56 # TODO: Maybe ONNX-ML should also be defaulted?
57 DEFAULT_CONTEXT.opset_imports = {"": onnx.defs.onnx_opset_version()}
58
59
60 FuncType = TypeVar("FuncType", bound=Callable[..., Any])
61
62
63 def _ensure_proto_type(proto: Message, proto_type: type[Message]) -> None:
64 if not isinstance(proto, proto_type):
65 raise TypeError(
66 f"The proto message needs to be of type '{proto_type.__name__}'"
67 )
68
69
70 def check_value_info(
71 value_info: ValueInfoProto, ctx: C.CheckerContext = DEFAULT_CONTEXT
72 ) -> None:
73 _ensure_proto_type(value_info, ValueInfoProto)
74 return C.check_value_info(value_info.SerializeToString(), ctx)
75
76
77 def check_tensor(tensor: TensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:
78 _ensure_proto_type(tensor, TensorProto)
79 return C.check_tensor(tensor.SerializeToString(), ctx)
80
81
82 def check_attribute(
83 attr: AttributeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT
84 ) -> None:
85 _ensure_proto_type(attr, AttributeProto)
86 return C.check_attribute(attr.SerializeToString(), ctx)
87
88
89 def check_node(node: NodeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:
90 _ensure_proto_type(node, NodeProto)
91 return C.check_node(node.SerializeToString(), ctx)
92
93
94 def check_function(
95 function: FunctionProto, ctx: C.CheckerContext | None = None
96 ) -> None:
97 _ensure_proto_type(function, FunctionProto)
98 if ctx is None:
99 ctx = C.CheckerContext()
100 ctx.ir_version = helper.find_min_ir_version_for(
101 list(function.opset_import), True
102 )
103 function_opset_dic = {}
104 for domain_version in function.opset_import:
105 function_opset_dic[domain_version.domain] = domain_version.version
106 ctx.opset_imports = function_opset_dic
107 C.check_function(function.SerializeToString(), ctx)
108
109
110 def check_graph(graph: GraphProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:
111 _ensure_proto_type(graph, GraphProto)
112 return C.check_graph(graph.SerializeToString(), ctx)
113
114
115 def check_sparse_tensor(
116 sparse: SparseTensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT
117 ) -> None:
118 _ensure_proto_type(sparse, SparseTensorProto)
119 C.check_sparse_tensor(sparse.SerializeToString(), ctx)
120
121
122 def check_model(
123 model: ModelProto | str | bytes | os.PathLike,
124 full_check: bool = False,
125 skip_opset_compatibility_check: bool = False,
126 ) -> None:
127 """Check the consistency of a model. An exception is raised if the test fails.
128
129 Args:
130 model: Model to check.
131 full_check: If True, the function also checks for shapes that can be inferred.
132 skip_opset_compatibility_check: If True, the function skips the check for
133 opset compatibility.
134 """
135 # If model is a path instead of ModelProto
136 if isinstance(model, (str, os.PathLike)):
137 C.check_model_path(os.fspath(model), full_check, skip_opset_compatibility_check)
138 else:
139 protobuf_string = (
140 model if isinstance(model, bytes) else model.SerializeToString()
141 )
142 # If the protobuf is larger than 2GB,
143 # remind users should use the model path to check
144 if sys.getsizeof(protobuf_string) > MAXIMUM_PROTOBUF:
145 raise ValueError(
146 "This protobuf of onnx model is too large (>2GB). Call check_model with model path instead."
147 )
148 C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
149
150
151 ValidationError = C.ValidationError
152
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/onnx/checker.py b/onnx/checker.py
--- a/onnx/checker.py
+++ b/onnx/checker.py
@@ -124,11 +124,20 @@
full_check: bool = False,
skip_opset_compatibility_check: bool = False,
) -> None:
- """Check the consistency of a model. An exception is raised if the test fails.
+ """Check the consistency of a model.
+
+ An exception will be raised if the model's ir_version is not set
+ properly or is higher than checker's ir_version, or if the model
+ has duplicate keys in metadata_props.
+
+ If IR version >= 3, the model must specify opset_import.
+ If IR version < 3, the model cannot have any opset_import specified.
Args:
- model: Model to check.
- full_check: If True, the function also checks for shapes that can be inferred.
+ model: Model to check. If model is a path, the function checks model
+ path first. If the model bytes size is larger than 2GB, function
+ should be called using model path.
+ full_check: If True, the function also runs shape inference check.
skip_opset_compatibility_check: If True, the function skips the check for
opset compatibility.
"""
|
{"golden_diff": "diff --git a/onnx/checker.py b/onnx/checker.py\n--- a/onnx/checker.py\n+++ b/onnx/checker.py\n@@ -124,11 +124,20 @@\n full_check: bool = False,\n skip_opset_compatibility_check: bool = False,\n ) -> None:\n- \"\"\"Check the consistency of a model. An exception is raised if the test fails.\n+ \"\"\"Check the consistency of a model.\n+\n+ An exception will be raised if the model's ir_version is not set\n+ properly or is higher than checker's ir_version, or if the model\n+ has duplicate keys in metadata_props.\n+\n+ If IR version >= 3, the model must specify opset_import.\n+ If IR version < 3, the model cannot have any opset_import specified.\n \n Args:\n- model: Model to check.\n- full_check: If True, the function also checks for shapes that can be inferred.\n+ model: Model to check. If model is a path, the function checks model\n+ path first. If the model bytes size is larger than 2GB, function\n+ should be called using model path.\n+ full_check: If True, the function also runs shape inference check.\n skip_opset_compatibility_check: If True, the function skips the check for\n opset compatibility.\n \"\"\"\n", "issue": "Refine docs for check_model\nCurrent version:\r\n\r\n> Check the consistency of a model. An exception is raised if the test fails.\r\n\r\nIt would be good if we document the kind of checks done and the type of exception raised so users know what to catch for; as well as clarify that it also runs shape inference when strict is True. (Right now it says `if True, the function checks shapes can be inferred`\uff09\r\n\r\nShould we default `strict` to `True`? @jcwchen \n", "before_files": [{"content": "# Copyright (c) ONNX Project Contributors\n#\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"Graph utilities for checking whether an ONNX proto message is legal.\"\"\"\n\nfrom __future__ import annotations\n\n__all__ = [\n \"check_attribute\",\n \"check_function\",\n \"check_graph\",\n \"check_model\",\n \"check_node\",\n \"check_sparse_tensor\",\n \"check_tensor\",\n \"check_value_info\",\n \"DEFAULT_CONTEXT\",\n \"ValidationError\",\n \"C\",\n \"MAXIMUM_PROTOBUF\",\n]\n\nimport os\nimport sys\nfrom typing import Any, Callable, TypeVar\n\nfrom google.protobuf.message import Message\n\nimport onnx.defs\nimport onnx.onnx_cpp2py_export.checker as C # noqa: N812\nimport onnx.shape_inference\nfrom onnx import (\n IR_VERSION,\n AttributeProto,\n FunctionProto,\n GraphProto,\n ModelProto,\n NodeProto,\n SparseTensorProto,\n TensorProto,\n ValueInfoProto,\n helper,\n)\n\n# Limitation of single protobuf file is 2GB\nMAXIMUM_PROTOBUF = 2000000000\n\n# TODO: This thing where we reserialize the protobuf back into the\n# string, only to deserialize it at the call site, is really goofy.\n# Stop doing that.\n\n\n# NB: Please don't edit this context!\nDEFAULT_CONTEXT = C.CheckerContext()\nDEFAULT_CONTEXT.ir_version = IR_VERSION\n# TODO: Maybe ONNX-ML should also be defaulted?\nDEFAULT_CONTEXT.opset_imports = {\"\": onnx.defs.onnx_opset_version()}\n\n\nFuncType = TypeVar(\"FuncType\", bound=Callable[..., Any])\n\n\ndef _ensure_proto_type(proto: Message, proto_type: type[Message]) -> None:\n if not isinstance(proto, proto_type):\n raise TypeError(\n f\"The proto message needs to be of type '{proto_type.__name__}'\"\n )\n\n\ndef check_value_info(\n value_info: ValueInfoProto, ctx: C.CheckerContext = DEFAULT_CONTEXT\n) -> None:\n _ensure_proto_type(value_info, ValueInfoProto)\n return C.check_value_info(value_info.SerializeToString(), ctx)\n\n\ndef check_tensor(tensor: TensorProto, ctx: 
C.CheckerContext = DEFAULT_CONTEXT) -> None:\n _ensure_proto_type(tensor, TensorProto)\n return C.check_tensor(tensor.SerializeToString(), ctx)\n\n\ndef check_attribute(\n attr: AttributeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT\n) -> None:\n _ensure_proto_type(attr, AttributeProto)\n return C.check_attribute(attr.SerializeToString(), ctx)\n\n\ndef check_node(node: NodeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n _ensure_proto_type(node, NodeProto)\n return C.check_node(node.SerializeToString(), ctx)\n\n\ndef check_function(\n function: FunctionProto, ctx: C.CheckerContext | None = None\n) -> None:\n _ensure_proto_type(function, FunctionProto)\n if ctx is None:\n ctx = C.CheckerContext()\n ctx.ir_version = helper.find_min_ir_version_for(\n list(function.opset_import), True\n )\n function_opset_dic = {}\n for domain_version in function.opset_import:\n function_opset_dic[domain_version.domain] = domain_version.version\n ctx.opset_imports = function_opset_dic\n C.check_function(function.SerializeToString(), ctx)\n\n\ndef check_graph(graph: GraphProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n _ensure_proto_type(graph, GraphProto)\n return C.check_graph(graph.SerializeToString(), ctx)\n\n\ndef check_sparse_tensor(\n sparse: SparseTensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT\n) -> None:\n _ensure_proto_type(sparse, SparseTensorProto)\n C.check_sparse_tensor(sparse.SerializeToString(), ctx)\n\n\ndef check_model(\n model: ModelProto | str | bytes | os.PathLike,\n full_check: bool = False,\n skip_opset_compatibility_check: bool = False,\n) -> None:\n \"\"\"Check the consistency of a model. An exception is raised if the test fails.\n\n Args:\n model: Model to check.\n full_check: If True, the function also checks for shapes that can be inferred.\n skip_opset_compatibility_check: If True, the function skips the check for\n opset compatibility.\n \"\"\"\n # If model is a path instead of ModelProto\n if isinstance(model, (str, os.PathLike)):\n C.check_model_path(os.fspath(model), full_check, skip_opset_compatibility_check)\n else:\n protobuf_string = (\n model if isinstance(model, bytes) else model.SerializeToString()\n )\n # If the protobuf is larger than 2GB,\n # remind users should use the model path to check\n if sys.getsizeof(protobuf_string) > MAXIMUM_PROTOBUF:\n raise ValueError(\n \"This protobuf of onnx model is too large (>2GB). 
Call check_model with model path instead.\"\n )\n C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)\n\n\nValidationError = C.ValidationError\n", "path": "onnx/checker.py"}], "after_files": [{"content": "# Copyright (c) ONNX Project Contributors\n#\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"Graph utilities for checking whether an ONNX proto message is legal.\"\"\"\n\nfrom __future__ import annotations\n\n__all__ = [\n \"check_attribute\",\n \"check_function\",\n \"check_graph\",\n \"check_model\",\n \"check_node\",\n \"check_sparse_tensor\",\n \"check_tensor\",\n \"check_value_info\",\n \"DEFAULT_CONTEXT\",\n \"ValidationError\",\n \"C\",\n \"MAXIMUM_PROTOBUF\",\n]\n\nimport os\nimport sys\nfrom typing import Any, Callable, TypeVar\n\nfrom google.protobuf.message import Message\n\nimport onnx.defs\nimport onnx.onnx_cpp2py_export.checker as C # noqa: N812\nimport onnx.shape_inference\nfrom onnx import (\n IR_VERSION,\n AttributeProto,\n FunctionProto,\n GraphProto,\n ModelProto,\n NodeProto,\n SparseTensorProto,\n TensorProto,\n ValueInfoProto,\n helper,\n)\n\n# Limitation of single protobuf file is 2GB\nMAXIMUM_PROTOBUF = 2000000000\n\n# TODO: This thing where we reserialize the protobuf back into the\n# string, only to deserialize it at the call site, is really goofy.\n# Stop doing that.\n\n\n# NB: Please don't edit this context!\nDEFAULT_CONTEXT = C.CheckerContext()\nDEFAULT_CONTEXT.ir_version = IR_VERSION\n# TODO: Maybe ONNX-ML should also be defaulted?\nDEFAULT_CONTEXT.opset_imports = {\"\": onnx.defs.onnx_opset_version()}\n\n\nFuncType = TypeVar(\"FuncType\", bound=Callable[..., Any])\n\n\ndef _ensure_proto_type(proto: Message, proto_type: type[Message]) -> None:\n if not isinstance(proto, proto_type):\n raise TypeError(\n f\"The proto message needs to be of type '{proto_type.__name__}'\"\n )\n\n\ndef check_value_info(\n value_info: ValueInfoProto, ctx: C.CheckerContext = DEFAULT_CONTEXT\n) -> None:\n _ensure_proto_type(value_info, ValueInfoProto)\n return C.check_value_info(value_info.SerializeToString(), ctx)\n\n\ndef check_tensor(tensor: TensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n _ensure_proto_type(tensor, TensorProto)\n return C.check_tensor(tensor.SerializeToString(), ctx)\n\n\ndef check_attribute(\n attr: AttributeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT\n) -> None:\n _ensure_proto_type(attr, AttributeProto)\n return C.check_attribute(attr.SerializeToString(), ctx)\n\n\ndef check_node(node: NodeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n _ensure_proto_type(node, NodeProto)\n return C.check_node(node.SerializeToString(), ctx)\n\n\ndef check_function(\n function: FunctionProto, ctx: C.CheckerContext | None = None\n) -> None:\n _ensure_proto_type(function, FunctionProto)\n if ctx is None:\n ctx = C.CheckerContext()\n ctx.ir_version = helper.find_min_ir_version_for(\n list(function.opset_import), True\n )\n function_opset_dic = {}\n for domain_version in function.opset_import:\n function_opset_dic[domain_version.domain] = domain_version.version\n ctx.opset_imports = function_opset_dic\n C.check_function(function.SerializeToString(), ctx)\n\n\ndef check_graph(graph: GraphProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n _ensure_proto_type(graph, GraphProto)\n return C.check_graph(graph.SerializeToString(), ctx)\n\n\ndef check_sparse_tensor(\n sparse: SparseTensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT\n) -> None:\n _ensure_proto_type(sparse, SparseTensorProto)\n 
C.check_sparse_tensor(sparse.SerializeToString(), ctx)\n\n\ndef check_model(\n model: ModelProto | str | bytes | os.PathLike,\n full_check: bool = False,\n skip_opset_compatibility_check: bool = False,\n) -> None:\n \"\"\"Check the consistency of a model.\n\n An exception will be raised if the model's ir_version is not set\n properly or is higher than checker's ir_version, or if the model\n has duplicate keys in metadata_props.\n\n If IR version >= 3, the model must specify opset_import.\n If IR version < 3, the model cannot have any opset_import specified.\n\n Args:\n model: Model to check. If model is a path, the function checks model\n path first. If the model bytes size is larger than 2GB, function\n should be called using model path.\n full_check: If True, the function also runs shape inference check.\n skip_opset_compatibility_check: If True, the function skips the check for\n opset compatibility.\n \"\"\"\n # If model is a path instead of ModelProto\n if isinstance(model, (str, os.PathLike)):\n C.check_model_path(os.fspath(model), full_check, skip_opset_compatibility_check)\n else:\n protobuf_string = (\n model if isinstance(model, bytes) else model.SerializeToString()\n )\n # If the protobuf is larger than 2GB,\n # remind users should use the model path to check\n if sys.getsizeof(protobuf_string) > MAXIMUM_PROTOBUF:\n raise ValueError(\n \"This protobuf of onnx model is too large (>2GB). Call check_model with model path instead.\"\n )\n C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)\n\n\nValidationError = C.ValidationError\n", "path": "onnx/checker.py"}]}
| 1,818 | 301 |
gh_patches_debug_15335
|
rasdani/github-patches
|
git_diff
|
google__osv.dev-84
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Switch ecosystem from "" to "OSS-Fuzz" for OSS-Fuzz-sourced bugs
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gcp/api/server.py`
Content:
```
1 # Copyright 2021 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """API server implementation."""
15
16 import argparse
17 from concurrent import futures
18 import functools
19 import logging
20 import os
21 import sys
22 import time
23
24 from google.cloud import ndb
25 import grpc
26
27 import osv
28 import osv_service_v1_pb2
29 import osv_service_v1_pb2_grpc
30
31 _PROJECT = 'oss-vdb'
32 _OSS_FUZZ_TRACKER_URL = 'https://bugs.chromium.org/p/oss-fuzz/issues/detail?id='
33
34 _SHUTDOWN_GRACE_DURATION = 5
35
36 _AUTHORIZATION_HEADER_PREFIX = 'Bearer '
37 _EXPECTED_AUDIENCE = 'https://db.oss-fuzz.com'
38
39 _ndb_client = ndb.Client()
40
41
42 def ndb_context(func):
43 """Wrapper to create an NDB context."""
44
45 @functools.wraps(func)
46 def wrapper(*args, **kwargs):
47 with _ndb_client.context():
48 return func(*args, **kwargs)
49
50 return wrapper
51
52
53 class BaseServicer:
54 """Base servicer."""
55
56 def is_privileged(self, context):
57 """Check whether if the calling client is privileged."""
58 for key, _ in context.invocation_metadata():
59 # If we have this metadata value, it means it passed JWT validation.
60 if key == 'x-endpoint-api-userinfo':
61 return True
62
63 return False
64
65
66 class OSVServicer(osv_service_v1_pb2_grpc.OSVServicer, BaseServicer):
67 """V1 OSV servicer."""
68
69 @ndb_context
70 def GetVulnById(self, request, context):
71 """Return a `Vulnerability` object for a given OSV ID.
72 """
73 bug = osv.Bug.get_by_id(request.id)
74 if not bug or bug.status == osv.BugStatus.UNPROCESSED:
75 context.abort(grpc.StatusCode.NOT_FOUND, 'Bug not found.')
76 return None
77
78 if not bug.public and not self.is_privileged(context):
79 context.abort(grpc.StatusCode.PERMISSION_DENIED, 'Permission denied.')
80 return None
81
82 return bug_to_response(bug)
83
84 @ndb_context
85 def QueryAffected(self, request, context):
86 """Query vulnerabilities for a particular project at a given commit or
87 version."""
88 privileged = self.is_privileged(context)
89 if request.query.HasField('package'):
90 package_name = request.query.package.name
91 ecosystem = request.query.package.ecosystem
92 else:
93 package_name = ''
94 ecosystem = ''
95
96 if request.query.WhichOneof('param') == 'commit':
97 bugs = query_by_commit(
98 package_name,
99 ecosystem,
100 request.query.commit,
101 privileged,
102 to_response=bug_to_response)
103 elif request.query.WhichOneof('param') == 'version':
104 bugs = query_by_version(
105 package_name,
106 ecosystem,
107 request.query.version,
108 privileged,
109 to_response=bug_to_response)
110 else:
111 context.abort(grpc.StatusCode.INVALID_ARGUMENT, 'Invalid query.')
112
113 return osv_service_v1_pb2.VulnerabilityList(vulns=bugs)
114
115 def GetVulnByIdNew(self, request, context):
116 """Return a `Vulnerability` object for a given OSV ID.
117 """
118 return self.GetVulnById(request, context)
119
120 def QueryAffectedNew(self, request, context):
121 """Query vulnerabilities for a particular project at a given commit or
122 version."""
123 return self.QueryAffected(request, context)
124
125
126 def bug_to_response(bug):
127 """Convert a Bug entity to a response object."""
128 return bug.to_vulnerability()
129
130
131 def _get_bugs(bug_ids, to_response=bug_to_response):
132 """Get bugs from bug ids."""
133 bugs = ndb.get_multi([ndb.Key(osv.Bug, bug_id) for bug_id in bug_ids])
134 return [
135 to_response(bug)
136 for bug in bugs
137 if bug and bug.status == osv.BugStatus.PROCESSED
138 ]
139
140
141 def query_by_commit(project,
142 ecosystem,
143 commit,
144 privileged,
145 to_response=bug_to_response):
146 """Query by commit."""
147 query = osv.AffectedCommit.query(osv.AffectedCommit.commit == commit)
148
149 if project:
150 query = query.filter(osv.AffectedCommit.project == project)
151
152 if ecosystem:
153 query = query.filter(osv.AffectedCommit.ecosystem == ecosystem)
154
155 if not privileged:
156 query = query.filter(osv.AffectedCommit.public == True) # pylint: disable=singleton-comparison
157
158 bug_ids = []
159 for affected_commit in query:
160 bug_ids.append(affected_commit.bug_id)
161
162 return _get_bugs(bug_ids, to_response=to_response)
163
164
165 def query_by_tag(project,
166 ecosystem,
167 tag,
168 privileged,
169 to_response=bug_to_response):
170 """Query by tag."""
171 query = osv.Bug.query(osv.Bug.project == project,
172 osv.Bug.ecosystem == ecosystem, osv.Bug.affected == tag)
173
174 if not privileged:
175 query = query.filter(osv.Bug.public == True) # pylint: disable=singleton-comparison
176
177 bugs = []
178 for bug in query:
179 bugs.append(bug)
180
181 return [to_response(bug) for bug in bugs]
182
183
184 def query_by_version(project,
185 ecosystem,
186 version,
187 privileged,
188 to_response=bug_to_response):
189 """Query by (fuzzy) version."""
190 query = osv.Bug.query(osv.Bug.status == osv.BugStatus.PROCESSED,
191 osv.Bug.project == project,
192 osv.Bug.ecosystem == ecosystem,
193 osv.Bug.affected_fuzzy == osv.normalize_tag(version))
194
195 if not privileged:
196 query = query.filter(osv.Bug.public == True) # pylint: disable=singleton-comparison
197
198 bugs = []
199 for bug in query:
200 bugs.append(bug)
201
202 return [to_response(bug) for bug in bugs]
203
204
205 def serve(port):
206 """Configures and runs the bookstore API server."""
207 server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
208 osv_service_v1_pb2_grpc.add_OSVServicer_to_server(OSVServicer(), server)
209 server.add_insecure_port('[::]:{}'.format(port))
210 server.start()
211
212 print('Listening on port {}'.format(port))
213 try:
214 while True:
215 time.sleep(3600)
216 except KeyboardInterrupt:
217 server.stop(_SHUTDOWN_GRACE_DURATION)
218
219
220 def main():
221 """Entrypoint."""
222 logging.basicConfig(stream=sys.stderr)
223 logging.getLogger().setLevel(logging.INFO)
224
225 parser = argparse.ArgumentParser(
226 formatter_class=argparse.RawDescriptionHelpFormatter)
227 parser.add_argument(
228 '--port',
229 type=int,
230 default=None,
231 help='The port to listen on.'
232 'If arg is not set, will listen on the $PORT env var.'
233 'If env var is empty, defaults to 8000.')
234
235 args = parser.parse_args()
236 port = args.port
237 if not port:
238 port = os.environ.get('PORT')
239 if not port:
240 port = 8000
241
242 serve(port)
243
244
245 if __name__ == '__main__':
246 main()
247
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gcp/api/server.py b/gcp/api/server.py
--- a/gcp/api/server.py
+++ b/gcp/api/server.py
@@ -187,11 +187,14 @@
privileged,
to_response=bug_to_response):
"""Query by (fuzzy) version."""
+
query = osv.Bug.query(osv.Bug.status == osv.BugStatus.PROCESSED,
osv.Bug.project == project,
- osv.Bug.ecosystem == ecosystem,
osv.Bug.affected_fuzzy == osv.normalize_tag(version))
+ if ecosystem:
+ query = query.filter(osv.Bug.ecosystem == ecosystem)
+
if not privileged:
query = query.filter(osv.Bug.public == True) # pylint: disable=singleton-comparison
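A toy illustration of why the patch above makes the ecosystem filter conditional (plain Python over an in-memory list, not datastore code; the project and version values are made up): bugs stored with an empty ecosystem, the state OSS-Fuzz records are being migrated from, should still match version queries that do not name an ecosystem, while an explicit ecosystem still narrows the result.

```python
from dataclasses import dataclass


@dataclass
class FakeBug:
    project: str
    ecosystem: str
    affected_fuzzy: str


def query_by_version(bugs, project, ecosystem, version):
    hits = [b for b in bugs if b.project == project and b.affected_fuzzy == version]
    if ecosystem:  # only narrow by ecosystem when the caller supplied one
        hits = [b for b in hits if b.ecosystem == ecosystem]
    return hits


bugs = [
    FakeBug("libxml2", "", "2.9.4"),          # legacy record, ecosystem not yet set
    FakeBug("libxml2", "OSS-Fuzz", "2.9.4"),  # migrated record
]
print(len(query_by_version(bugs, "libxml2", "", "2.9.4")))          # 2
print(len(query_by_version(bugs, "libxml2", "OSS-Fuzz", "2.9.4")))  # 1
```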
|
{"golden_diff": "diff --git a/gcp/api/server.py b/gcp/api/server.py\n--- a/gcp/api/server.py\n+++ b/gcp/api/server.py\n@@ -187,11 +187,14 @@\n privileged,\n to_response=bug_to_response):\n \"\"\"Query by (fuzzy) version.\"\"\"\n+\n query = osv.Bug.query(osv.Bug.status == osv.BugStatus.PROCESSED,\n osv.Bug.project == project,\n- osv.Bug.ecosystem == ecosystem,\n osv.Bug.affected_fuzzy == osv.normalize_tag(version))\n \n+ if ecosystem:\n+ query = query.filter(osv.Bug.ecosystem == ecosystem)\n+\n if not privileged:\n query = query.filter(osv.Bug.public == True) # pylint: disable=singleton-comparison\n", "issue": "Switch ecosystem from \"\" to \"OSS-Fuzz\" for oss-fuzz sourced bugs\n\n", "before_files": [{"content": "# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"API server implementation.\"\"\"\n\nimport argparse\nfrom concurrent import futures\nimport functools\nimport logging\nimport os\nimport sys\nimport time\n\nfrom google.cloud import ndb\nimport grpc\n\nimport osv\nimport osv_service_v1_pb2\nimport osv_service_v1_pb2_grpc\n\n_PROJECT = 'oss-vdb'\n_OSS_FUZZ_TRACKER_URL = 'https://bugs.chromium.org/p/oss-fuzz/issues/detail?id='\n\n_SHUTDOWN_GRACE_DURATION = 5\n\n_AUTHORIZATION_HEADER_PREFIX = 'Bearer '\n_EXPECTED_AUDIENCE = 'https://db.oss-fuzz.com'\n\n_ndb_client = ndb.Client()\n\n\ndef ndb_context(func):\n \"\"\"Wrapper to create an NDB context.\"\"\"\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n with _ndb_client.context():\n return func(*args, **kwargs)\n\n return wrapper\n\n\nclass BaseServicer:\n \"\"\"Base servicer.\"\"\"\n\n def is_privileged(self, context):\n \"\"\"Check whether if the calling client is privileged.\"\"\"\n for key, _ in context.invocation_metadata():\n # If we have this metadata value, it means it passed JWT validation.\n if key == 'x-endpoint-api-userinfo':\n return True\n\n return False\n\n\nclass OSVServicer(osv_service_v1_pb2_grpc.OSVServicer, BaseServicer):\n \"\"\"V1 OSV servicer.\"\"\"\n\n @ndb_context\n def GetVulnById(self, request, context):\n \"\"\"Return a `Vulnerability` object for a given OSV ID.\n \"\"\"\n bug = osv.Bug.get_by_id(request.id)\n if not bug or bug.status == osv.BugStatus.UNPROCESSED:\n context.abort(grpc.StatusCode.NOT_FOUND, 'Bug not found.')\n return None\n\n if not bug.public and not self.is_privileged(context):\n context.abort(grpc.StatusCode.PERMISSION_DENIED, 'Permission denied.')\n return None\n\n return bug_to_response(bug)\n\n @ndb_context\n def QueryAffected(self, request, context):\n \"\"\"Query vulnerabilities for a particular project at a given commit or\n version.\"\"\"\n privileged = self.is_privileged(context)\n if request.query.HasField('package'):\n package_name = request.query.package.name\n ecosystem = request.query.package.ecosystem\n else:\n package_name = ''\n ecosystem = ''\n\n if request.query.WhichOneof('param') == 'commit':\n bugs = query_by_commit(\n package_name,\n ecosystem,\n request.query.commit,\n privileged,\n to_response=bug_to_response)\n elif 
request.query.WhichOneof('param') == 'version':\n bugs = query_by_version(\n package_name,\n ecosystem,\n request.query.version,\n privileged,\n to_response=bug_to_response)\n else:\n context.abort(grpc.StatusCode.INVALID_ARGUMENT, 'Invalid query.')\n\n return osv_service_v1_pb2.VulnerabilityList(vulns=bugs)\n\n def GetVulnByIdNew(self, request, context):\n \"\"\"Return a `Vulnerability` object for a given OSV ID.\n \"\"\"\n return self.GetVulnById(request, context)\n\n def QueryAffectedNew(self, request, context):\n \"\"\"Query vulnerabilities for a particular project at a given commit or\n version.\"\"\"\n return self.QueryAffected(request, context)\n\n\ndef bug_to_response(bug):\n \"\"\"Convert a Bug entity to a response object.\"\"\"\n return bug.to_vulnerability()\n\n\ndef _get_bugs(bug_ids, to_response=bug_to_response):\n \"\"\"Get bugs from bug ids.\"\"\"\n bugs = ndb.get_multi([ndb.Key(osv.Bug, bug_id) for bug_id in bug_ids])\n return [\n to_response(bug)\n for bug in bugs\n if bug and bug.status == osv.BugStatus.PROCESSED\n ]\n\n\ndef query_by_commit(project,\n ecosystem,\n commit,\n privileged,\n to_response=bug_to_response):\n \"\"\"Query by commit.\"\"\"\n query = osv.AffectedCommit.query(osv.AffectedCommit.commit == commit)\n\n if project:\n query = query.filter(osv.AffectedCommit.project == project)\n\n if ecosystem:\n query = query.filter(osv.AffectedCommit.ecosystem == ecosystem)\n\n if not privileged:\n query = query.filter(osv.AffectedCommit.public == True) # pylint: disable=singleton-comparison\n\n bug_ids = []\n for affected_commit in query:\n bug_ids.append(affected_commit.bug_id)\n\n return _get_bugs(bug_ids, to_response=to_response)\n\n\ndef query_by_tag(project,\n ecosystem,\n tag,\n privileged,\n to_response=bug_to_response):\n \"\"\"Query by tag.\"\"\"\n query = osv.Bug.query(osv.Bug.project == project,\n osv.Bug.ecosystem == ecosystem, osv.Bug.affected == tag)\n\n if not privileged:\n query = query.filter(osv.Bug.public == True) # pylint: disable=singleton-comparison\n\n bugs = []\n for bug in query:\n bugs.append(bug)\n\n return [to_response(bug) for bug in bugs]\n\n\ndef query_by_version(project,\n ecosystem,\n version,\n privileged,\n to_response=bug_to_response):\n \"\"\"Query by (fuzzy) version.\"\"\"\n query = osv.Bug.query(osv.Bug.status == osv.BugStatus.PROCESSED,\n osv.Bug.project == project,\n osv.Bug.ecosystem == ecosystem,\n osv.Bug.affected_fuzzy == osv.normalize_tag(version))\n\n if not privileged:\n query = query.filter(osv.Bug.public == True) # pylint: disable=singleton-comparison\n\n bugs = []\n for bug in query:\n bugs.append(bug)\n\n return [to_response(bug) for bug in bugs]\n\n\ndef serve(port):\n \"\"\"Configures and runs the bookstore API server.\"\"\"\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n osv_service_v1_pb2_grpc.add_OSVServicer_to_server(OSVServicer(), server)\n server.add_insecure_port('[::]:{}'.format(port))\n server.start()\n\n print('Listening on port {}'.format(port))\n try:\n while True:\n time.sleep(3600)\n except KeyboardInterrupt:\n server.stop(_SHUTDOWN_GRACE_DURATION)\n\n\ndef main():\n \"\"\"Entrypoint.\"\"\"\n logging.basicConfig(stream=sys.stderr)\n logging.getLogger().setLevel(logging.INFO)\n\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument(\n '--port',\n type=int,\n default=None,\n help='The port to listen on.'\n 'If arg is not set, will listen on the $PORT env var.'\n 'If env var is empty, defaults to 8000.')\n\n args = 
parser.parse_args()\n port = args.port\n if not port:\n port = os.environ.get('PORT')\n if not port:\n port = 8000\n\n serve(port)\n\n\nif __name__ == '__main__':\n main()\n", "path": "gcp/api/server.py"}], "after_files": [{"content": "# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"API server implementation.\"\"\"\n\nimport argparse\nfrom concurrent import futures\nimport functools\nimport logging\nimport os\nimport sys\nimport time\n\nfrom google.cloud import ndb\nimport grpc\n\nimport osv\nimport osv_service_v1_pb2\nimport osv_service_v1_pb2_grpc\n\n_PROJECT = 'oss-vdb'\n_OSS_FUZZ_TRACKER_URL = 'https://bugs.chromium.org/p/oss-fuzz/issues/detail?id='\n\n_SHUTDOWN_GRACE_DURATION = 5\n\n_AUTHORIZATION_HEADER_PREFIX = 'Bearer '\n_EXPECTED_AUDIENCE = 'https://db.oss-fuzz.com'\n\n_ndb_client = ndb.Client()\n\n\ndef ndb_context(func):\n \"\"\"Wrapper to create an NDB context.\"\"\"\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n with _ndb_client.context():\n return func(*args, **kwargs)\n\n return wrapper\n\n\nclass BaseServicer:\n \"\"\"Base servicer.\"\"\"\n\n def is_privileged(self, context):\n \"\"\"Check whether if the calling client is privileged.\"\"\"\n for key, _ in context.invocation_metadata():\n # If we have this metadata value, it means it passed JWT validation.\n if key == 'x-endpoint-api-userinfo':\n return True\n\n return False\n\n\nclass OSVServicer(osv_service_v1_pb2_grpc.OSVServicer, BaseServicer):\n \"\"\"V1 OSV servicer.\"\"\"\n\n @ndb_context\n def GetVulnById(self, request, context):\n \"\"\"Return a `Vulnerability` object for a given OSV ID.\n \"\"\"\n bug = osv.Bug.get_by_id(request.id)\n if not bug or bug.status == osv.BugStatus.UNPROCESSED:\n context.abort(grpc.StatusCode.NOT_FOUND, 'Bug not found.')\n return None\n\n if not bug.public and not self.is_privileged(context):\n context.abort(grpc.StatusCode.PERMISSION_DENIED, 'Permission denied.')\n return None\n\n return bug_to_response(bug)\n\n @ndb_context\n def QueryAffected(self, request, context):\n \"\"\"Query vulnerabilities for a particular project at a given commit or\n version.\"\"\"\n privileged = self.is_privileged(context)\n if request.query.HasField('package'):\n package_name = request.query.package.name\n ecosystem = request.query.package.ecosystem\n else:\n package_name = ''\n ecosystem = ''\n\n if request.query.WhichOneof('param') == 'commit':\n bugs = query_by_commit(\n package_name,\n ecosystem,\n request.query.commit,\n privileged,\n to_response=bug_to_response)\n elif request.query.WhichOneof('param') == 'version':\n bugs = query_by_version(\n package_name,\n ecosystem,\n request.query.version,\n privileged,\n to_response=bug_to_response)\n else:\n context.abort(grpc.StatusCode.INVALID_ARGUMENT, 'Invalid query.')\n\n return osv_service_v1_pb2.VulnerabilityList(vulns=bugs)\n\n def GetVulnByIdNew(self, request, context):\n \"\"\"Return a `Vulnerability` object for a given OSV ID.\n \"\"\"\n return self.GetVulnById(request, context)\n\n def 
QueryAffectedNew(self, request, context):\n \"\"\"Query vulnerabilities for a particular project at a given commit or\n version.\"\"\"\n return self.QueryAffected(request, context)\n\n\ndef bug_to_response(bug):\n \"\"\"Convert a Bug entity to a response object.\"\"\"\n return bug.to_vulnerability()\n\n\ndef _get_bugs(bug_ids, to_response=bug_to_response):\n \"\"\"Get bugs from bug ids.\"\"\"\n bugs = ndb.get_multi([ndb.Key(osv.Bug, bug_id) for bug_id in bug_ids])\n return [\n to_response(bug)\n for bug in bugs\n if bug and bug.status == osv.BugStatus.PROCESSED\n ]\n\n\ndef query_by_commit(project,\n ecosystem,\n commit,\n privileged,\n to_response=bug_to_response):\n \"\"\"Query by commit.\"\"\"\n query = osv.AffectedCommit.query(osv.AffectedCommit.commit == commit)\n\n if project:\n query = query.filter(osv.AffectedCommit.project == project)\n\n if ecosystem:\n query = query.filter(osv.AffectedCommit.ecosystem == ecosystem)\n\n if not privileged:\n query = query.filter(osv.AffectedCommit.public == True) # pylint: disable=singleton-comparison\n\n bug_ids = []\n for affected_commit in query:\n bug_ids.append(affected_commit.bug_id)\n\n return _get_bugs(bug_ids, to_response=to_response)\n\n\ndef query_by_tag(project,\n ecosystem,\n tag,\n privileged,\n to_response=bug_to_response):\n \"\"\"Query by tag.\"\"\"\n query = osv.Bug.query(osv.Bug.project == project,\n osv.Bug.ecosystem == ecosystem, osv.Bug.affected == tag)\n\n if not privileged:\n query = query.filter(osv.Bug.public == True) # pylint: disable=singleton-comparison\n\n bugs = []\n for bug in query:\n bugs.append(bug)\n\n return [to_response(bug) for bug in bugs]\n\n\ndef query_by_version(project,\n ecosystem,\n version,\n privileged,\n to_response=bug_to_response):\n \"\"\"Query by (fuzzy) version.\"\"\"\n\n query = osv.Bug.query(osv.Bug.status == osv.BugStatus.PROCESSED,\n osv.Bug.project == project,\n osv.Bug.affected_fuzzy == osv.normalize_tag(version))\n\n if ecosystem:\n query = query.filter(osv.Bug.ecosystem == ecosystem)\n\n if not privileged:\n query = query.filter(osv.Bug.public == True) # pylint: disable=singleton-comparison\n\n bugs = []\n for bug in query:\n bugs.append(bug)\n\n return [to_response(bug) for bug in bugs]\n\n\ndef serve(port):\n \"\"\"Configures and runs the bookstore API server.\"\"\"\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n osv_service_v1_pb2_grpc.add_OSVServicer_to_server(OSVServicer(), server)\n server.add_insecure_port('[::]:{}'.format(port))\n server.start()\n\n print('Listening on port {}'.format(port))\n try:\n while True:\n time.sleep(3600)\n except KeyboardInterrupt:\n server.stop(_SHUTDOWN_GRACE_DURATION)\n\n\ndef main():\n \"\"\"Entrypoint.\"\"\"\n logging.basicConfig(stream=sys.stderr)\n logging.getLogger().setLevel(logging.INFO)\n\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument(\n '--port',\n type=int,\n default=None,\n help='The port to listen on.'\n 'If arg is not set, will listen on the $PORT env var.'\n 'If env var is empty, defaults to 8000.')\n\n args = parser.parse_args()\n port = args.port\n if not port:\n port = os.environ.get('PORT')\n if not port:\n port = 8000\n\n serve(port)\n\n\nif __name__ == '__main__':\n main()\n", "path": "gcp/api/server.py"}]}
| 2,622 | 181 |
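The patch in the entry above removes `osv.Bug.ecosystem` from the mandatory arguments of the version query and only applies the ecosystem constraint when a non-empty value is supplied, so records stored with an empty ecosystem string are no longer excluded from version lookups. A minimal sketch of that conditional-filter pattern is shown below; it deliberately uses plain dictionaries and predicate functions instead of the real `google.cloud.ndb` query API so it stays self-contained and runnable.

```python
# Stand-in for the patched query_by_version(): filters are collected as plain
# predicates, and the ecosystem constraint is only added when one was supplied.
def build_version_filters(project, version, ecosystem="", privileged=False):
    filters = [
        lambda bug: bug["status"] == "PROCESSED",
        lambda bug: bug["project"] == project,
        lambda bug: bug["affected_fuzzy"] == version,
    ]
    if ecosystem:  # skip the constraint entirely for an empty ecosystem
        filters.append(lambda bug: bug["ecosystem"] == ecosystem)
    if not privileged:
        filters.append(lambda bug: bug["public"] is True)
    return filters


bugs = [{"status": "PROCESSED", "project": "demo", "affected_fuzzy": "1.2.0",
         "ecosystem": "", "public": True}]
matching = [b for b in bugs
            if all(check(b) for check in build_version_filters("demo", "1.2.0"))]
print(len(matching))  # prints 1: the empty-ecosystem record is no longer filtered out
```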
gh_patches_debug_19401
|
rasdani/github-patches
|
git_diff
|
geopandas__geopandas-643
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
GeoDataFrame.to_file fail on bool column
When converting GeoDataFrame with bool column to shp file, got following error
```sh
ValueError: 'bool' is not in list
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `geopandas/io/file.py`
Content:
```
1 import os
2
3 import fiona
4 import numpy as np
5 import six
6
7 from geopandas import GeoDataFrame
8
9 # Adapted from pandas.io.common
10 if six.PY3:
11 from urllib.request import urlopen as _urlopen
12 from urllib.parse import urlparse as parse_url
13 from urllib.parse import uses_relative, uses_netloc, uses_params
14 else:
15 from urllib2 import urlopen as _urlopen
16 from urlparse import urlparse as parse_url
17 from urlparse import uses_relative, uses_netloc, uses_params
18
19 _VALID_URLS = set(uses_relative + uses_netloc + uses_params)
20 _VALID_URLS.discard('')
21
22
23 def _is_url(url):
24 """Check to see if *url* has a valid protocol."""
25 try:
26 return parse_url(url).scheme in _VALID_URLS
27 except:
28 return False
29
30
31 def read_file(filename, **kwargs):
32 """
33 Returns a GeoDataFrame from a file or URL.
34
35 Parameters
36 ----------
37 filename: str
38 Either the absolute or relative path to the file or URL to
39 be opened.
40 **kwargs:
41 Keyword args to be passed to the `open` or `BytesCollection` method
42 in the fiona library when opening the file. For more information on
43 possible keywords, type:
44 ``import fiona; help(fiona.open)``
45
46 Examples
47 --------
48 >>> df = geopandas.read_file("nybb.shp")
49
50 Returns
51 -------
52 geodataframe : GeoDataFrame
53 """
54 bbox = kwargs.pop('bbox', None)
55 if _is_url(filename):
56 req = _urlopen(filename)
57 path_or_bytes = req.read()
58 reader = fiona.BytesCollection
59 else:
60 path_or_bytes = filename
61 reader = fiona.open
62 with reader(path_or_bytes, **kwargs) as f:
63 crs = f.crs
64 if bbox is not None:
65 assert len(bbox) == 4
66 f_filt = f.filter(bbox=bbox)
67 else:
68 f_filt = f
69 gdf = GeoDataFrame.from_features(f_filt, crs=crs)
70 # re-order with column order from metadata, with geometry last
71 columns = list(f.meta["schema"]["properties"]) + ["geometry"]
72 gdf = gdf[columns]
73
74 return gdf
75
76
77 def to_file(df, filename, driver="ESRI Shapefile", schema=None,
78 **kwargs):
79 """
80 Write this GeoDataFrame to an OGR data source
81
82 A dictionary of supported OGR providers is available via:
83 >>> import fiona
84 >>> fiona.supported_drivers
85
86 Parameters
87 ----------
88 df : GeoDataFrame to be written
89 filename : string
90 File path or file handle to write to.
91 driver : string, default 'ESRI Shapefile'
92 The OGR format driver used to write the vector file.
93 schema : dict, default None
94 If specified, the schema dictionary is passed to Fiona to
95 better control how the file is written. If None, GeoPandas
96 will determine the schema based on each column's dtype
97
98 The *kwargs* are passed to fiona.open and can be used to write
99 to multi-layer data, store data within archives (zip files), etc.
100 """
101 if schema is None:
102 schema = infer_schema(df)
103 filename = os.path.abspath(os.path.expanduser(filename))
104 with fiona.drivers():
105 with fiona.open(filename, 'w', driver=driver, crs=df.crs,
106 schema=schema, **kwargs) as colxn:
107 colxn.writerecords(df.iterfeatures())
108
109
110 def infer_schema(df):
111 try:
112 from collections import OrderedDict
113 except ImportError:
114 from ordereddict import OrderedDict
115
116 def convert_type(in_type):
117 if in_type == object:
118 return 'str'
119 out_type = type(np.asscalar(np.zeros(1, in_type))).__name__
120 if out_type == 'long':
121 out_type = 'int'
122 return out_type
123
124 properties = OrderedDict([
125 (col, convert_type(_type)) for col, _type in
126 zip(df.columns, df.dtypes) if col != df._geometry_column_name
127 ])
128
129 geom_type = _common_geom_type(df)
130 if not geom_type:
131 raise ValueError("Geometry column cannot contain mutiple "
132 "geometry types when writing to file.")
133
134 schema = {'geometry': geom_type, 'properties': properties}
135
136 return schema
137
138
139 def _common_geom_type(df):
140 # Need to check geom_types before we write to file...
141 # Some (most?) providers expect a single geometry type:
142 # Point, LineString, or Polygon
143 geom_types = df.geometry.geom_type.unique()
144
145 from os.path import commonprefix # To find longest common prefix
146 geom_type = commonprefix([g[::-1] for g in geom_types if g])[::-1] # Reverse
147 if not geom_type:
148 geom_type = None
149
150 return geom_type
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/geopandas/io/file.py b/geopandas/io/file.py
--- a/geopandas/io/file.py
+++ b/geopandas/io/file.py
@@ -113,16 +113,20 @@
except ImportError:
from ordereddict import OrderedDict
- def convert_type(in_type):
+ def convert_type(column, in_type):
if in_type == object:
return 'str'
out_type = type(np.asscalar(np.zeros(1, in_type))).__name__
if out_type == 'long':
out_type = 'int'
+ if out_type == 'bool':
+ raise ValueError('column "{}" is boolean type, '.format(column) +
+ 'which is unsupported in file writing. '
+ 'Consider casting the column to int type.')
return out_type
properties = OrderedDict([
- (col, convert_type(_type)) for col, _type in
+ (col, convert_type(col, _type)) for col, _type in
zip(df.columns, df.dtypes) if col != df._geometry_column_name
])
|
{"golden_diff": "diff --git a/geopandas/io/file.py b/geopandas/io/file.py\n--- a/geopandas/io/file.py\n+++ b/geopandas/io/file.py\n@@ -113,16 +113,20 @@\n except ImportError:\n from ordereddict import OrderedDict\n \n- def convert_type(in_type):\n+ def convert_type(column, in_type):\n if in_type == object:\n return 'str'\n out_type = type(np.asscalar(np.zeros(1, in_type))).__name__\n if out_type == 'long':\n out_type = 'int'\n+ if out_type == 'bool':\n+ raise ValueError('column \"{}\" is boolean type, '.format(column) +\n+ 'which is unsupported in file writing. '\n+ 'Consider casting the column to int type.')\n return out_type\n \n properties = OrderedDict([\n- (col, convert_type(_type)) for col, _type in\n+ (col, convert_type(col, _type)) for col, _type in\n zip(df.columns, df.dtypes) if col != df._geometry_column_name\n ])\n", "issue": "GeoDataFrame.to_file fail on bool column\nWhen converting GeoDataFrame with bool column to shp file, got following error\r\n```sh\r\nValueError: 'bool' is not in list\r\n```\n", "before_files": [{"content": "import os\n\nimport fiona\nimport numpy as np\nimport six\n\nfrom geopandas import GeoDataFrame\n\n# Adapted from pandas.io.common\nif six.PY3:\n from urllib.request import urlopen as _urlopen\n from urllib.parse import urlparse as parse_url\n from urllib.parse import uses_relative, uses_netloc, uses_params\nelse:\n from urllib2 import urlopen as _urlopen\n from urlparse import urlparse as parse_url\n from urlparse import uses_relative, uses_netloc, uses_params\n\n_VALID_URLS = set(uses_relative + uses_netloc + uses_params)\n_VALID_URLS.discard('')\n\n\ndef _is_url(url):\n \"\"\"Check to see if *url* has a valid protocol.\"\"\"\n try:\n return parse_url(url).scheme in _VALID_URLS\n except:\n return False\n\n\ndef read_file(filename, **kwargs):\n \"\"\"\n Returns a GeoDataFrame from a file or URL.\n\n Parameters\n ----------\n filename: str\n Either the absolute or relative path to the file or URL to\n be opened.\n **kwargs:\n Keyword args to be passed to the `open` or `BytesCollection` method\n in the fiona library when opening the file. For more information on\n possible keywords, type:\n ``import fiona; help(fiona.open)``\n\n Examples\n --------\n >>> df = geopandas.read_file(\"nybb.shp\")\n\n Returns\n -------\n geodataframe : GeoDataFrame\n \"\"\"\n bbox = kwargs.pop('bbox', None)\n if _is_url(filename):\n req = _urlopen(filename)\n path_or_bytes = req.read()\n reader = fiona.BytesCollection\n else:\n path_or_bytes = filename\n reader = fiona.open\n with reader(path_or_bytes, **kwargs) as f:\n crs = f.crs\n if bbox is not None:\n assert len(bbox) == 4\n f_filt = f.filter(bbox=bbox)\n else:\n f_filt = f\n gdf = GeoDataFrame.from_features(f_filt, crs=crs)\n # re-order with column order from metadata, with geometry last\n columns = list(f.meta[\"schema\"][\"properties\"]) + [\"geometry\"]\n gdf = gdf[columns]\n\n return gdf\n\n\ndef to_file(df, filename, driver=\"ESRI Shapefile\", schema=None,\n **kwargs):\n \"\"\"\n Write this GeoDataFrame to an OGR data source\n\n A dictionary of supported OGR providers is available via:\n >>> import fiona\n >>> fiona.supported_drivers\n\n Parameters\n ----------\n df : GeoDataFrame to be written\n filename : string\n File path or file handle to write to.\n driver : string, default 'ESRI Shapefile'\n The OGR format driver used to write the vector file.\n schema : dict, default None\n If specified, the schema dictionary is passed to Fiona to\n better control how the file is written. 
If None, GeoPandas\n will determine the schema based on each column's dtype\n\n The *kwargs* are passed to fiona.open and can be used to write\n to multi-layer data, store data within archives (zip files), etc.\n \"\"\"\n if schema is None:\n schema = infer_schema(df)\n filename = os.path.abspath(os.path.expanduser(filename))\n with fiona.drivers():\n with fiona.open(filename, 'w', driver=driver, crs=df.crs,\n schema=schema, **kwargs) as colxn:\n colxn.writerecords(df.iterfeatures())\n\n\ndef infer_schema(df):\n try:\n from collections import OrderedDict\n except ImportError:\n from ordereddict import OrderedDict\n\n def convert_type(in_type):\n if in_type == object:\n return 'str'\n out_type = type(np.asscalar(np.zeros(1, in_type))).__name__\n if out_type == 'long':\n out_type = 'int'\n return out_type\n\n properties = OrderedDict([\n (col, convert_type(_type)) for col, _type in\n zip(df.columns, df.dtypes) if col != df._geometry_column_name\n ])\n\n geom_type = _common_geom_type(df)\n if not geom_type:\n raise ValueError(\"Geometry column cannot contain mutiple \"\n \"geometry types when writing to file.\")\n\n schema = {'geometry': geom_type, 'properties': properties}\n\n return schema\n\n\ndef _common_geom_type(df):\n # Need to check geom_types before we write to file...\n # Some (most?) providers expect a single geometry type:\n # Point, LineString, or Polygon\n geom_types = df.geometry.geom_type.unique()\n\n from os.path import commonprefix # To find longest common prefix\n geom_type = commonprefix([g[::-1] for g in geom_types if g])[::-1] # Reverse\n if not geom_type:\n geom_type = None\n\n return geom_type\n", "path": "geopandas/io/file.py"}], "after_files": [{"content": "import os\n\nimport fiona\nimport numpy as np\nimport six\n\nfrom geopandas import GeoDataFrame\n\n# Adapted from pandas.io.common\nif six.PY3:\n from urllib.request import urlopen as _urlopen\n from urllib.parse import urlparse as parse_url\n from urllib.parse import uses_relative, uses_netloc, uses_params\nelse:\n from urllib2 import urlopen as _urlopen\n from urlparse import urlparse as parse_url\n from urlparse import uses_relative, uses_netloc, uses_params\n\n_VALID_URLS = set(uses_relative + uses_netloc + uses_params)\n_VALID_URLS.discard('')\n\n\ndef _is_url(url):\n \"\"\"Check to see if *url* has a valid protocol.\"\"\"\n try:\n return parse_url(url).scheme in _VALID_URLS\n except:\n return False\n\n\ndef read_file(filename, **kwargs):\n \"\"\"\n Returns a GeoDataFrame from a file or URL.\n\n Parameters\n ----------\n filename: str\n Either the absolute or relative path to the file or URL to\n be opened.\n **kwargs:\n Keyword args to be passed to the `open` or `BytesCollection` method\n in the fiona library when opening the file. 
For more information on\n possible keywords, type:\n ``import fiona; help(fiona.open)``\n\n Examples\n --------\n >>> df = geopandas.read_file(\"nybb.shp\")\n\n Returns\n -------\n geodataframe : GeoDataFrame\n \"\"\"\n bbox = kwargs.pop('bbox', None)\n if _is_url(filename):\n req = _urlopen(filename)\n path_or_bytes = req.read()\n reader = fiona.BytesCollection\n else:\n path_or_bytes = filename\n reader = fiona.open\n with reader(path_or_bytes, **kwargs) as f:\n crs = f.crs\n if bbox is not None:\n assert len(bbox) == 4\n f_filt = f.filter(bbox=bbox)\n else:\n f_filt = f\n gdf = GeoDataFrame.from_features(f_filt, crs=crs)\n # re-order with column order from metadata, with geometry last\n columns = list(f.meta[\"schema\"][\"properties\"]) + [\"geometry\"]\n gdf = gdf[columns]\n\n return gdf\n\n\ndef to_file(df, filename, driver=\"ESRI Shapefile\", schema=None,\n **kwargs):\n \"\"\"\n Write this GeoDataFrame to an OGR data source\n\n A dictionary of supported OGR providers is available via:\n >>> import fiona\n >>> fiona.supported_drivers\n\n Parameters\n ----------\n df : GeoDataFrame to be written\n filename : string\n File path or file handle to write to.\n driver : string, default 'ESRI Shapefile'\n The OGR format driver used to write the vector file.\n schema : dict, default None\n If specified, the schema dictionary is passed to Fiona to\n better control how the file is written. If None, GeoPandas\n will determine the schema based on each column's dtype\n\n The *kwargs* are passed to fiona.open and can be used to write\n to multi-layer data, store data within archives (zip files), etc.\n \"\"\"\n if schema is None:\n schema = infer_schema(df)\n filename = os.path.abspath(os.path.expanduser(filename))\n with fiona.drivers():\n with fiona.open(filename, 'w', driver=driver, crs=df.crs,\n schema=schema, **kwargs) as colxn:\n colxn.writerecords(df.iterfeatures())\n\n\ndef infer_schema(df):\n try:\n from collections import OrderedDict\n except ImportError:\n from ordereddict import OrderedDict\n\n def convert_type(column, in_type):\n if in_type == object:\n return 'str'\n out_type = type(np.asscalar(np.zeros(1, in_type))).__name__\n if out_type == 'long':\n out_type = 'int'\n if out_type == 'bool':\n raise ValueError('column \"{}\" is boolean type, '.format(column) +\n 'which is unsupported in file writing. '\n 'Consider casting the column to int type.')\n return out_type\n\n properties = OrderedDict([\n (col, convert_type(col, _type)) for col, _type in\n zip(df.columns, df.dtypes) if col != df._geometry_column_name\n ])\n\n geom_type = _common_geom_type(df)\n if not geom_type:\n raise ValueError(\"Geometry column cannot contain mutiple \"\n \"geometry types when writing to file.\")\n\n schema = {'geometry': geom_type, 'properties': properties}\n\n return schema\n\n\ndef _common_geom_type(df):\n # Need to check geom_types before we write to file...\n # Some (most?) providers expect a single geometry type:\n # Point, LineString, or Polygon\n geom_types = df.geometry.geom_type.unique()\n\n from os.path import commonprefix # To find longest common prefix\n geom_type = commonprefix([g[::-1] for g in geom_types if g])[::-1] # Reverse\n if not geom_type:\n geom_type = None\n\n return geom_type\n", "path": "geopandas/io/file.py"}]}
| 1,732 | 242 |
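The geopandas patch above rejects boolean columns during schema inference instead of letting fiona fail later with `ValueError: 'bool' is not in list`. The snippet below is a self-contained rendition of the patched `convert_type` helper so the new behaviour can be exercised without fiona or geopandas installed; it substitutes `ndarray.item()` for the deprecated `np.asscalar` used in the original, which is otherwise equivalent.

```python
import numpy as np

def convert_type(column, in_type):
    # Mirrors the patched geopandas.io.file.infer_schema helper: object dtypes
    # map to 'str', numeric dtypes map to their Python type name, and boolean
    # columns raise a clear error instead of failing inside fiona.
    if in_type == object:
        return 'str'
    out_type = type(np.zeros(1, in_type).item()).__name__
    if out_type == 'long':
        out_type = 'int'
    if out_type == 'bool':
        raise ValueError('column "{}" is boolean type, which is unsupported in '
                         'file writing. Consider casting the column to int type.'
                         .format(column))
    return out_type

print(convert_type('population', np.dtype('int64')))   # -> int
print(convert_type('name', object))                    # -> str
try:
    convert_type('is_capital', np.dtype('bool'))
except ValueError as exc:
    print(exc)  # explains that the bool column should be cast to int first
```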
gh_patches_debug_31489
|
rasdani/github-patches
|
git_diff
|
tensorlayer__TensorLayer-550
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
install error
### New Issue Checklist
- [ ]ImportError: No module named progressbar
### Issue Description
when i use the command pip install -e .,error occurs as
Complete output from command python setup.py egg_info:
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/home/dega/Downloads/degawong/tensorflow/tensorlayer/setup.py", line 18, in <module>
from tensorlayer import (
File "tensorlayer/__init__.py", line 14, in <module>
from . import files
File "tensorlayer/files.py", line 12, in <module>
import progressbar
ImportError: No module named progressbar
----------------------------------------
Command "python setup.py egg_info" failed with error code 1 in /home/.../tensorflow/tensorlayer/
but if i use import progressbar,it works well and get current screen picture
someone helps me,thanks
### Reproducible Code
- ubuntu 17.10
[INSERT CODE HERE]
```python
pip install -e .
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 import os
3 import codecs
4
5 try:
6 from setuptools import (
7 setup,
8 find_packages
9 )
10
11 except ImportError:
12 from distutils.core import (
13 setup,
14 find_packages
15 )
16
17
18 from tensorlayer import (
19 __contact_emails__,
20 __contact_names__,
21 __description__,
22 __download_url__,
23 __homepage__,
24 __keywords__,
25 __license__,
26 __package_name__,
27 __repository_url__,
28 __version__
29 )
30
31
32 # =================== Reading Readme file as TXT files ===================
33
34 if os.path.exists('README.rst'):
35 # codec is used for consistent encoding
36 long_description = codecs.open(
37 os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.rst'),
38 'r', 'utf-8'
39 ).read()
40
41 else:
42 long_description = 'See ' + __homepage__
43
44 # ======================= Reading Requirements files as TXT files =======================
45
46 def req_file(filename):
47 with open(filename) as f:
48 content = f.readlines()
49 # you may also want to remove whitespace characters
50 # Example: `\n` at the end of each line
51 return [x.strip() for x in content]
52
53 # ======================= Defining the requirements var =======================
54
55 install_requires = req_file("requirements.txt")
56
57 extras_require = {
58 'tf_cpu': ['tensorflow>=1.8.0,<1.9'],
59 'tf_gpu': ['tensorflow-gpu>=1.8.0,<1.9'],
60 'dev': req_file("requirements_dev.txt"),
61 'doc': req_file("docs/requirements.txt"),
62 'test': req_file("tests/requirements.txt")
63 }
64
65 # Readthedocs requires TF 1.5.0 to build properly
66 if os.environ.get('READTHEDOCS', None) == 'True':
67 install_requires.append("tensorflow==1.5.0")
68
69 # ======================= Define the package setup =======================
70
71 setup(
72 name=__package_name__,
73
74 # Versions should comply with PEP440. For a discussion on single-sourcing
75 # the version across setup.py and the project code, see
76 # https://packaging.python.org/en/latest/single_source_version.html
77 version=__version__,
78
79 description=__description__,
80 long_description=long_description,
81
82 # The project's main homepage.
83 url=__repository_url__,
84 download_url=__download_url__,
85
86 # Author details
87 author=__contact_names__,
88 author_email=__contact_emails__,
89
90 # maintainer Details
91 maintainer=__contact_names__,
92 maintainer_email=__contact_emails__,
93
94 # The licence under which the project is released
95 license=__license__,
96
97 classifiers=[
98 # How mature is this project? Common values are
99 # 1 - Planning
100 # 2 - Pre-Alpha
101 # 3 - Alpha
102 # 4 - Beta
103 # 5 - Production/Stable
104 # 6 - Mature
105 # 7 - Inactive
106 'Development Status :: 5 - Production/Stable',
107
108 # Indicate who your project is intended for
109 'Intended Audience :: Developers',
110 'Intended Audience :: Science/Research',
111 'Intended Audience :: Information Technology',
112
113 # Indicate what your project relates to
114 'Topic :: Scientific/Engineering',
115 'Topic :: Scientific/Engineering :: Image Recognition',
116 'Topic :: Scientific/Engineering :: Artificial Intelligence',
117 'Topic :: Software Development :: Libraries',
118 'Topic :: Utilities',
119
120 # Pick your license as you wish (should match "license" above)
121 'License :: OSI Approved :: Apache Software License',
122
123 # Specify the Python versions you support here. In particular, ensure
124 # that you indicate whether you support Python 2, Python 3 or both.
125 'Programming Language :: Python :: 2',
126 'Programming Language :: Python :: 2.7',
127 'Programming Language :: Python :: 3',
128 'Programming Language :: Python :: 3.4',
129 'Programming Language :: Python :: 3.5',
130 'Programming Language :: Python :: 3.6',
131
132 # Additionnal Settings
133 'Environment :: Console',
134 'Natural Language :: English',
135 'Operating System :: OS Independent',
136 ],
137
138 keywords=__keywords__,
139 packages=find_packages(),
140
141 # List run-time dependencies here. These will be installed by pip when
142 # your project is installed. For an analysis of "install_requires" vs pip's
143 # requirements files see:
144 # https://packaging.python.org/en/latest/requirements.html
145 install_requires=install_requires,
146
147 # List additional groups of dependencies here (e.g. development
148 # dependencies). You can install these using the following syntax,
149 # $ pip install -e .[test]
150 extras_require=extras_require,
151 scripts=[
152 'tl',
153 ],
154 platform=['any'],
155 )
156
```
Path: `tensorlayer/__init__.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 """Deep learning and Reinforcement learning library for Researchers and Engineers"""
4 from __future__ import absolute_import
5
6 import pkg_resources
7 installed_packages = [d for d in pkg_resources.working_set]
8
9 TF_is_installed = False
10 TL_is_installed = False
11
12 for package in installed_packages:
13 if 'tensorflow' in package.project_name:
14 TF_is_installed = True
15 if 'tensorlayer' in package.project_name and 'site-packages' in package.location:
16 TL_is_installed = True
17
18 if TF_is_installed: # The tensorlayer package is installed
19 import tensorflow
20
21 from . import activation
22 from . import cost
23 from . import files
24 from . import iterate
25 from . import layers
26 from . import models
27 from . import utils
28 from . import visualize
29 from . import prepro
30 from . import nlp
31 from . import rein
32 from . import distributed
33
34 # alias
35 act = activation
36 vis = visualize
37
38 global_flag = {}
39 global_dict = {}
40
41 elif TL_is_installed:
42 install_instr = "Please make sure you install a recent enough version of TensorFlow."
43 raise ImportError("__init__.py : Could not import TensorFlow. {}".format(install_instr))
44
45 # Use the following formating: (major, minor, patch, prerelease)
46 VERSION = (1, 8, 5, 'rc2')
47 __shortversion__ = '.'.join(map(str, VERSION[:3]))
48 __version__ = '.'.join(map(str, VERSION[:3])) + "".join(VERSION[3:])
49
50 __package_name__ = 'tensorlayer'
51 __contact_names__ = 'TensorLayer Contributors'
52 __contact_emails__ = '[email protected]'
53 __homepage__ = 'http://tensorlayer.readthedocs.io/en/latest/'
54 __repository_url__ = 'https://github.com/tensorlayer/tensorlayer'
55 __download_url__ = 'https://github.com/tensorlayer/tensorlayer'
56 __description__ = 'Reinforcement Learning and Deep Learning Library for Researcher and Engineer.'
57 __license__ = 'apache'
58 __keywords__ = 'deep learning, machine learning, computer vision, nlp, supervised learning, unsupervised learning, reinforcement learning, tensorflow'
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -57,6 +57,7 @@
extras_require = {
'tf_cpu': ['tensorflow>=1.8.0,<1.9'],
'tf_gpu': ['tensorflow-gpu>=1.8.0,<1.9'],
+ 'db': req_file("requirements_db.txt"),
'dev': req_file("requirements_dev.txt"),
'doc': req_file("docs/requirements.txt"),
'test': req_file("tests/requirements.txt")
diff --git a/tensorlayer/__init__.py b/tensorlayer/__init__.py
--- a/tensorlayer/__init__.py
+++ b/tensorlayer/__init__.py
@@ -3,19 +3,7 @@
"""Deep learning and Reinforcement learning library for Researchers and Engineers"""
from __future__ import absolute_import
-import pkg_resources
-installed_packages = [d for d in pkg_resources.working_set]
-
-TF_is_installed = False
-TL_is_installed = False
-
-for package in installed_packages:
- if 'tensorflow' in package.project_name:
- TF_is_installed = True
- if 'tensorlayer' in package.project_name and 'site-packages' in package.location:
- TL_is_installed = True
-
-if TF_is_installed: # The tensorlayer package is installed
+try:
import tensorflow
from . import activation
@@ -38,9 +26,14 @@
global_flag = {}
global_dict = {}
-elif TL_is_installed:
- install_instr = "Please make sure you install a recent enough version of TensorFlow."
- raise ImportError("__init__.py : Could not import TensorFlow. {}".format(install_instr))
+except Exception as e:
+
+ import pkg_resources
+ installed_packages = [d for d in pkg_resources.working_set]
+
+ for package in installed_packages:
+ if 'tensorlayer' in package.project_name and 'site-packages' in package.location:
+ raise ImportError("__init__.py : Could not import TensorLayer.\nError: {}".format(e))
# Use the following formating: (major, minor, patch, prerelease)
VERSION = (1, 8, 5, 'rc2')
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -57,6 +57,7 @@\n extras_require = {\n 'tf_cpu': ['tensorflow>=1.8.0,<1.9'],\n 'tf_gpu': ['tensorflow-gpu>=1.8.0,<1.9'],\n+\t'db': req_file(\"requirements_db.txt\"),\n \t'dev': req_file(\"requirements_dev.txt\"),\n \t'doc': req_file(\"docs/requirements.txt\"),\n \t'test': req_file(\"tests/requirements.txt\")\ndiff --git a/tensorlayer/__init__.py b/tensorlayer/__init__.py\n--- a/tensorlayer/__init__.py\n+++ b/tensorlayer/__init__.py\n@@ -3,19 +3,7 @@\n \"\"\"Deep learning and Reinforcement learning library for Researchers and Engineers\"\"\"\n from __future__ import absolute_import\n \n-import pkg_resources\n-installed_packages = [d for d in pkg_resources.working_set]\n-\n-TF_is_installed = False\n-TL_is_installed = False\n-\n-for package in installed_packages:\n- if 'tensorflow' in package.project_name:\n- TF_is_installed = True\n- if 'tensorlayer' in package.project_name and 'site-packages' in package.location:\n- TL_is_installed = True\n-\n-if TF_is_installed: # The tensorlayer package is installed\n+try:\n import tensorflow\n \n from . import activation\n@@ -38,9 +26,14 @@\n global_flag = {}\n global_dict = {}\n \n-elif TL_is_installed:\n- install_instr = \"Please make sure you install a recent enough version of TensorFlow.\"\n- raise ImportError(\"__init__.py : Could not import TensorFlow. {}\".format(install_instr))\n+except Exception as e:\n+\n+ import pkg_resources\n+ installed_packages = [d for d in pkg_resources.working_set]\n+\n+ for package in installed_packages:\n+ if 'tensorlayer' in package.project_name and 'site-packages' in package.location:\n+ raise ImportError(\"__init__.py : Could not import TensorLayer.\\nError: {}\".format(e))\n \n # Use the following formating: (major, minor, patch, prerelease)\n VERSION = (1, 8, 5, 'rc2')\n", "issue": "install error\n### New Issue Checklist\r\n\r\n- [ ]ImportError: No module named progressbar\r\n\r\n### Issue Description\r\n\r\n when i use the command pip install -e .,error occurs as \r\n Complete output from command python setup.py egg_info:\r\n Traceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"/home/dega/Downloads/degawong/tensorflow/tensorlayer/setup.py\", line 18, in <module>\r\n from tensorlayer import (\r\n File \"tensorlayer/__init__.py\", line 14, in <module>\r\n from . 
import files\r\n File \"tensorlayer/files.py\", line 12, in <module>\r\n import progressbar\r\n ImportError: No module named progressbar\r\n \r\n ----------------------------------------\r\nCommand \"python setup.py egg_info\" failed with error code 1 in /home/.../tensorflow/tensorlayer/\r\n\r\nbut if i use import progressbar,it works well and get current screen picture\r\nsomeone helps me,thanks\r\n\r\n### Reproducible Code\r\n\r\n- ubuntu 17.10\r\n\r\n[INSERT CODE HERE]\r\n\r\n```python\r\npip install -e .\r\n\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport codecs\n\ntry:\n from setuptools import (\n setup,\n find_packages\n )\n\nexcept ImportError:\n from distutils.core import (\n setup,\n find_packages\n )\n\n\nfrom tensorlayer import (\n __contact_emails__,\n __contact_names__,\n __description__,\n __download_url__,\n __homepage__,\n __keywords__,\n __license__,\n __package_name__,\n __repository_url__,\n __version__\n)\n\n\n# =================== Reading Readme file as TXT files ===================\n\nif os.path.exists('README.rst'):\n # codec is used for consistent encoding\n long_description = codecs.open(\n os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.rst'),\n 'r', 'utf-8'\n ).read()\n\nelse:\n long_description = 'See ' + __homepage__\n\n# ======================= Reading Requirements files as TXT files =======================\n\ndef req_file(filename):\n with open(filename) as f:\n content = f.readlines()\n # you may also want to remove whitespace characters\n # Example: `\\n` at the end of each line\n return [x.strip() for x in content]\n\n# ======================= Defining the requirements var =======================\n\ninstall_requires = req_file(\"requirements.txt\")\n\nextras_require = {\n 'tf_cpu': ['tensorflow>=1.8.0,<1.9'],\n 'tf_gpu': ['tensorflow-gpu>=1.8.0,<1.9'],\n\t'dev': req_file(\"requirements_dev.txt\"),\n\t'doc': req_file(\"docs/requirements.txt\"),\n\t'test': req_file(\"tests/requirements.txt\")\n}\n\n# Readthedocs requires TF 1.5.0 to build properly\nif os.environ.get('READTHEDOCS', None) == 'True':\n install_requires.append(\"tensorflow==1.5.0\")\n\n# ======================= Define the package setup =======================\n\nsetup(\n name=__package_name__,\n\n # Versions should comply with PEP440. For a discussion on single-sourcing\n # the version across setup.py and the project code, see\n # https://packaging.python.org/en/latest/single_source_version.html\n version=__version__,\n\n description=__description__,\n long_description=long_description,\n\n # The project's main homepage.\n url=__repository_url__,\n download_url=__download_url__,\n\n # Author details\n author=__contact_names__,\n author_email=__contact_emails__,\n\n # maintainer Details\n maintainer=__contact_names__,\n maintainer_email=__contact_emails__,\n\n # The licence under which the project is released\n license=__license__,\n\n classifiers=[\n # How mature is this project? 
Common values are\n # 1 - Planning\n # 2 - Pre-Alpha\n # 3 - Alpha\n # 4 - Beta\n # 5 - Production/Stable\n # 6 - Mature\n # 7 - Inactive\n 'Development Status :: 5 - Production/Stable',\n\n # Indicate who your project is intended for\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Information Technology',\n\n # Indicate what your project relates to\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Image Recognition',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Utilities',\n\n # Pick your license as you wish (should match \"license\" above)\n 'License :: OSI Approved :: Apache Software License',\n\n # Specify the Python versions you support here. In particular, ensure\n # that you indicate whether you support Python 2, Python 3 or both.\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n\n # Additionnal Settings\n 'Environment :: Console',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n ],\n\n keywords=__keywords__,\n packages=find_packages(),\n\n # List run-time dependencies here. These will be installed by pip when\n # your project is installed. For an analysis of \"install_requires\" vs pip's\n # requirements files see:\n # https://packaging.python.org/en/latest/requirements.html\n install_requires=install_requires,\n\n # List additional groups of dependencies here (e.g. development\n # dependencies). You can install these using the following syntax,\n # $ pip install -e .[test]\n extras_require=extras_require,\n scripts=[\n 'tl',\n ],\n platform=['any'],\n)\n", "path": "setup.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Deep learning and Reinforcement learning library for Researchers and Engineers\"\"\"\nfrom __future__ import absolute_import\n\nimport pkg_resources\ninstalled_packages = [d for d in pkg_resources.working_set]\n\nTF_is_installed = False\nTL_is_installed = False\n\nfor package in installed_packages:\n if 'tensorflow' in package.project_name:\n TF_is_installed = True\n if 'tensorlayer' in package.project_name and 'site-packages' in package.location:\n TL_is_installed = True\n\nif TF_is_installed: # The tensorlayer package is installed\n import tensorflow\n\n from . import activation\n from . import cost\n from . import files\n from . import iterate\n from . import layers\n from . import models\n from . import utils\n from . import visualize\n from . import prepro\n from . import nlp\n from . import rein\n from . import distributed\n\n # alias\n act = activation\n vis = visualize\n\n global_flag = {}\n global_dict = {}\n\nelif TL_is_installed:\n install_instr = \"Please make sure you install a recent enough version of TensorFlow.\"\n raise ImportError(\"__init__.py : Could not import TensorFlow. 
{}\".format(install_instr))\n\n# Use the following formating: (major, minor, patch, prerelease)\nVERSION = (1, 8, 5, 'rc2')\n__shortversion__ = '.'.join(map(str, VERSION[:3]))\n__version__ = '.'.join(map(str, VERSION[:3])) + \"\".join(VERSION[3:])\n\n__package_name__ = 'tensorlayer'\n__contact_names__ = 'TensorLayer Contributors'\n__contact_emails__ = '[email protected]'\n__homepage__ = 'http://tensorlayer.readthedocs.io/en/latest/'\n__repository_url__ = 'https://github.com/tensorlayer/tensorlayer'\n__download_url__ = 'https://github.com/tensorlayer/tensorlayer'\n__description__ = 'Reinforcement Learning and Deep Learning Library for Researcher and Engineer.'\n__license__ = 'apache'\n__keywords__ = 'deep learning, machine learning, computer vision, nlp, supervised learning, unsupervised learning, reinforcement learning, tensorflow'\n", "path": "tensorlayer/__init__.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport os\nimport codecs\n\ntry:\n from setuptools import (\n setup,\n find_packages\n )\n\nexcept ImportError:\n from distutils.core import (\n setup,\n find_packages\n )\n\n\nfrom tensorlayer import (\n __contact_emails__,\n __contact_names__,\n __description__,\n __download_url__,\n __homepage__,\n __keywords__,\n __license__,\n __package_name__,\n __repository_url__,\n __version__\n)\n\n\n# =================== Reading Readme file as TXT files ===================\n\nif os.path.exists('README.rst'):\n # codec is used for consistent encoding\n long_description = codecs.open(\n os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.rst'),\n 'r', 'utf-8'\n ).read()\n\nelse:\n long_description = 'See ' + __homepage__\n\n# ======================= Reading Requirements files as TXT files =======================\n\ndef req_file(filename):\n with open(filename) as f:\n content = f.readlines()\n # you may also want to remove whitespace characters\n # Example: `\\n` at the end of each line\n return [x.strip() for x in content]\n\n# ======================= Defining the requirements var =======================\n\ninstall_requires = req_file(\"requirements.txt\")\n\nextras_require = {\n 'tf_cpu': ['tensorflow>=1.8.0,<1.9'],\n 'tf_gpu': ['tensorflow-gpu>=1.8.0,<1.9'],\n\t'db': req_file(\"requirements_db.txt\"),\n\t'dev': req_file(\"requirements_dev.txt\"),\n\t'doc': req_file(\"docs/requirements.txt\"),\n\t'test': req_file(\"tests/requirements.txt\")\n}\n\n# Readthedocs requires TF 1.5.0 to build properly\nif os.environ.get('READTHEDOCS', None) == 'True':\n install_requires.append(\"tensorflow==1.5.0\")\n\n# ======================= Define the package setup =======================\n\nsetup(\n name=__package_name__,\n\n # Versions should comply with PEP440. For a discussion on single-sourcing\n # the version across setup.py and the project code, see\n # https://packaging.python.org/en/latest/single_source_version.html\n version=__version__,\n\n description=__description__,\n long_description=long_description,\n\n # The project's main homepage.\n url=__repository_url__,\n download_url=__download_url__,\n\n # Author details\n author=__contact_names__,\n author_email=__contact_emails__,\n\n # maintainer Details\n maintainer=__contact_names__,\n maintainer_email=__contact_emails__,\n\n # The licence under which the project is released\n license=__license__,\n\n classifiers=[\n # How mature is this project? 
Common values are\n # 1 - Planning\n # 2 - Pre-Alpha\n # 3 - Alpha\n # 4 - Beta\n # 5 - Production/Stable\n # 6 - Mature\n # 7 - Inactive\n 'Development Status :: 5 - Production/Stable',\n\n # Indicate who your project is intended for\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Information Technology',\n\n # Indicate what your project relates to\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Image Recognition',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Utilities',\n\n # Pick your license as you wish (should match \"license\" above)\n 'License :: OSI Approved :: Apache Software License',\n\n # Specify the Python versions you support here. In particular, ensure\n # that you indicate whether you support Python 2, Python 3 or both.\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n\n # Additionnal Settings\n 'Environment :: Console',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n ],\n\n keywords=__keywords__,\n packages=find_packages(),\n\n # List run-time dependencies here. These will be installed by pip when\n # your project is installed. For an analysis of \"install_requires\" vs pip's\n # requirements files see:\n # https://packaging.python.org/en/latest/requirements.html\n install_requires=install_requires,\n\n # List additional groups of dependencies here (e.g. development\n # dependencies). You can install these using the following syntax,\n # $ pip install -e .[test]\n extras_require=extras_require,\n scripts=[\n 'tl',\n ],\n platform=['any'],\n)\n", "path": "setup.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Deep learning and Reinforcement learning library for Researchers and Engineers\"\"\"\nfrom __future__ import absolute_import\n\ntry:\n import tensorflow\n\n from . import activation\n from . import cost\n from . import files\n from . import iterate\n from . import layers\n from . import models\n from . import utils\n from . import visualize\n from . import prepro\n from . import nlp\n from . import rein\n from . 
import distributed\n\n # alias\n act = activation\n vis = visualize\n\n global_flag = {}\n global_dict = {}\n\nexcept Exception as e:\n\n import pkg_resources\n installed_packages = [d for d in pkg_resources.working_set]\n\n for package in installed_packages:\n if 'tensorlayer' in package.project_name and 'site-packages' in package.location:\n raise ImportError(\"__init__.py : Could not import TensorLayer.\\nError: {}\".format(e))\n\n# Use the following formating: (major, minor, patch, prerelease)\nVERSION = (1, 8, 5, 'rc2')\n__shortversion__ = '.'.join(map(str, VERSION[:3]))\n__version__ = '.'.join(map(str, VERSION[:3])) + \"\".join(VERSION[3:])\n\n__package_name__ = 'tensorlayer'\n__contact_names__ = 'TensorLayer Contributors'\n__contact_emails__ = '[email protected]'\n__homepage__ = 'http://tensorlayer.readthedocs.io/en/latest/'\n__repository_url__ = 'https://github.com/tensorlayer/tensorlayer'\n__download_url__ = 'https://github.com/tensorlayer/tensorlayer'\n__description__ = 'Reinforcement Learning and Deep Learning Library for Researcher and Engineer.'\n__license__ = 'apache'\n__keywords__ = 'deep learning, machine learning, computer vision, nlp, supervised learning, unsupervised learning, reinforcement learning, tensorflow'\n", "path": "tensorlayer/__init__.py"}]}
| 2,538 | 493 |
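The tensorlayer patch above swaps the pkg_resources pre-check for a try/except around the submodule imports, so `setup.py` can read the version metadata from a source checkout before its dependencies (such as progressbar) exist, while an installed copy still re-raises the original import error. A reduced sketch of that guard pattern follows; `optional_backend` is a made-up module name used purely for illustration, and the boolean flag stands in for the site-packages check done in the real patch.

```python
# PACKAGE_IS_INSTALLED stands in for the pkg_resources site-packages check in
# the patch; 'optional_backend' is a hypothetical heavy dependency.
PACKAGE_IS_INSTALLED = False

try:
    import optional_backend  # noqa: F401 -- hypothetical module, import may fail
    HAS_BACKEND = True
except ImportError as exc:
    HAS_BACKEND = False
    if PACKAGE_IS_INSTALLED:
        # An installed package with broken dependencies should still fail loudly.
        raise ImportError(
            "Could not import the optional backend.\nError: {}".format(exc))

print("backend importable:", HAS_BACKEND)  # False in a bare source checkout
```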
gh_patches_debug_10215
|
rasdani/github-patches
|
git_diff
|
pfnet__pytorch-pfn-extras-150
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add test for `LogWriterSaveFunc`
Add tests check if the output of `LogWriterSaveFunc` follows the specified format.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pytorch_pfn_extras/training/extensions/log_report.py`
Content:
```
1 import json
2
3 from pytorch_pfn_extras import reporting
4 from pytorch_pfn_extras.training import extension
5 from pytorch_pfn_extras.training import trigger as trigger_module
6
7 try:
8 import pandas
9
10 _pandas_available = True
11 except ImportError:
12 _pandas_available = False
13
14
15 class LogWriterSaveFunc:
16
17 def __init__(self, format, append):
18 self._format = format
19 self._append = append
20
21 def __call__(self, target, file_o):
22 if self._format == 'json':
23 if self._append:
24 raise ValueError(
25 'LogReport does not support json format with append mode.')
26 log = json.dumps(target, indent=4)
27 elif self._format == 'json-lines':
28 if self._append:
29 target = target[-1]
30 log = '\n'.join([json.dumps(x) for x in target])
31 elif self._format == 'yaml':
32 if self._append:
33 target = [target[-1]]
34 import yaml
35 log = yaml.dump(target)
36 else:
37 raise ValueError('Unknown format: {}'.format(self._format))
38 file_o.write(bytes(log.encode('ascii')))
39
40
41 class LogReport(extension.Extension):
42
43 """__init__(\
44 keys=None, trigger=(1, 'epoch'), postprocess=None, filename='log', writer=None)
45
46 An extension to output the accumulated results to a log file.
47
48 This extension accumulates the observations of the manager to
49 :class:`~pytorch_pfn_extras.DictSummary` at a regular interval specified
50 by a supplied trigger, and writes them into a log file in JSON format.
51
52 There are two triggers to handle this extension. One is the trigger to
53 invoke this extension, which is used to handle the timing of accumulating
54 the results. It is set to ``1, 'iteration'`` by default. The other is the
55 trigger to determine when to emit the result. When this trigger returns
56 True, this extension appends the summary of accumulated values to the list
57 of past summaries, and writes the list to the log file. Then, this
58 extension makes a new fresh summary object which is used until the next
59 time that the trigger fires.
60
61 It also adds some entries to each result dictionary.
62
63 - ``'epoch'`` and ``'iteration'`` are the epoch and iteration counts at the
64 output, respectively.
65 - ``'elapsed_time'`` is the elapsed time in seconds since the training
66 begins. The value is taken from :attr:`ExtensionsManager.elapsed_time`.
67
68 Args:
69 keys (iterable of strs): Keys of values to accumulate. If this is None,
70 all the values are accumulated and output to the log file.
71 trigger: Trigger that decides when to aggregate the result and output
72 the values. This is distinct from the trigger of this extension
73 itself. If it is a tuple in the form ``<int>, 'epoch'`` or
74 ``<int>, 'iteration'``, it is passed to :class:`IntervalTrigger`.
75 postprocess: Callback to postprocess the result dictionaries. Each
76 result dictionary is passed to this callback on the output. This
77 callback can modify the result dictionaries, which are used to
78 output to the log file.
79 filename (str): Name of the log file under the output directory. It can
80 be a format string: the last result dictionary is passed for the
81 formatting. For example, users can use '{iteration}' to separate
82 the log files for different iterations. If the log name is None, it
83 does not output the log to any file.
84 For historical reasons ``log_name`` is also accepted as an alias
85 of this argument.
86 writer (writer object, optional): must be callable.
87 object to dump the log to. If specified, it needs to have a correct
88 `savefun` defined. The writer can override the save location in
89 the :class:`pytorch_pfn_extras.training.ExtensionsManager` object
90
91 """
92
93 def __init__(self, keys=None, trigger=(1, 'epoch'), postprocess=None,
94 filename=None, append=False, format=None, **kwargs):
95 self._keys = keys
96 self._trigger = trigger_module.get_trigger(trigger)
97 self._postprocess = postprocess
98 self._log = []
99 # When using a writer, it needs to have a savefun defined
100 # to deal with a string.
101 self._writer = kwargs.get('writer', None)
102
103 log_name = kwargs.get('log_name', 'log')
104 if filename is None:
105 filename = log_name
106 del log_name # avoid accidental use
107 self._log_name = filename
108
109 if format is None and filename is not None:
110 if filename.endswith('.jsonl'):
111 format = 'json-lines'
112 elif filename.endswith('.yaml'):
113 format = 'yaml'
114 else:
115 format = 'json'
116
117 self._append = append
118 self._format = format
119 self._init_summary()
120
121 def __call__(self, manager):
122 # accumulate the observations
123 keys = self._keys
124 observation = manager.observation
125 summary = self._summary
126
127 if keys is None:
128 summary.add(observation)
129 else:
130 summary.add({k: observation[k] for k in keys if k in observation})
131
132 writer = manager.writer if self._writer is None else self._writer
133
134 if manager.is_before_training or self._trigger(manager):
135 # output the result
136 stats = self._summary.compute_mean()
137 stats_cpu = {}
138 for name, value in stats.items():
139 stats_cpu[name] = float(value) # copy to CPU
140
141 stats_cpu['epoch'] = manager.epoch
142 stats_cpu['iteration'] = manager.iteration
143 stats_cpu['elapsed_time'] = manager.elapsed_time
144
145 if self._postprocess is not None:
146 self._postprocess(stats_cpu)
147
148 self._log.append(stats_cpu)
149
150 # write to the log file
151 if self._log_name is not None:
152 log_name = self._log_name.format(**stats_cpu)
153 out = manager.out
154 savefun = LogWriterSaveFunc(self._format, self._append)
155 writer(log_name, out, self._log,
156 savefun=savefun, append=self._append)
157
158 # reset the summary for the next output
159 self._init_summary()
160
161 @property
162 def log(self):
163 """The current list of observation dictionaries."""
164 return self._log
165
166 def state_dict(self):
167 state = {}
168 if hasattr(self._trigger, 'state_dict'):
169 state['_trigger'] = self._trigger.state_dict()
170
171 try:
172 state['_summary'] = self._summary.state_dict()
173 except KeyError:
174 pass
175 state['_log'] = json.dumps(self._log)
176 return state
177
178 def load_state_dict(self, to_load):
179 if hasattr(self._trigger, 'load_state_dict'):
180 self._trigger.load_state_dict(to_load['_trigger'])
181 self._summary.load_state_dict(to_load['_summary'])
182 self._log = json.loads(to_load['_log'])
183
184 def _init_summary(self):
185 self._summary = reporting.DictSummary()
186
187 def to_dataframe(self):
188 if not _pandas_available:
189 raise ImportError(
190 "Need to install pandas to use `to_dataframe` method."
191 )
192 return pandas.DataFrame(self._log)
193
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pytorch_pfn_extras/training/extensions/log_report.py b/pytorch_pfn_extras/training/extensions/log_report.py
--- a/pytorch_pfn_extras/training/extensions/log_report.py
+++ b/pytorch_pfn_extras/training/extensions/log_report.py
@@ -26,8 +26,9 @@
log = json.dumps(target, indent=4)
elif self._format == 'json-lines':
if self._append:
- target = target[-1]
- log = '\n'.join([json.dumps(x) for x in target])
+ target = [target[-1]]
+ # Add a new line at the end for subsequent appends
+ log = '\n'.join([json.dumps(x) for x in target]) + '\n'
elif self._format == 'yaml':
if self._append:
target = [target[-1]]
|
{"golden_diff": "diff --git a/pytorch_pfn_extras/training/extensions/log_report.py b/pytorch_pfn_extras/training/extensions/log_report.py\n--- a/pytorch_pfn_extras/training/extensions/log_report.py\n+++ b/pytorch_pfn_extras/training/extensions/log_report.py\n@@ -26,8 +26,9 @@\n log = json.dumps(target, indent=4)\n elif self._format == 'json-lines':\n if self._append:\n- target = target[-1]\n- log = '\\n'.join([json.dumps(x) for x in target])\n+ target = [target[-1]]\n+ # Add a new line at the end for subsequent appends\n+ log = '\\n'.join([json.dumps(x) for x in target]) + '\\n'\n elif self._format == 'yaml':\n if self._append:\n target = [target[-1]]\n", "issue": "Add test for `LogWriterSaveFunc`\nAdd tests check if the output of `LogWriterSaveFunc` follows the specified format.\n", "before_files": [{"content": "import json\n\nfrom pytorch_pfn_extras import reporting\nfrom pytorch_pfn_extras.training import extension\nfrom pytorch_pfn_extras.training import trigger as trigger_module\n\ntry:\n import pandas\n\n _pandas_available = True\nexcept ImportError:\n _pandas_available = False\n\n\nclass LogWriterSaveFunc:\n\n def __init__(self, format, append):\n self._format = format\n self._append = append\n\n def __call__(self, target, file_o):\n if self._format == 'json':\n if self._append:\n raise ValueError(\n 'LogReport does not support json format with append mode.')\n log = json.dumps(target, indent=4)\n elif self._format == 'json-lines':\n if self._append:\n target = target[-1]\n log = '\\n'.join([json.dumps(x) for x in target])\n elif self._format == 'yaml':\n if self._append:\n target = [target[-1]]\n import yaml\n log = yaml.dump(target)\n else:\n raise ValueError('Unknown format: {}'.format(self._format))\n file_o.write(bytes(log.encode('ascii')))\n\n\nclass LogReport(extension.Extension):\n\n \"\"\"__init__(\\\nkeys=None, trigger=(1, 'epoch'), postprocess=None, filename='log', writer=None)\n\n An extension to output the accumulated results to a log file.\n\n This extension accumulates the observations of the manager to\n :class:`~pytorch_pfn_extras.DictSummary` at a regular interval specified\n by a supplied trigger, and writes them into a log file in JSON format.\n\n There are two triggers to handle this extension. One is the trigger to\n invoke this extension, which is used to handle the timing of accumulating\n the results. It is set to ``1, 'iteration'`` by default. The other is the\n trigger to determine when to emit the result. When this trigger returns\n True, this extension appends the summary of accumulated values to the list\n of past summaries, and writes the list to the log file. Then, this\n extension makes a new fresh summary object which is used until the next\n time that the trigger fires.\n\n It also adds some entries to each result dictionary.\n\n - ``'epoch'`` and ``'iteration'`` are the epoch and iteration counts at the\n output, respectively.\n - ``'elapsed_time'`` is the elapsed time in seconds since the training\n begins. The value is taken from :attr:`ExtensionsManager.elapsed_time`.\n\n Args:\n keys (iterable of strs): Keys of values to accumulate. If this is None,\n all the values are accumulated and output to the log file.\n trigger: Trigger that decides when to aggregate the result and output\n the values. This is distinct from the trigger of this extension\n itself. If it is a tuple in the form ``<int>, 'epoch'`` or\n ``<int>, 'iteration'``, it is passed to :class:`IntervalTrigger`.\n postprocess: Callback to postprocess the result dictionaries. 
Each\n result dictionary is passed to this callback on the output. This\n callback can modify the result dictionaries, which are used to\n output to the log file.\n filename (str): Name of the log file under the output directory. It can\n be a format string: the last result dictionary is passed for the\n formatting. For example, users can use '{iteration}' to separate\n the log files for different iterations. If the log name is None, it\n does not output the log to any file.\n For historical reasons ``log_name`` is also accepted as an alias\n of this argument.\n writer (writer object, optional): must be callable.\n object to dump the log to. If specified, it needs to have a correct\n `savefun` defined. The writer can override the save location in\n the :class:`pytorch_pfn_extras.training.ExtensionsManager` object\n\n \"\"\"\n\n def __init__(self, keys=None, trigger=(1, 'epoch'), postprocess=None,\n filename=None, append=False, format=None, **kwargs):\n self._keys = keys\n self._trigger = trigger_module.get_trigger(trigger)\n self._postprocess = postprocess\n self._log = []\n # When using a writer, it needs to have a savefun defined\n # to deal with a string.\n self._writer = kwargs.get('writer', None)\n\n log_name = kwargs.get('log_name', 'log')\n if filename is None:\n filename = log_name\n del log_name # avoid accidental use\n self._log_name = filename\n\n if format is None and filename is not None:\n if filename.endswith('.jsonl'):\n format = 'json-lines'\n elif filename.endswith('.yaml'):\n format = 'yaml'\n else:\n format = 'json'\n\n self._append = append\n self._format = format\n self._init_summary()\n\n def __call__(self, manager):\n # accumulate the observations\n keys = self._keys\n observation = manager.observation\n summary = self._summary\n\n if keys is None:\n summary.add(observation)\n else:\n summary.add({k: observation[k] for k in keys if k in observation})\n\n writer = manager.writer if self._writer is None else self._writer\n\n if manager.is_before_training or self._trigger(manager):\n # output the result\n stats = self._summary.compute_mean()\n stats_cpu = {}\n for name, value in stats.items():\n stats_cpu[name] = float(value) # copy to CPU\n\n stats_cpu['epoch'] = manager.epoch\n stats_cpu['iteration'] = manager.iteration\n stats_cpu['elapsed_time'] = manager.elapsed_time\n\n if self._postprocess is not None:\n self._postprocess(stats_cpu)\n\n self._log.append(stats_cpu)\n\n # write to the log file\n if self._log_name is not None:\n log_name = self._log_name.format(**stats_cpu)\n out = manager.out\n savefun = LogWriterSaveFunc(self._format, self._append)\n writer(log_name, out, self._log,\n savefun=savefun, append=self._append)\n\n # reset the summary for the next output\n self._init_summary()\n\n @property\n def log(self):\n \"\"\"The current list of observation dictionaries.\"\"\"\n return self._log\n\n def state_dict(self):\n state = {}\n if hasattr(self._trigger, 'state_dict'):\n state['_trigger'] = self._trigger.state_dict()\n\n try:\n state['_summary'] = self._summary.state_dict()\n except KeyError:\n pass\n state['_log'] = json.dumps(self._log)\n return state\n\n def load_state_dict(self, to_load):\n if hasattr(self._trigger, 'load_state_dict'):\n self._trigger.load_state_dict(to_load['_trigger'])\n self._summary.load_state_dict(to_load['_summary'])\n self._log = json.loads(to_load['_log'])\n\n def _init_summary(self):\n self._summary = reporting.DictSummary()\n\n def to_dataframe(self):\n if not _pandas_available:\n raise ImportError(\n \"Need to install pandas 
to use `to_dataframe` method.\"\n )\n return pandas.DataFrame(self._log)\n", "path": "pytorch_pfn_extras/training/extensions/log_report.py"}], "after_files": [{"content": "import json\n\nfrom pytorch_pfn_extras import reporting\nfrom pytorch_pfn_extras.training import extension\nfrom pytorch_pfn_extras.training import trigger as trigger_module\n\ntry:\n import pandas\n\n _pandas_available = True\nexcept ImportError:\n _pandas_available = False\n\n\nclass LogWriterSaveFunc:\n\n def __init__(self, format, append):\n self._format = format\n self._append = append\n\n def __call__(self, target, file_o):\n if self._format == 'json':\n if self._append:\n raise ValueError(\n 'LogReport does not support json format with append mode.')\n log = json.dumps(target, indent=4)\n elif self._format == 'json-lines':\n if self._append:\n target = [target[-1]]\n # Add a new line at the end for subsequent appends\n log = '\\n'.join([json.dumps(x) for x in target]) + '\\n'\n elif self._format == 'yaml':\n if self._append:\n target = [target[-1]]\n import yaml\n log = yaml.dump(target)\n else:\n raise ValueError('Unknown format: {}'.format(self._format))\n file_o.write(bytes(log.encode('ascii')))\n\n\nclass LogReport(extension.Extension):\n\n \"\"\"__init__(\\\nkeys=None, trigger=(1, 'epoch'), postprocess=None, filename='log', writer=None)\n\n An extension to output the accumulated results to a log file.\n\n This extension accumulates the observations of the manager to\n :class:`~pytorch_pfn_extras.DictSummary` at a regular interval specified\n by a supplied trigger, and writes them into a log file in JSON format.\n\n There are two triggers to handle this extension. One is the trigger to\n invoke this extension, which is used to handle the timing of accumulating\n the results. It is set to ``1, 'iteration'`` by default. The other is the\n trigger to determine when to emit the result. When this trigger returns\n True, this extension appends the summary of accumulated values to the list\n of past summaries, and writes the list to the log file. Then, this\n extension makes a new fresh summary object which is used until the next\n time that the trigger fires.\n\n It also adds some entries to each result dictionary.\n\n - ``'epoch'`` and ``'iteration'`` are the epoch and iteration counts at the\n output, respectively.\n - ``'elapsed_time'`` is the elapsed time in seconds since the training\n begins. The value is taken from :attr:`ExtensionsManager.elapsed_time`.\n\n Args:\n keys (iterable of strs): Keys of values to accumulate. If this is None,\n all the values are accumulated and output to the log file.\n trigger: Trigger that decides when to aggregate the result and output\n the values. This is distinct from the trigger of this extension\n itself. If it is a tuple in the form ``<int>, 'epoch'`` or\n ``<int>, 'iteration'``, it is passed to :class:`IntervalTrigger`.\n postprocess: Callback to postprocess the result dictionaries. Each\n result dictionary is passed to this callback on the output. This\n callback can modify the result dictionaries, which are used to\n output to the log file.\n filename (str): Name of the log file under the output directory. It can\n be a format string: the last result dictionary is passed for the\n formatting. For example, users can use '{iteration}' to separate\n the log files for different iterations. 
If the log name is None, it\n does not output the log to any file.\n For historical reasons ``log_name`` is also accepted as an alias\n of this argument.\n writer (writer object, optional): must be callable.\n object to dump the log to. If specified, it needs to have a correct\n `savefun` defined. The writer can override the save location in\n the :class:`pytorch_pfn_extras.training.ExtensionsManager` object\n\n \"\"\"\n\n def __init__(self, keys=None, trigger=(1, 'epoch'), postprocess=None,\n filename=None, append=False, format=None, **kwargs):\n self._keys = keys\n self._trigger = trigger_module.get_trigger(trigger)\n self._postprocess = postprocess\n self._log = []\n # When using a writer, it needs to have a savefun defined\n # to deal with a string.\n self._writer = kwargs.get('writer', None)\n\n log_name = kwargs.get('log_name', 'log')\n if filename is None:\n filename = log_name\n del log_name # avoid accidental use\n self._log_name = filename\n\n if format is None and filename is not None:\n if filename.endswith('.jsonl'):\n format = 'json-lines'\n elif filename.endswith('.yaml'):\n format = 'yaml'\n else:\n format = 'json'\n\n self._append = append\n self._format = format\n self._init_summary()\n\n def __call__(self, manager):\n # accumulate the observations\n keys = self._keys\n observation = manager.observation\n summary = self._summary\n\n if keys is None:\n summary.add(observation)\n else:\n summary.add({k: observation[k] for k in keys if k in observation})\n\n writer = manager.writer if self._writer is None else self._writer\n\n if manager.is_before_training or self._trigger(manager):\n # output the result\n stats = self._summary.compute_mean()\n stats_cpu = {}\n for name, value in stats.items():\n stats_cpu[name] = float(value) # copy to CPU\n\n stats_cpu['epoch'] = manager.epoch\n stats_cpu['iteration'] = manager.iteration\n stats_cpu['elapsed_time'] = manager.elapsed_time\n\n if self._postprocess is not None:\n self._postprocess(stats_cpu)\n\n self._log.append(stats_cpu)\n\n # write to the log file\n if self._log_name is not None:\n log_name = self._log_name.format(**stats_cpu)\n out = manager.out\n savefun = LogWriterSaveFunc(self._format, self._append)\n writer(log_name, out, self._log,\n savefun=savefun, append=self._append)\n\n # reset the summary for the next output\n self._init_summary()\n\n @property\n def log(self):\n \"\"\"The current list of observation dictionaries.\"\"\"\n return self._log\n\n def state_dict(self):\n state = {}\n if hasattr(self._trigger, 'state_dict'):\n state['_trigger'] = self._trigger.state_dict()\n\n try:\n state['_summary'] = self._summary.state_dict()\n except KeyError:\n pass\n state['_log'] = json.dumps(self._log)\n return state\n\n def load_state_dict(self, to_load):\n if hasattr(self._trigger, 'load_state_dict'):\n self._trigger.load_state_dict(to_load['_trigger'])\n self._summary.load_state_dict(to_load['_summary'])\n self._log = json.loads(to_load['_log'])\n\n def _init_summary(self):\n self._summary = reporting.DictSummary()\n\n def to_dataframe(self):\n if not _pandas_available:\n raise ImportError(\n \"Need to install pandas to use `to_dataframe` method.\"\n )\n return pandas.DataFrame(self._log)\n", "path": "pytorch_pfn_extras/training/extensions/log_report.py"}]}
| 2,368 | 194 |
gh_patches_debug_26663
|
rasdani/github-patches
|
git_diff
|
plone__Products.CMFPlone-350
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Old-style profile with JS/CSS imported but not reliably used within rendered pages after a while
Situation: Plone 5.0a3 buildout with
git@github.com:onkopedia/zopyx.existdb.git 
('dexterity' branch).
I created a new Plone site and added 'zopyx.existdb' as add-on.
As expected the JS/CSS resources show up in the resource registry control panels.
For debugging purposes I added an alert() to the local.js file of the package.
After some operations in the control panel and some content management work in the Plone UI, the 
zopyx.existdb resources are no longer loaded - they no longer show up in the HTML markup and of course the alert() is no longer triggered for each page load.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Products/CMFPlone/resources/viewlets/__init__.py`
Content:
```
1
2 from Products.ResourceRegistries.interfaces.registries import IResourceRegistry
3 from Products.ResourceRegistries.interfaces.registries import ICSSRegistry
4 from Products.ResourceRegistries.interfaces.registries import IKSSRegistry
5 from Products.ResourceRegistries.interfaces.registries import IJSRegistry
6 from Products.ResourceRegistries.interfaces.registries import ICookedFile
7 from Products.ResourceRegistries.interfaces.registries import IResourceProvider
8 from Products.ResourceRegistries.interfaces.viewletmanagers import IHtmlHeadScripts
9 from Products.ResourceRegistries.interfaces.viewletmanagers import IHtmlHeadStyles
10 from Products.ResourceRegistries.interfaces.settings import IResourceRegistriesSettings
11
```
Path: `Products/CMFPlone/controlpanel/browser/resourceregistry.py`
Content:
```
1 from datetime import datetime
2 from Products.CMFPlone.interfaces import IBundleRegistry
3 from Products.CMFPlone.interfaces import IResourceRegistry
4 from Products.CMFPlone.interfaces.resources import OVERRIDE_RESOURCE_DIRECTORY_NAME # noqa
5 from Products.CMFPlone.resources.browser.configjs import RequireJsView
6 from StringIO import StringIO
7 from plone.memoize.view import memoize
8 from plone.registry.interfaces import IRegistry
9 from plone.resource.interfaces import IResourceDirectory
10 from urlparse import urlparse
11 from zExceptions import NotFound
12 from zope.component import getUtility
13 import json
14
15
16 class JSONEncoder(json.JSONEncoder):
17 def default(self, obj):
18 if hasattr(obj, 'isoformat'):
19 return obj.isoformat()
20 else:
21 return json.JSONEncoder.default(self, obj)
22
23
24 def recordsToDict(record):
25 data = {}
26 for name in record.__schema__.names():
27 data[name] = getattr(record, name)
28 return data
29
30
31 def updateRecordFromDict(record, data):
32 for name in record.__schema__.names():
33 if name in ['last_compilation']:
34 continue
35 if name in data:
36 # almost all string data needs to be str, not unicode
37 val = data[name]
38 if isinstance(val, unicode):
39 val = val.encode('utf-8')
40 if isinstance(val, list):
41 newval = []
42 for item in val:
43 if isinstance(item, unicode):
44 item = item.encode('utf-8')
45 newval.append(item)
46 val = newval
47 setattr(record, name, val)
48
49
50 class OverrideFolderManager(object):
51
52 def __init__(self, context):
53 self.context = context
54 persistent_directory = getUtility(IResourceDirectory, name="persistent") # noqa
55 if OVERRIDE_RESOURCE_DIRECTORY_NAME not in persistent_directory:
56 persistent_directory.makeDirectory(OVERRIDE_RESOURCE_DIRECTORY_NAME) # noqa
57 self.container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]
58
59 def save_file(self, filepath, data):
60 resource_name, resource_filepath = filepath.split('/', 1)
61 if resource_name not in self.container:
62 self.container.makeDirectory(resource_name)
63 folder = self.container[resource_name]
64 fi = StringIO(data)
65 folder.writeFile(resource_filepath, fi)
66 return folder[resource_filepath]
67
68 def delete_file(self, filepath):
69 resource_name, resource_filepath = filepath.split('/', 1)
70
71 if resource_name not in self.container:
72 return
73 folder = self.container[resource_name]
74 try:
75 fi = folder[resource_filepath]
76 except NotFound:
77 return
78 parent = self.get_parent(fi)
79 del parent[fi.getId()]
80 if filepath not in self.container:
81 return
82 folder = self.container[resource_name]
83 try:
84 fi = folder[resource_filepath]
85 except NotFound:
86 return
87 parent = self.get_parent(fi)
88 del parent[fi.getId()]
89
90 def get_parent(self, item):
91 path = '/'.join(item.getPhysicalPath()[:-1])
92 return self.context.restrictedTraverse(path)
93
94 def list_dir(self, container):
95 if hasattr(container, 'listDirectory'):
96 return container.listDirectory()
97 else:
98 return container.objectIds()
99
100
101 class ResourceRegistryControlPanelView(RequireJsView):
102
103 def __call__(self):
104 req = self.request
105 if req.REQUEST_METHOD == 'POST':
106 action = req.get('action', '')
107 method = action.replace('-', '_')
108 if hasattr(self, method):
109 return getattr(self, method)()
110 else:
111 return json.dumps({
112 'success': False,
113 'msg': 'Invalid action: ' + action
114 })
115 else:
116 return self.index()
117
118 @property
119 @memoize
120 def registry(self):
121 return getUtility(IRegistry)
122
123 def update_registry_collection(self, itype, prefix, newdata):
124 rdata = self.registry.collectionOfInterface(itype, prefix=prefix)
125 for key, data in newdata.items():
126 if key not in rdata:
127 record = rdata.add(key)
128 else:
129 record = rdata[key]
130 updateRecordFromDict(record, data)
131 # remove missing ones
132 for key in set(rdata.keys()) - set(newdata.keys()):
133 del rdata[key]
134
135 def save_development_mode(self):
136 if self.request.form.get('value', '').lower() == 'true':
137 self.registry['plone.resources.development'] = True
138 else:
139 self.registry['plone.resources.development'] = False
140 return json.dumps({
141 'success': True
142 })
143
144 def save_registry(self):
145 req = self.request
146
147 self.update_registry_collection(
148 IResourceRegistry, "plone.resources",
149 json.loads(req.get('resources')))
150 self.update_registry_collection(
151 IBundleRegistry, "plone.bundles",
152 json.loads(req.get('bundles')))
153
154 return json.dumps({
155 'success': True
156 })
157
158 def save_file(self):
159 req = self.request
160 resource_path = req.form.get('filepath').split('++plone++')[-1]
161 overrides = OverrideFolderManager(self.context)
162 overrides.save_file(resource_path, req.form['data'])
163 return json.dumps({
164 'success': True
165 })
166
167 def delete_file(self):
168 req = self.request
169 resource_path = req.form.get('filepath').split('++plone++')[-1]
170 overrides = OverrideFolderManager(self.context)
171 overrides.delete_file(resource_path)
172
173 return json.dumps({
174 'success': True
175 })
176
177 def get_bundles(self):
178 return self.registry.collectionOfInterface(
179 IBundleRegistry, prefix="plone.bundles")
180
181 def get_resources(self):
182 return self.registry.collectionOfInterface(
183 IResourceRegistry, prefix="plone.resources")
184
185 def less_build_config(self):
186 site_url = self.context.portal_url()
187 bundles = self.get_bundles()
188 bundle = self.request.get('bundle', None)
189 resources = self.get_resources()
190 less_files = []
191 if bundle and bundle in bundles:
192 bundle_obj = bundles[bundle]
193 for resource in bundle_obj.resources:
194 if resource in resources:
195 for css in resources[resource].css:
196 url = urlparse(css)
197 if url.netloc == '':
198 # Local
199 src = "%s/%s" % (site_url, css)
200 else:
201 src = "%s" % (css)
202
203 extension = url.path.split('.')[-1]
204 if extension == 'less':
205 less_files.append(src)
206 return json.dumps({
207 'less': less_files,
208 })
209
210 def js_build_config(self):
211 (baseUrl, paths, shims) = self.get_requirejs_config()
212 bundles = self.get_bundles()
213 resources = self.get_resources()
214
215 bundle = self.request.get('bundle', None)
216 includes = []
217 if bundle and bundle in bundles:
218 bundle_obj = bundles[bundle]
219 for resource_name in bundle_obj.resources:
220 # need to check if this resource has a js file
221 # it could just be a css resource
222 try:
223 resource = resources[resource_name]
224 if resource.js:
225 includes.append(resource_name)
226 except KeyError:
227 # skip if missing
228 pass
229 return json.dumps({
230 'include': includes,
231 'shims': shims,
232 'paths': paths
233 })
234
235 def save_js_build(self):
236 overrides = OverrideFolderManager(self.context)
237 req = self.request
238 filepath = 'static/%s-compiled.js' % req.form['bundle']
239 overrides.save_file(filepath, req.form['data'])
240 bundle = self.get_bundles().get(req.form['bundle'])
241 if bundle:
242 bundle.last_compilation = datetime.now()
243 return json.dumps({
244 'success': True,
245 'filepath': '++plone++' + filepath
246 })
247
248 def save_less_build(self):
249 overrides = OverrideFolderManager(self.context)
250 req = self.request
251 filepath = 'static/%s-compiled.css' % req.form['bundle']
252 data = '\n'.join([req.form[k] for k in req.form.keys()
253 if k.startswith('data-')])
254 overrides.save_file(filepath, data)
255 bundle = self.get_bundles().get(req.form['bundle'])
256 if bundle:
257 bundle.last_compilation = datetime.now()
258 return json.dumps({
259 'success': True,
260 'filepath': '++plone++' + filepath
261 })
262
263 def save_less_variables(self):
264 data = {}
265 for key, val in json.loads(self.request.form.get('data')).items():
266 # need to convert to str: unicode
267 data[key.encode('utf8')] = val
268 self.registry['plone.lessvariables'] = data
269 return json.dumps({
270 'success': True
271 })
272
273 def save_pattern_options(self):
274 data = {}
275 for key, val in json.loads(self.request.form.get('data')).items():
276 # need to convert to str: unicode
277 data[key.encode('utf8')] = val
278 self.registry['plone.patternoptions'] = data
279 return json.dumps({
280 'success': True
281 })
282
283 def get_overrides(self):
284 overrides = OverrideFolderManager(self.context)
285
286 def _read_folder(folder):
287 files = []
288 for filename in folder.listDirectory():
289 item = folder[filename]
290 if folder.isDirectory(filename):
291 files.extend(_read_folder(item))
292 else:
293 files.append(item)
294 return files
295 files = _read_folder(overrides.container)
296 results = []
297 site_path = self.context.getPhysicalPath()
298 for fi in files:
299 path = fi.getPhysicalPath()
300 rel_path = path[len(site_path) + 2:]
301 results.append('++plone++%s/%s' % (
302 rel_path[0], '/'.join(rel_path[1:])))
303 return results
304
305 def config(self):
306 base_url = self.context.absolute_url()
307 resources = self.get_resources()
308
309 try:
310 less_url = self.registry['plone.resources.lessc']
311 except KeyError:
312 less_url = '++plone++static/components/less/dist/less-1.7.4.min.js'
313 try:
314 rjs_url = resources['rjs'].js
315 except KeyError:
316 rjs_url = '++plone++static/components/r.js/dist/r.js'
317
318 data = {
319 'development': self.registry['plone.resources.development'],
320 'lessvariables': self.registry['plone.lessvariables'],
321 'resources': {},
322 'bundles': {},
323 'javascripts': {},
324 'css': {},
325 'baseUrl': base_url,
326 'manageUrl': '%s/@@resourceregistry-controlpanel' % base_url,
327 'lessUrl': '%s/%s' % (base_url, less_url),
328 'lessConfigUrl': '%s/less-variables.js' % base_url,
329 'rjsUrl': rjs_url,
330 'patternoptions': self.registry['plone.patternoptions']
331 }
332 bundles = self.get_bundles()
333 for key, resource in resources.items():
334 data['resources'][key] = recordsToDict(resource)
335 for key, bundle in bundles.items():
336 data['bundles'][key] = recordsToDict(bundle)
337 data['overrides'] = self.get_overrides()
338 return json.dumps(data, cls=JSONEncoder)
339
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/Products/CMFPlone/controlpanel/browser/resourceregistry.py b/Products/CMFPlone/controlpanel/browser/resourceregistry.py
--- a/Products/CMFPlone/controlpanel/browser/resourceregistry.py
+++ b/Products/CMFPlone/controlpanel/browser/resourceregistry.py
@@ -240,6 +240,7 @@
bundle = self.get_bundles().get(req.form['bundle'])
if bundle:
bundle.last_compilation = datetime.now()
+ bundle.jscompilation = '++plone++{}'.format(filepath)
return json.dumps({
'success': True,
'filepath': '++plone++' + filepath
@@ -255,6 +256,7 @@
bundle = self.get_bundles().get(req.form['bundle'])
if bundle:
bundle.last_compilation = datetime.now()
+ bundle.csscompilation = '++plone++{}'.format(filepath)
return json.dumps({
'success': True,
'filepath': '++plone++' + filepath
diff --git a/Products/CMFPlone/resources/viewlets/__init__.py b/Products/CMFPlone/resources/viewlets/__init__.py
--- a/Products/CMFPlone/resources/viewlets/__init__.py
+++ b/Products/CMFPlone/resources/viewlets/__init__.py
@@ -1,4 +1,3 @@
-
from Products.ResourceRegistries.interfaces.registries import IResourceRegistry
from Products.ResourceRegistries.interfaces.registries import ICSSRegistry
from Products.ResourceRegistries.interfaces.registries import IKSSRegistry
|
{"golden_diff": "diff --git a/Products/CMFPlone/controlpanel/browser/resourceregistry.py b/Products/CMFPlone/controlpanel/browser/resourceregistry.py\n--- a/Products/CMFPlone/controlpanel/browser/resourceregistry.py\n+++ b/Products/CMFPlone/controlpanel/browser/resourceregistry.py\n@@ -240,6 +240,7 @@\n bundle = self.get_bundles().get(req.form['bundle'])\n if bundle:\n bundle.last_compilation = datetime.now()\n+ bundle.jscompilation = '++plone++{}'.format(filepath)\n return json.dumps({\n 'success': True,\n 'filepath': '++plone++' + filepath\n@@ -255,6 +256,7 @@\n bundle = self.get_bundles().get(req.form['bundle'])\n if bundle:\n bundle.last_compilation = datetime.now()\n+ bundle.csscompilation = '++plone++{}'.format(filepath)\n return json.dumps({\n 'success': True,\n 'filepath': '++plone++' + filepath\ndiff --git a/Products/CMFPlone/resources/viewlets/__init__.py b/Products/CMFPlone/resources/viewlets/__init__.py\n--- a/Products/CMFPlone/resources/viewlets/__init__.py\n+++ b/Products/CMFPlone/resources/viewlets/__init__.py\n@@ -1,4 +1,3 @@\n-\n from Products.ResourceRegistries.interfaces.registries import IResourceRegistry\n from Products.ResourceRegistries.interfaces.registries import ICSSRegistry\n from Products.ResourceRegistries.interfaces.registries import IKSSRegistry\n", "issue": "Old-style profile with JS/CSS imported but not reliably used within rendered pages after a while\nSituation: Plone 5.0a3 buildout with \n\[email protected]:onkopedia/zopyx.existdb.git \n\n('dexterity' branch).\n\nI created a new Plone site and added 'zopyx.existdb' as add-on.\n\nAs expected the JS/CSS resources show up in the resource registry control panels.\n\nFor debugging purposes I added an alert() to the local.js file of the package.\n\nAfter the some operations in the control panel and some content management work in Plone UI the \nzopyx.existdb resources are no longer loaded - they do not longer show up in the HTML markup and of course the alert() is no longer trigger for each page load.\n\n", "before_files": [{"content": "\nfrom Products.ResourceRegistries.interfaces.registries import IResourceRegistry\nfrom Products.ResourceRegistries.interfaces.registries import ICSSRegistry\nfrom Products.ResourceRegistries.interfaces.registries import IKSSRegistry\nfrom Products.ResourceRegistries.interfaces.registries import IJSRegistry\nfrom Products.ResourceRegistries.interfaces.registries import ICookedFile\nfrom Products.ResourceRegistries.interfaces.registries import IResourceProvider\nfrom Products.ResourceRegistries.interfaces.viewletmanagers import IHtmlHeadScripts\nfrom Products.ResourceRegistries.interfaces.viewletmanagers import IHtmlHeadStyles\nfrom Products.ResourceRegistries.interfaces.settings import IResourceRegistriesSettings\n", "path": "Products/CMFPlone/resources/viewlets/__init__.py"}, {"content": "from datetime import datetime\nfrom Products.CMFPlone.interfaces import IBundleRegistry\nfrom Products.CMFPlone.interfaces import IResourceRegistry\nfrom Products.CMFPlone.interfaces.resources import OVERRIDE_RESOURCE_DIRECTORY_NAME # noqa\nfrom Products.CMFPlone.resources.browser.configjs import RequireJsView\nfrom StringIO import StringIO\nfrom plone.memoize.view import memoize\nfrom plone.registry.interfaces import IRegistry\nfrom plone.resource.interfaces import IResourceDirectory\nfrom urlparse import urlparse\nfrom zExceptions import NotFound\nfrom zope.component import getUtility\nimport json\n\n\nclass JSONEncoder(json.JSONEncoder):\n def default(self, obj):\n if hasattr(obj, 
'isoformat'):\n return obj.isoformat()\n else:\n return json.JSONEncoder.default(self, obj)\n\n\ndef recordsToDict(record):\n data = {}\n for name in record.__schema__.names():\n data[name] = getattr(record, name)\n return data\n\n\ndef updateRecordFromDict(record, data):\n for name in record.__schema__.names():\n if name in ['last_compilation']:\n continue\n if name in data:\n # almost all string data needs to be str, not unicode\n val = data[name]\n if isinstance(val, unicode):\n val = val.encode('utf-8')\n if isinstance(val, list):\n newval = []\n for item in val:\n if isinstance(item, unicode):\n item = item.encode('utf-8')\n newval.append(item)\n val = newval\n setattr(record, name, val)\n\n\nclass OverrideFolderManager(object):\n\n def __init__(self, context):\n self.context = context\n persistent_directory = getUtility(IResourceDirectory, name=\"persistent\") # noqa\n if OVERRIDE_RESOURCE_DIRECTORY_NAME not in persistent_directory:\n persistent_directory.makeDirectory(OVERRIDE_RESOURCE_DIRECTORY_NAME) # noqa\n self.container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]\n\n def save_file(self, filepath, data):\n resource_name, resource_filepath = filepath.split('/', 1)\n if resource_name not in self.container:\n self.container.makeDirectory(resource_name)\n folder = self.container[resource_name]\n fi = StringIO(data)\n folder.writeFile(resource_filepath, fi)\n return folder[resource_filepath]\n\n def delete_file(self, filepath):\n resource_name, resource_filepath = filepath.split('/', 1)\n\n if resource_name not in self.container:\n return\n folder = self.container[resource_name]\n try:\n fi = folder[resource_filepath]\n except NotFound:\n return\n parent = self.get_parent(fi)\n del parent[fi.getId()]\n if filepath not in self.container:\n return\n folder = self.container[resource_name]\n try:\n fi = folder[resource_filepath]\n except NotFound:\n return\n parent = self.get_parent(fi)\n del parent[fi.getId()]\n\n def get_parent(self, item):\n path = '/'.join(item.getPhysicalPath()[:-1])\n return self.context.restrictedTraverse(path)\n\n def list_dir(self, container):\n if hasattr(container, 'listDirectory'):\n return container.listDirectory()\n else:\n return container.objectIds()\n\n\nclass ResourceRegistryControlPanelView(RequireJsView):\n\n def __call__(self):\n req = self.request\n if req.REQUEST_METHOD == 'POST':\n action = req.get('action', '')\n method = action.replace('-', '_')\n if hasattr(self, method):\n return getattr(self, method)()\n else:\n return json.dumps({\n 'success': False,\n 'msg': 'Invalid action: ' + action\n })\n else:\n return self.index()\n\n @property\n @memoize\n def registry(self):\n return getUtility(IRegistry)\n\n def update_registry_collection(self, itype, prefix, newdata):\n rdata = self.registry.collectionOfInterface(itype, prefix=prefix)\n for key, data in newdata.items():\n if key not in rdata:\n record = rdata.add(key)\n else:\n record = rdata[key]\n updateRecordFromDict(record, data)\n # remove missing ones\n for key in set(rdata.keys()) - set(newdata.keys()):\n del rdata[key]\n\n def save_development_mode(self):\n if self.request.form.get('value', '').lower() == 'true':\n self.registry['plone.resources.development'] = True\n else:\n self.registry['plone.resources.development'] = False\n return json.dumps({\n 'success': True\n })\n\n def save_registry(self):\n req = self.request\n\n self.update_registry_collection(\n IResourceRegistry, \"plone.resources\",\n json.loads(req.get('resources')))\n self.update_registry_collection(\n 
IBundleRegistry, \"plone.bundles\",\n json.loads(req.get('bundles')))\n\n return json.dumps({\n 'success': True\n })\n\n def save_file(self):\n req = self.request\n resource_path = req.form.get('filepath').split('++plone++')[-1]\n overrides = OverrideFolderManager(self.context)\n overrides.save_file(resource_path, req.form['data'])\n return json.dumps({\n 'success': True\n })\n\n def delete_file(self):\n req = self.request\n resource_path = req.form.get('filepath').split('++plone++')[-1]\n overrides = OverrideFolderManager(self.context)\n overrides.delete_file(resource_path)\n\n return json.dumps({\n 'success': True\n })\n\n def get_bundles(self):\n return self.registry.collectionOfInterface(\n IBundleRegistry, prefix=\"plone.bundles\")\n\n def get_resources(self):\n return self.registry.collectionOfInterface(\n IResourceRegistry, prefix=\"plone.resources\")\n\n def less_build_config(self):\n site_url = self.context.portal_url()\n bundles = self.get_bundles()\n bundle = self.request.get('bundle', None)\n resources = self.get_resources()\n less_files = []\n if bundle and bundle in bundles:\n bundle_obj = bundles[bundle]\n for resource in bundle_obj.resources:\n if resource in resources:\n for css in resources[resource].css:\n url = urlparse(css)\n if url.netloc == '':\n # Local\n src = \"%s/%s\" % (site_url, css)\n else:\n src = \"%s\" % (css)\n\n extension = url.path.split('.')[-1]\n if extension == 'less':\n less_files.append(src)\n return json.dumps({\n 'less': less_files,\n })\n\n def js_build_config(self):\n (baseUrl, paths, shims) = self.get_requirejs_config()\n bundles = self.get_bundles()\n resources = self.get_resources()\n\n bundle = self.request.get('bundle', None)\n includes = []\n if bundle and bundle in bundles:\n bundle_obj = bundles[bundle]\n for resource_name in bundle_obj.resources:\n # need to check if this resource has a js file\n # it could just be a css resource\n try:\n resource = resources[resource_name]\n if resource.js:\n includes.append(resource_name)\n except KeyError:\n # skip if missing\n pass\n return json.dumps({\n 'include': includes,\n 'shims': shims,\n 'paths': paths\n })\n\n def save_js_build(self):\n overrides = OverrideFolderManager(self.context)\n req = self.request\n filepath = 'static/%s-compiled.js' % req.form['bundle']\n overrides.save_file(filepath, req.form['data'])\n bundle = self.get_bundles().get(req.form['bundle'])\n if bundle:\n bundle.last_compilation = datetime.now()\n return json.dumps({\n 'success': True,\n 'filepath': '++plone++' + filepath\n })\n\n def save_less_build(self):\n overrides = OverrideFolderManager(self.context)\n req = self.request\n filepath = 'static/%s-compiled.css' % req.form['bundle']\n data = '\\n'.join([req.form[k] for k in req.form.keys()\n if k.startswith('data-')])\n overrides.save_file(filepath, data)\n bundle = self.get_bundles().get(req.form['bundle'])\n if bundle:\n bundle.last_compilation = datetime.now()\n return json.dumps({\n 'success': True,\n 'filepath': '++plone++' + filepath\n })\n\n def save_less_variables(self):\n data = {}\n for key, val in json.loads(self.request.form.get('data')).items():\n # need to convert to str: unicode\n data[key.encode('utf8')] = val\n self.registry['plone.lessvariables'] = data\n return json.dumps({\n 'success': True\n })\n\n def save_pattern_options(self):\n data = {}\n for key, val in json.loads(self.request.form.get('data')).items():\n # need to convert to str: unicode\n data[key.encode('utf8')] = val\n self.registry['plone.patternoptions'] = data\n return 
json.dumps({\n 'success': True\n })\n\n def get_overrides(self):\n overrides = OverrideFolderManager(self.context)\n\n def _read_folder(folder):\n files = []\n for filename in folder.listDirectory():\n item = folder[filename]\n if folder.isDirectory(filename):\n files.extend(_read_folder(item))\n else:\n files.append(item)\n return files\n files = _read_folder(overrides.container)\n results = []\n site_path = self.context.getPhysicalPath()\n for fi in files:\n path = fi.getPhysicalPath()\n rel_path = path[len(site_path) + 2:]\n results.append('++plone++%s/%s' % (\n rel_path[0], '/'.join(rel_path[1:])))\n return results\n\n def config(self):\n base_url = self.context.absolute_url()\n resources = self.get_resources()\n\n try:\n less_url = self.registry['plone.resources.lessc']\n except KeyError:\n less_url = '++plone++static/components/less/dist/less-1.7.4.min.js'\n try:\n rjs_url = resources['rjs'].js\n except KeyError:\n rjs_url = '++plone++static/components/r.js/dist/r.js'\n\n data = {\n 'development': self.registry['plone.resources.development'],\n 'lessvariables': self.registry['plone.lessvariables'],\n 'resources': {},\n 'bundles': {},\n 'javascripts': {},\n 'css': {},\n 'baseUrl': base_url,\n 'manageUrl': '%s/@@resourceregistry-controlpanel' % base_url,\n 'lessUrl': '%s/%s' % (base_url, less_url),\n 'lessConfigUrl': '%s/less-variables.js' % base_url,\n 'rjsUrl': rjs_url,\n 'patternoptions': self.registry['plone.patternoptions']\n }\n bundles = self.get_bundles()\n for key, resource in resources.items():\n data['resources'][key] = recordsToDict(resource)\n for key, bundle in bundles.items():\n data['bundles'][key] = recordsToDict(bundle)\n data['overrides'] = self.get_overrides()\n return json.dumps(data, cls=JSONEncoder)\n", "path": "Products/CMFPlone/controlpanel/browser/resourceregistry.py"}], "after_files": [{"content": "from Products.ResourceRegistries.interfaces.registries import IResourceRegistry\nfrom Products.ResourceRegistries.interfaces.registries import ICSSRegistry\nfrom Products.ResourceRegistries.interfaces.registries import IKSSRegistry\nfrom Products.ResourceRegistries.interfaces.registries import IJSRegistry\nfrom Products.ResourceRegistries.interfaces.registries import ICookedFile\nfrom Products.ResourceRegistries.interfaces.registries import IResourceProvider\nfrom Products.ResourceRegistries.interfaces.viewletmanagers import IHtmlHeadScripts\nfrom Products.ResourceRegistries.interfaces.viewletmanagers import IHtmlHeadStyles\nfrom Products.ResourceRegistries.interfaces.settings import IResourceRegistriesSettings\n", "path": "Products/CMFPlone/resources/viewlets/__init__.py"}, {"content": "from datetime import datetime\nfrom Products.CMFPlone.interfaces import IBundleRegistry\nfrom Products.CMFPlone.interfaces import IResourceRegistry\nfrom Products.CMFPlone.interfaces.resources import OVERRIDE_RESOURCE_DIRECTORY_NAME # noqa\nfrom Products.CMFPlone.resources.browser.configjs import RequireJsView\nfrom StringIO import StringIO\nfrom plone.memoize.view import memoize\nfrom plone.registry.interfaces import IRegistry\nfrom plone.resource.interfaces import IResourceDirectory\nfrom urlparse import urlparse\nfrom zExceptions import NotFound\nfrom zope.component import getUtility\nimport json\n\n\nclass JSONEncoder(json.JSONEncoder):\n def default(self, obj):\n if hasattr(obj, 'isoformat'):\n return obj.isoformat()\n else:\n return json.JSONEncoder.default(self, obj)\n\n\ndef recordsToDict(record):\n data = {}\n for name in record.__schema__.names():\n data[name] = 
getattr(record, name)\n return data\n\n\ndef updateRecordFromDict(record, data):\n for name in record.__schema__.names():\n if name in ['last_compilation']:\n continue\n if name in data:\n # almost all string data needs to be str, not unicode\n val = data[name]\n if isinstance(val, unicode):\n val = val.encode('utf-8')\n if isinstance(val, list):\n newval = []\n for item in val:\n if isinstance(item, unicode):\n item = item.encode('utf-8')\n newval.append(item)\n val = newval\n setattr(record, name, val)\n\n\nclass OverrideFolderManager(object):\n\n def __init__(self, context):\n self.context = context\n persistent_directory = getUtility(IResourceDirectory, name=\"persistent\") # noqa\n if OVERRIDE_RESOURCE_DIRECTORY_NAME not in persistent_directory:\n persistent_directory.makeDirectory(OVERRIDE_RESOURCE_DIRECTORY_NAME) # noqa\n self.container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]\n\n def save_file(self, filepath, data):\n resource_name, resource_filepath = filepath.split('/', 1)\n if resource_name not in self.container:\n self.container.makeDirectory(resource_name)\n folder = self.container[resource_name]\n fi = StringIO(data)\n folder.writeFile(resource_filepath, fi)\n return folder[resource_filepath]\n\n def delete_file(self, filepath):\n resource_name, resource_filepath = filepath.split('/', 1)\n\n if resource_name not in self.container:\n return\n folder = self.container[resource_name]\n try:\n fi = folder[resource_filepath]\n except NotFound:\n return\n parent = self.get_parent(fi)\n del parent[fi.getId()]\n if filepath not in self.container:\n return\n folder = self.container[resource_name]\n try:\n fi = folder[resource_filepath]\n except NotFound:\n return\n parent = self.get_parent(fi)\n del parent[fi.getId()]\n\n def get_parent(self, item):\n path = '/'.join(item.getPhysicalPath()[:-1])\n return self.context.restrictedTraverse(path)\n\n def list_dir(self, container):\n if hasattr(container, 'listDirectory'):\n return container.listDirectory()\n else:\n return container.objectIds()\n\n\nclass ResourceRegistryControlPanelView(RequireJsView):\n\n def __call__(self):\n req = self.request\n if req.REQUEST_METHOD == 'POST':\n action = req.get('action', '')\n method = action.replace('-', '_')\n if hasattr(self, method):\n return getattr(self, method)()\n else:\n return json.dumps({\n 'success': False,\n 'msg': 'Invalid action: ' + action\n })\n else:\n return self.index()\n\n @property\n @memoize\n def registry(self):\n return getUtility(IRegistry)\n\n def update_registry_collection(self, itype, prefix, newdata):\n rdata = self.registry.collectionOfInterface(itype, prefix=prefix)\n for key, data in newdata.items():\n if key not in rdata:\n record = rdata.add(key)\n else:\n record = rdata[key]\n updateRecordFromDict(record, data)\n # remove missing ones\n for key in set(rdata.keys()) - set(newdata.keys()):\n del rdata[key]\n\n def save_development_mode(self):\n if self.request.form.get('value', '').lower() == 'true':\n self.registry['plone.resources.development'] = True\n else:\n self.registry['plone.resources.development'] = False\n return json.dumps({\n 'success': True\n })\n\n def save_registry(self):\n req = self.request\n\n self.update_registry_collection(\n IResourceRegistry, \"plone.resources\",\n json.loads(req.get('resources')))\n self.update_registry_collection(\n IBundleRegistry, \"plone.bundles\",\n json.loads(req.get('bundles')))\n\n return json.dumps({\n 'success': True\n })\n\n def save_file(self):\n req = self.request\n resource_path = 
req.form.get('filepath').split('++plone++')[-1]\n overrides = OverrideFolderManager(self.context)\n overrides.save_file(resource_path, req.form['data'])\n return json.dumps({\n 'success': True\n })\n\n def delete_file(self):\n req = self.request\n resource_path = req.form.get('filepath').split('++plone++')[-1]\n overrides = OverrideFolderManager(self.context)\n overrides.delete_file(resource_path)\n\n return json.dumps({\n 'success': True\n })\n\n def get_bundles(self):\n return self.registry.collectionOfInterface(\n IBundleRegistry, prefix=\"plone.bundles\")\n\n def get_resources(self):\n return self.registry.collectionOfInterface(\n IResourceRegistry, prefix=\"plone.resources\")\n\n def less_build_config(self):\n site_url = self.context.portal_url()\n bundles = self.get_bundles()\n bundle = self.request.get('bundle', None)\n resources = self.get_resources()\n less_files = []\n if bundle and bundle in bundles:\n bundle_obj = bundles[bundle]\n for resource in bundle_obj.resources:\n if resource in resources:\n for css in resources[resource].css:\n url = urlparse(css)\n if url.netloc == '':\n # Local\n src = \"%s/%s\" % (site_url, css)\n else:\n src = \"%s\" % (css)\n\n extension = url.path.split('.')[-1]\n if extension == 'less':\n less_files.append(src)\n return json.dumps({\n 'less': less_files,\n })\n\n def js_build_config(self):\n (baseUrl, paths, shims) = self.get_requirejs_config()\n bundles = self.get_bundles()\n resources = self.get_resources()\n\n bundle = self.request.get('bundle', None)\n includes = []\n if bundle and bundle in bundles:\n bundle_obj = bundles[bundle]\n for resource_name in bundle_obj.resources:\n # need to check if this resource has a js file\n # it could just be a css resource\n try:\n resource = resources[resource_name]\n if resource.js:\n includes.append(resource_name)\n except KeyError:\n # skip if missing\n pass\n return json.dumps({\n 'include': includes,\n 'shims': shims,\n 'paths': paths\n })\n\n def save_js_build(self):\n overrides = OverrideFolderManager(self.context)\n req = self.request\n filepath = 'static/%s-compiled.js' % req.form['bundle']\n overrides.save_file(filepath, req.form['data'])\n bundle = self.get_bundles().get(req.form['bundle'])\n if bundle:\n bundle.last_compilation = datetime.now()\n bundle.jscompilation = '++plone++{}'.format(filepath)\n return json.dumps({\n 'success': True,\n 'filepath': '++plone++' + filepath\n })\n\n def save_less_build(self):\n overrides = OverrideFolderManager(self.context)\n req = self.request\n filepath = 'static/%s-compiled.css' % req.form['bundle']\n data = '\\n'.join([req.form[k] for k in req.form.keys()\n if k.startswith('data-')])\n overrides.save_file(filepath, data)\n bundle = self.get_bundles().get(req.form['bundle'])\n if bundle:\n bundle.last_compilation = datetime.now()\n bundle.csscompilation = '++plone++{}'.format(filepath)\n return json.dumps({\n 'success': True,\n 'filepath': '++plone++' + filepath\n })\n\n def save_less_variables(self):\n data = {}\n for key, val in json.loads(self.request.form.get('data')).items():\n # need to convert to str: unicode\n data[key.encode('utf8')] = val\n self.registry['plone.lessvariables'] = data\n return json.dumps({\n 'success': True\n })\n\n def save_pattern_options(self):\n data = {}\n for key, val in json.loads(self.request.form.get('data')).items():\n # need to convert to str: unicode\n data[key.encode('utf8')] = val\n self.registry['plone.patternoptions'] = data\n return json.dumps({\n 'success': True\n })\n\n def get_overrides(self):\n overrides = 
OverrideFolderManager(self.context)\n\n def _read_folder(folder):\n files = []\n for filename in folder.listDirectory():\n item = folder[filename]\n if folder.isDirectory(filename):\n files.extend(_read_folder(item))\n else:\n files.append(item)\n return files\n files = _read_folder(overrides.container)\n results = []\n site_path = self.context.getPhysicalPath()\n for fi in files:\n path = fi.getPhysicalPath()\n rel_path = path[len(site_path) + 2:]\n results.append('++plone++%s/%s' % (\n rel_path[0], '/'.join(rel_path[1:])))\n return results\n\n def config(self):\n base_url = self.context.absolute_url()\n resources = self.get_resources()\n\n try:\n less_url = self.registry['plone.resources.lessc']\n except KeyError:\n less_url = '++plone++static/components/less/dist/less-1.7.4.min.js'\n try:\n rjs_url = resources['rjs'].js\n except KeyError:\n rjs_url = '++plone++static/components/r.js/dist/r.js'\n\n data = {\n 'development': self.registry['plone.resources.development'],\n 'lessvariables': self.registry['plone.lessvariables'],\n 'resources': {},\n 'bundles': {},\n 'javascripts': {},\n 'css': {},\n 'baseUrl': base_url,\n 'manageUrl': '%s/@@resourceregistry-controlpanel' % base_url,\n 'lessUrl': '%s/%s' % (base_url, less_url),\n 'lessConfigUrl': '%s/less-variables.js' % base_url,\n 'rjsUrl': rjs_url,\n 'patternoptions': self.registry['plone.patternoptions']\n }\n bundles = self.get_bundles()\n for key, resource in resources.items():\n data['resources'][key] = recordsToDict(resource)\n for key, bundle in bundles.items():\n data['bundles'][key] = recordsToDict(bundle)\n data['overrides'] = self.get_overrides()\n return json.dumps(data, cls=JSONEncoder)\n", "path": "Products/CMFPlone/controlpanel/browser/resourceregistry.py"}]}
| 3,954 | 362 |
gh_patches_debug_39731
|
rasdani/github-patches
|
git_diff
|
OCHA-DAP__hdx-ckan-1835
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Redirect a non-new user to Newsfeed instead of My Organisations
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckanext-hdx_users/ckanext/hdx_users/controllers/login_controller.py`
Content:
```
1 import datetime
2 import dateutil
3
4 import ckan.controllers.user as ckan_user
5 import ckan.lib.helpers as h
6 import ckan.lib.base as base
7 from ckan.common import _, c, g, request
8 import ckan.logic as logic
9 from pylons import config
10
11 get_action = logic.get_action
12
13 class LoginController(ckan_user.UserController):
14 def logged_in(self):
15 # redirect if needed
16 came_from = request.params.get('came_from', '')
17 if self._sane_came_from(came_from):
18 return h.redirect_to(str(came_from))
19
20 if c.user:
21 context = None
22 data_dict = {'id': c.user}
23
24 user_dict = get_action('user_show')(context, data_dict)
25
26 if 'created' in user_dict:
27 time_passed = datetime.datetime.now() - dateutil.parser.parse( user_dict['created'] )
28 else:
29 time_passed = None
30
31 if not user_dict['activity'] and time_passed and time_passed.days < 3:
32 #/dataset/new
33 contribute_url = h.url_for(controller='package', action='new')
34 # message = ''' Now that you've registered an account , you can <a href="%s">start adding datasets</a>.
35 # If you want to associate this dataset with an organization, either click on "My Organizations" below
36 # to create a new organization or ask the admin of an existing organization to add you as a member.''' % contribute_url
37 #h.flash_success(_(message), True)
38 else:
39 h.flash_success(_("%s is now logged in") %
40 user_dict['display_name'])
41 #return self.me()
42 # Instead redirect to My orgs page
43 return h.redirect_to(controller='user',
44 action='dashboard_organizations')
45 else:
46 err = _('Login failed. Bad username or password.')
47 if g.openid_enabled:
48 err += _(' (Or if using OpenID, it hasn\'t been associated '
49 'with a user account.)')
50 if h.asbool(config.get('ckan.legacy_templates', 'false')):
51 h.flash_error(err)
52 h.redirect_to(controller='user',
53 action='login', came_from=came_from)
54 else:
55 return self.login(error=err)
56
57 def contribute(self, error=None):
58 self.login(error)
59 vars = {'contribute':True}
60 return base.render('user/login.html', extra_vars=vars)
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ckanext-hdx_users/ckanext/hdx_users/controllers/login_controller.py b/ckanext-hdx_users/ckanext/hdx_users/controllers/login_controller.py
--- a/ckanext-hdx_users/ckanext/hdx_users/controllers/login_controller.py
+++ b/ckanext-hdx_users/ckanext/hdx_users/controllers/login_controller.py
@@ -10,7 +10,9 @@
get_action = logic.get_action
+
class LoginController(ckan_user.UserController):
+
def logged_in(self):
# redirect if needed
came_from = request.params.get('came_from', '')
@@ -24,24 +26,22 @@
user_dict = get_action('user_show')(context, data_dict)
if 'created' in user_dict:
- time_passed = datetime.datetime.now() - dateutil.parser.parse( user_dict['created'] )
+ time_passed = datetime.datetime.now(
+ ) - dateutil.parser.parse(user_dict['created'])
else:
- time_passed = None
-
+ time_passed = None
if not user_dict['activity'] and time_passed and time_passed.days < 3:
- #/dataset/new
- contribute_url = h.url_for(controller='package', action='new')
- # message = ''' Now that you've registered an account , you can <a href="%s">start adding datasets</a>.
- # If you want to associate this dataset with an organization, either click on "My Organizations" below
+ #/dataset/new
+ contribute_url = h.url_for(controller='package', action='new')
+ # message = ''' Now that you've registered an account , you can <a href="%s">start adding datasets</a>.
+ # If you want to associate this dataset with an organization, either click on "My Organizations" below
# to create a new organization or ask the admin of an existing organization to add you as a member.''' % contribute_url
#h.flash_success(_(message), True)
+ return h.redirect_to(controller='user', action='dashboard_organizations')
else:
h.flash_success(_("%s is now logged in") %
- user_dict['display_name'])
- #return self.me()
- # Instead redirect to My orgs page
- return h.redirect_to(controller='user',
- action='dashboard_organizations')
+ user_dict['display_name'])
+ return self.me()
else:
err = _('Login failed. Bad username or password.')
if g.openid_enabled:
@@ -53,8 +53,8 @@
action='login', came_from=came_from)
else:
return self.login(error=err)
-
+
def contribute(self, error=None):
self.login(error)
- vars = {'contribute':True}
- return base.render('user/login.html', extra_vars=vars)
\ No newline at end of file
+ vars = {'contribute': True}
+ return base.render('user/login.html', extra_vars=vars)
|
{"golden_diff": "diff --git a/ckanext-hdx_users/ckanext/hdx_users/controllers/login_controller.py b/ckanext-hdx_users/ckanext/hdx_users/controllers/login_controller.py\n--- a/ckanext-hdx_users/ckanext/hdx_users/controllers/login_controller.py\n+++ b/ckanext-hdx_users/ckanext/hdx_users/controllers/login_controller.py\n@@ -10,7 +10,9 @@\n \n get_action = logic.get_action\n \n+\n class LoginController(ckan_user.UserController):\n+\n def logged_in(self):\n # redirect if needed\n came_from = request.params.get('came_from', '')\n@@ -24,24 +26,22 @@\n user_dict = get_action('user_show')(context, data_dict)\n \n if 'created' in user_dict:\n- time_passed = datetime.datetime.now() - dateutil.parser.parse( user_dict['created'] )\n+ time_passed = datetime.datetime.now(\n+ ) - dateutil.parser.parse(user_dict['created'])\n else:\n- time_passed = None \n- \n+ time_passed = None\n if not user_dict['activity'] and time_passed and time_passed.days < 3:\n- #/dataset/new \n- contribute_url = h.url_for(controller='package', action='new')\n- # message = ''' Now that you've registered an account , you can <a href=\"%s\">start adding datasets</a>. \n- # If you want to associate this dataset with an organization, either click on \"My Organizations\" below \n+ #/dataset/new\n+ contribute_url = h.url_for(controller='package', action='new')\n+ # message = ''' Now that you've registered an account , you can <a href=\"%s\">start adding datasets</a>.\n+ # If you want to associate this dataset with an organization, either click on \"My Organizations\" below\n # to create a new organization or ask the admin of an existing organization to add you as a member.''' % contribute_url\n #h.flash_success(_(message), True)\n+ return h.redirect_to(controller='user', action='dashboard_organizations')\n else:\n h.flash_success(_(\"%s is now logged in\") %\n- user_dict['display_name'])\n- #return self.me()\n- # Instead redirect to My orgs page\n- return h.redirect_to(controller='user',\n- action='dashboard_organizations')\n+ user_dict['display_name'])\n+ return self.me()\n else:\n err = _('Login failed. 
Bad username or password.')\n if g.openid_enabled:\n@@ -53,8 +53,8 @@\n action='login', came_from=came_from)\n else:\n return self.login(error=err)\n- \n+\n def contribute(self, error=None):\n self.login(error)\n- vars = {'contribute':True}\n- return base.render('user/login.html', extra_vars=vars)\n\\ No newline at end of file\n+ vars = {'contribute': True}\n+ return base.render('user/login.html', extra_vars=vars)\n", "issue": "Redirect a non-new user to Newsfeed instead of My Organisations\n\n", "before_files": [{"content": "import datetime\nimport dateutil\n\nimport ckan.controllers.user as ckan_user\nimport ckan.lib.helpers as h\nimport ckan.lib.base as base\nfrom ckan.common import _, c, g, request\nimport ckan.logic as logic\nfrom pylons import config\n\nget_action = logic.get_action\n\nclass LoginController(ckan_user.UserController):\n def logged_in(self):\n # redirect if needed\n came_from = request.params.get('came_from', '')\n if self._sane_came_from(came_from):\n return h.redirect_to(str(came_from))\n\n if c.user:\n context = None\n data_dict = {'id': c.user}\n\n user_dict = get_action('user_show')(context, data_dict)\n\n if 'created' in user_dict:\n time_passed = datetime.datetime.now() - dateutil.parser.parse( user_dict['created'] )\n else:\n time_passed = None \n \n if not user_dict['activity'] and time_passed and time_passed.days < 3:\n #/dataset/new \n contribute_url = h.url_for(controller='package', action='new')\n # message = ''' Now that you've registered an account , you can <a href=\"%s\">start adding datasets</a>. \n # If you want to associate this dataset with an organization, either click on \"My Organizations\" below \n # to create a new organization or ask the admin of an existing organization to add you as a member.''' % contribute_url\n #h.flash_success(_(message), True)\n else:\n h.flash_success(_(\"%s is now logged in\") %\n user_dict['display_name'])\n #return self.me()\n # Instead redirect to My orgs page\n return h.redirect_to(controller='user',\n action='dashboard_organizations')\n else:\n err = _('Login failed. 
Bad username or password.')\n if g.openid_enabled:\n err += _(' (Or if using OpenID, it hasn\\'t been associated '\n 'with a user account.)')\n if h.asbool(config.get('ckan.legacy_templates', 'false')):\n h.flash_error(err)\n h.redirect_to(controller='user',\n action='login', came_from=came_from)\n else:\n return self.login(error=err)\n \n def contribute(self, error=None):\n self.login(error)\n vars = {'contribute':True}\n return base.render('user/login.html', extra_vars=vars)", "path": "ckanext-hdx_users/ckanext/hdx_users/controllers/login_controller.py"}], "after_files": [{"content": "import datetime\nimport dateutil\n\nimport ckan.controllers.user as ckan_user\nimport ckan.lib.helpers as h\nimport ckan.lib.base as base\nfrom ckan.common import _, c, g, request\nimport ckan.logic as logic\nfrom pylons import config\n\nget_action = logic.get_action\n\n\nclass LoginController(ckan_user.UserController):\n\n def logged_in(self):\n # redirect if needed\n came_from = request.params.get('came_from', '')\n if self._sane_came_from(came_from):\n return h.redirect_to(str(came_from))\n\n if c.user:\n context = None\n data_dict = {'id': c.user}\n\n user_dict = get_action('user_show')(context, data_dict)\n\n if 'created' in user_dict:\n time_passed = datetime.datetime.now(\n ) - dateutil.parser.parse(user_dict['created'])\n else:\n time_passed = None\n if not user_dict['activity'] and time_passed and time_passed.days < 3:\n #/dataset/new\n contribute_url = h.url_for(controller='package', action='new')\n # message = ''' Now that you've registered an account , you can <a href=\"%s\">start adding datasets</a>.\n # If you want to associate this dataset with an organization, either click on \"My Organizations\" below\n # to create a new organization or ask the admin of an existing organization to add you as a member.''' % contribute_url\n #h.flash_success(_(message), True)\n return h.redirect_to(controller='user', action='dashboard_organizations')\n else:\n h.flash_success(_(\"%s is now logged in\") %\n user_dict['display_name'])\n return self.me()\n else:\n err = _('Login failed. Bad username or password.')\n if g.openid_enabled:\n err += _(' (Or if using OpenID, it hasn\\'t been associated '\n 'with a user account.)')\n if h.asbool(config.get('ckan.legacy_templates', 'false')):\n h.flash_error(err)\n h.redirect_to(controller='user',\n action='login', came_from=came_from)\n else:\n return self.login(error=err)\n\n def contribute(self, error=None):\n self.login(error)\n vars = {'contribute': True}\n return base.render('user/login.html', extra_vars=vars)\n", "path": "ckanext-hdx_users/ckanext/hdx_users/controllers/login_controller.py"}]}
| 921 | 671 |
gh_patches_debug_53600
|
rasdani/github-patches
|
git_diff
|
aws__aws-cli-577
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
typo in s3api list-objects documentation
The documentation for the s3api list-objects --max-items parameter says that a `NextMarker` will be provided, while the --starting-token parameter refers to this as `NextToken` which is the actual name of the returned token in JSON.
So in short I think that the `NextMarker` should really say `NextToken` to prevent any confusion.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awscli/customizations/paginate.py`
Content:
```
1 # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 """This module has customizations to unify paging paramters.
14
15 For any operation that can be paginated, we will:
16
17 * Remove the service specific pagination params. This can vary across
18 services and we're going to replace them with a consistent set of
19 arguments.
20 * Add a ``--starting-token`` and a ``--max-items`` argument.
21
22 """
23 import logging
24
25 from awscli.arguments import BaseCLIArgument
26 from botocore.parameters import StringParameter
27
28 logger = logging.getLogger(__name__)
29
30
31 STARTING_TOKEN_HELP = """
32 <p>A token to specify where to start paginating. This is the
33 <code>NextToken</code> from a previously truncated response.</p>
34 """
35
36 MAX_ITEMS_HELP = """
37 <p>The total number of items to return. If the total number
38 of items available is more than the value specified in
39 max-items then a <code>NextMarker</code> will
40 be provided in the output that you can use to resume pagination.
41 """
42
43
44 def unify_paging_params(argument_table, operation, **kwargs):
45 if not operation.can_paginate:
46 # We only apply these customizations to paginated responses.
47 return
48 logger.debug("Modifying paging parameters for operation: %s", operation)
49 _remove_existing_paging_arguments(argument_table, operation)
50 argument_table['starting-token'] = PageArgument('starting-token',
51 STARTING_TOKEN_HELP,
52 operation,
53 parse_type='string')
54 argument_table['max-items'] = PageArgument('max-items', MAX_ITEMS_HELP,
55 operation, parse_type='integer')
56
57
58 def _remove_existing_paging_arguments(argument_table, operation):
59 tokens = _get_input_tokens(operation)
60 for token_name in tokens:
61 cli_name = _get_cli_name(operation.params, token_name)
62 del argument_table[cli_name]
63 if 'limit_key' in operation.pagination:
64 key_name = operation.pagination['limit_key']
65 cli_name = _get_cli_name(operation.params, key_name)
66 del argument_table[cli_name]
67
68
69 def _get_input_tokens(operation):
70 config = operation.pagination
71 tokens = config['input_token']
72 if not isinstance(tokens, list):
73 return [tokens]
74 return tokens
75
76
77 def _get_cli_name(param_objects, token_name):
78 for param in param_objects:
79 if param.name == token_name:
80 return param.cli_name.lstrip('-')
81
82
83 class PageArgument(BaseCLIArgument):
84 type_map = {
85 'string': str,
86 'integer': int,
87 }
88
89 def __init__(self, name, documentation, operation, parse_type):
90 param = StringParameter(operation, name=name, type=parse_type)
91 self._name = name
92 self.argument_object = param
93 self._name = name
94 self._documentation = documentation
95 self._parse_type = parse_type
96
97 @property
98 def cli_name(self):
99 return '--' + self._name
100
101 @property
102 def cli_type_name(self):
103 return self._parse_type
104
105 @property
106 def required(self):
107 return False
108
109 @property
110 def documentation(self):
111 return self._documentation
112
113 def add_to_parser(self, parser):
114 parser.add_argument(self.cli_name, dest=self.py_name,
115 type=self.type_map[self._parse_type])
116
117 def add_to_params(self, parameters, value):
118 if value is not None:
119 parameters[self.py_name] = value
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/awscli/customizations/paginate.py b/awscli/customizations/paginate.py
--- a/awscli/customizations/paginate.py
+++ b/awscli/customizations/paginate.py
@@ -36,7 +36,7 @@
MAX_ITEMS_HELP = """
<p>The total number of items to return. If the total number
of items available is more than the value specified in
-max-items then a <code>NextMarker</code> will
+max-items then a <code>NextToken</code> will
be provided in the output that you can use to resume pagination.
"""
|
{"golden_diff": "diff --git a/awscli/customizations/paginate.py b/awscli/customizations/paginate.py\n--- a/awscli/customizations/paginate.py\n+++ b/awscli/customizations/paginate.py\n@@ -36,7 +36,7 @@\n MAX_ITEMS_HELP = \"\"\"\n <p>The total number of items to return. If the total number\n of items available is more than the value specified in\n-max-items then a <code>NextMarker</code> will\n+max-items then a <code>NextToken</code> will\n be provided in the output that you can use to resume pagination.\n \"\"\"\n", "issue": "typo in s3api list-objects documentation\nThe documentation for the s3api list-objects --max-items parameter says that a `NextMarker` will be provided, while the --starting-token parameter refers to this as `NextToken` which is the actual name of the returned token in JSON.\n\nSo in short I think that the `NextMarker` should really say `NextToken` to prevent any confusion.\n\n", "before_files": [{"content": "# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\"\"\"This module has customizations to unify paging paramters.\n\nFor any operation that can be paginated, we will:\n\n * Remove the service specific pagination params. This can vary across\n services and we're going to replace them with a consistent set of\n arguments.\n * Add a ``--starting-token`` and a ``--max-items`` argument.\n\n\"\"\"\nimport logging\n\nfrom awscli.arguments import BaseCLIArgument\nfrom botocore.parameters import StringParameter\n\nlogger = logging.getLogger(__name__)\n\n\nSTARTING_TOKEN_HELP = \"\"\"\n<p>A token to specify where to start paginating. This is the\n<code>NextToken</code> from a previously truncated response.</p>\n\"\"\"\n\nMAX_ITEMS_HELP = \"\"\"\n<p>The total number of items to return. 
If the total number\nof items available is more than the value specified in\nmax-items then a <code>NextMarker</code> will\nbe provided in the output that you can use to resume pagination.\n\"\"\"\n\n\ndef unify_paging_params(argument_table, operation, **kwargs):\n if not operation.can_paginate:\n # We only apply these customizations to paginated responses.\n return\n logger.debug(\"Modifying paging parameters for operation: %s\", operation)\n _remove_existing_paging_arguments(argument_table, operation)\n argument_table['starting-token'] = PageArgument('starting-token',\n STARTING_TOKEN_HELP,\n operation,\n parse_type='string')\n argument_table['max-items'] = PageArgument('max-items', MAX_ITEMS_HELP,\n operation, parse_type='integer')\n\n\ndef _remove_existing_paging_arguments(argument_table, operation):\n tokens = _get_input_tokens(operation)\n for token_name in tokens:\n cli_name = _get_cli_name(operation.params, token_name)\n del argument_table[cli_name]\n if 'limit_key' in operation.pagination:\n key_name = operation.pagination['limit_key']\n cli_name = _get_cli_name(operation.params, key_name)\n del argument_table[cli_name]\n\n\ndef _get_input_tokens(operation):\n config = operation.pagination\n tokens = config['input_token']\n if not isinstance(tokens, list):\n return [tokens]\n return tokens\n\n\ndef _get_cli_name(param_objects, token_name):\n for param in param_objects:\n if param.name == token_name:\n return param.cli_name.lstrip('-')\n\n\nclass PageArgument(BaseCLIArgument):\n type_map = {\n 'string': str,\n 'integer': int,\n }\n\n def __init__(self, name, documentation, operation, parse_type):\n param = StringParameter(operation, name=name, type=parse_type)\n self._name = name\n self.argument_object = param\n self._name = name\n self._documentation = documentation\n self._parse_type = parse_type\n\n @property\n def cli_name(self):\n return '--' + self._name\n\n @property\n def cli_type_name(self):\n return self._parse_type\n\n @property\n def required(self):\n return False\n\n @property\n def documentation(self):\n return self._documentation\n\n def add_to_parser(self, parser):\n parser.add_argument(self.cli_name, dest=self.py_name,\n type=self.type_map[self._parse_type])\n\n def add_to_params(self, parameters, value):\n if value is not None:\n parameters[self.py_name] = value\n", "path": "awscli/customizations/paginate.py"}], "after_files": [{"content": "# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\"\"\"This module has customizations to unify paging paramters.\n\nFor any operation that can be paginated, we will:\n\n * Remove the service specific pagination params. 
This can vary across\n services and we're going to replace them with a consistent set of\n arguments.\n * Add a ``--starting-token`` and a ``--max-items`` argument.\n\n\"\"\"\nimport logging\n\nfrom awscli.arguments import BaseCLIArgument\nfrom botocore.parameters import StringParameter\n\nlogger = logging.getLogger(__name__)\n\n\nSTARTING_TOKEN_HELP = \"\"\"\n<p>A token to specify where to start paginating. This is the\n<code>NextToken</code> from a previously truncated response.</p>\n\"\"\"\n\nMAX_ITEMS_HELP = \"\"\"\n<p>The total number of items to return. If the total number\nof items available is more than the value specified in\nmax-items then a <code>NextToken</code> will\nbe provided in the output that you can use to resume pagination.\n\"\"\"\n\n\ndef unify_paging_params(argument_table, operation, **kwargs):\n if not operation.can_paginate:\n # We only apply these customizations to paginated responses.\n return\n logger.debug(\"Modifying paging parameters for operation: %s\", operation)\n _remove_existing_paging_arguments(argument_table, operation)\n argument_table['starting-token'] = PageArgument('starting-token',\n STARTING_TOKEN_HELP,\n operation,\n parse_type='string')\n argument_table['max-items'] = PageArgument('max-items', MAX_ITEMS_HELP,\n operation, parse_type='integer')\n\n\ndef _remove_existing_paging_arguments(argument_table, operation):\n tokens = _get_input_tokens(operation)\n for token_name in tokens:\n cli_name = _get_cli_name(operation.params, token_name)\n del argument_table[cli_name]\n if 'limit_key' in operation.pagination:\n key_name = operation.pagination['limit_key']\n cli_name = _get_cli_name(operation.params, key_name)\n del argument_table[cli_name]\n\n\ndef _get_input_tokens(operation):\n config = operation.pagination\n tokens = config['input_token']\n if not isinstance(tokens, list):\n return [tokens]\n return tokens\n\n\ndef _get_cli_name(param_objects, token_name):\n for param in param_objects:\n if param.name == token_name:\n return param.cli_name.lstrip('-')\n\n\nclass PageArgument(BaseCLIArgument):\n type_map = {\n 'string': str,\n 'integer': int,\n }\n\n def __init__(self, name, documentation, operation, parse_type):\n param = StringParameter(operation, name=name, type=parse_type)\n self._name = name\n self.argument_object = param\n self._name = name\n self._documentation = documentation\n self._parse_type = parse_type\n\n @property\n def cli_name(self):\n return '--' + self._name\n\n @property\n def cli_type_name(self):\n return self._parse_type\n\n @property\n def required(self):\n return False\n\n @property\n def documentation(self):\n return self._documentation\n\n def add_to_parser(self, parser):\n parser.add_argument(self.cli_name, dest=self.py_name,\n type=self.type_map[self._parse_type])\n\n def add_to_params(self, parameters, value):\n if value is not None:\n parameters[self.py_name] = value\n", "path": "awscli/customizations/paginate.py"}]}
| 1,467 | 131 |
gh_patches_debug_12275
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-5052
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MSBuild should allow "do not adjust PlatformToolset" with an argument
Hey,
In https://github.com/conan-io/conan/pull/4250 the default behavior of VisualStudio's toolset selection was changed to compile the whole solution using the same toolset (the `/p:PlatformToolset` is always set - see discussion here https://github.com/conan-io/conan/pull/4250#pullrequestreview-193994771).
This change prevents using two different toolsets in the same solution. In my case building a driver using the `WindowsKernelModeDriver10.0` (which is not even a valid option right now) toolset and user mode library using the `v141` toolset.
I will be happy to write the code if this behavior change is accepted.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conans/client/build/msbuild.py`
Content:
```
1 import copy
2 import os
3 import re
4 import subprocess
5
6 from conans.client import tools
7 from conans.client.build.visual_environment import (VisualStudioBuildEnvironment,
8 vs_build_type_flags, vs_std_cpp)
9 from conans.client.tools.oss import cpu_count
10 from conans.client.tools.win import vcvars_command
11 from conans.errors import ConanException
12 from conans.model.conan_file import ConanFile
13 from conans.model.version import Version
14 from conans.tools import vcvars_command as tools_vcvars_command
15 from conans.util.env_reader import get_env
16 from conans.util.files import decode_text, save
17
18
19 class MSBuild(object):
20
21 def __init__(self, conanfile):
22 if isinstance(conanfile, ConanFile):
23 self._conanfile = conanfile
24 self._settings = self._conanfile.settings
25 self._output = self._conanfile.output
26 self.build_env = VisualStudioBuildEnvironment(self._conanfile,
27 with_build_type_flags=False)
28 else: # backwards compatible with build_sln_command
29 self._settings = conanfile
30 self.build_env = None
31
32 def build(self, project_file, targets=None, upgrade_project=True, build_type=None, arch=None,
33 parallel=True, force_vcvars=False, toolset=None, platforms=None, use_env=True,
34 vcvars_ver=None, winsdk_version=None, properties=None, output_binary_log=None,
35 property_file_name=None, verbosity=None, definitions=None):
36 """
37 :param project_file: Path to the .sln file.
38 :param targets: List of targets to build.
39 :param upgrade_project: Will call devenv to upgrade the solution to your current Visual Studio.
40 :param build_type: Use a custom build type name instead of the default settings.build_type one.
41 :param arch: Use a custom architecture name instead of the settings.arch one.
42 It will be used to build the /p:Configuration= parameter of MSBuild.
43 It can be used as the key of the platforms parameter. E.g. arch="x86", platforms={"x86": "i386"}
44 :param parallel: Will use the configured number of cores in the conan.conf file or tools.cpu_count():
45 In the solution: Building the solution with the projects in parallel. (/m: parameter).
46 CL compiler: Building the sources in parallel. (/MP: compiler flag)
47 :param force_vcvars: Will ignore if the environment is already set for a different Visual Studio version.
48 :param toolset: Specify a toolset. Will append a /p:PlatformToolset option.
49 :param platforms: Dictionary with the mapping of archs/platforms from Conan naming to another one.
50 It is useful for Visual Studio solutions that have a different naming in architectures.
51 Example: platforms={"x86":"Win32"} (Visual solution uses "Win32" instead of "x86").
52 This dictionary will update the default one:
53 msvc_arch = {'x86': 'x86', 'x86_64': 'x64', 'armv7': 'ARM', 'armv8': 'ARM64'}
54 :param use_env: Applies the argument /p:UseEnv=true to the MSBuild call.
55 :param vcvars_ver: Specifies the Visual Studio compiler toolset to use.
56 :param winsdk_version: Specifies the version of the Windows SDK to use.
57 :param properties: Dictionary with new properties, for each element in the dictionary {name: value}
58 it will append a /p:name="value" option.
59 :param output_binary_log: If set to True then MSBuild will output a binary log file called msbuild.binlog in
60 the working directory. It can also be used to set the name of log file like this
61 output_binary_log="my_log.binlog".
62 This parameter is only supported starting from MSBuild version 15.3 and onwards.
63 :param property_file_name: When None it will generate a file named conan_build.props.
64 You can specify a different name for the generated properties file.
65 :param verbosity: Specifies verbosity level (/verbosity: parameter)
66 :param definitions: Dictionary with additional compiler definitions to be applied during the build.
67 Use value of None to set compiler definition with no value.
68 :return: status code of the MSBuild command invocation
69 """
70
71 property_file_name = property_file_name or "conan_build.props"
72 self.build_env.parallel = parallel
73
74 with tools.environment_append(self.build_env.vars):
75 # Path for custom properties file
76 props_file_contents = self._get_props_file_contents(definitions)
77 property_file_name = os.path.abspath(property_file_name)
78 save(property_file_name, props_file_contents)
79 vcvars = vcvars_command(self._conanfile.settings, force=force_vcvars,
80 vcvars_ver=vcvars_ver, winsdk_version=winsdk_version,
81 output=self._output)
82 command = self.get_command(project_file, property_file_name,
83 targets=targets, upgrade_project=upgrade_project,
84 build_type=build_type, arch=arch, parallel=parallel,
85 toolset=toolset, platforms=platforms,
86 use_env=use_env, properties=properties,
87 output_binary_log=output_binary_log,
88 verbosity=verbosity)
89 command = "%s && %s" % (vcvars, command)
90 return self._conanfile.run(command)
91
92 def get_command(self, project_file, props_file_path=None, targets=None, upgrade_project=True,
93 build_type=None, arch=None, parallel=True, toolset=None, platforms=None,
94 use_env=False, properties=None, output_binary_log=None, verbosity=None):
95
96 targets = targets or []
97 properties = properties or {}
98 command = []
99
100 if upgrade_project and not get_env("CONAN_SKIP_VS_PROJECTS_UPGRADE", False):
101 command.append('devenv "%s" /upgrade &&' % project_file)
102 else:
103 self._output.info("Skipped sln project upgrade")
104
105 build_type = build_type or self._settings.get_safe("build_type")
106 arch = arch or self._settings.get_safe("arch")
107 toolset = toolset or tools.msvs_toolset(self._settings)
108 verbosity = os.getenv("CONAN_MSBUILD_VERBOSITY") or verbosity or "minimal"
109 if not build_type:
110 raise ConanException("Cannot build_sln_command, build_type not defined")
111 if not arch:
112 raise ConanException("Cannot build_sln_command, arch not defined")
113
114 command.append('msbuild "%s" /p:Configuration="%s"' % (project_file, build_type))
115 msvc_arch = {'x86': 'x86',
116 'x86_64': 'x64',
117 'armv7': 'ARM',
118 'armv8': 'ARM64'}
119 if platforms:
120 msvc_arch.update(platforms)
121 msvc_arch = msvc_arch.get(str(arch))
122 if self._settings.get_safe("os") == "WindowsCE":
123 msvc_arch = self._settings.get_safe("os.platform")
124 try:
125 sln = tools.load(project_file)
126 pattern = re.compile(r"GlobalSection\(SolutionConfigurationPlatforms\)"
127 r"(.*?)EndGlobalSection", re.DOTALL)
128 solution_global = pattern.search(sln).group(1)
129 lines = solution_global.splitlines()
130 lines = [s.split("=")[0].strip() for s in lines]
131 except Exception:
132 pass # TODO: !!! what are we catching here? tools.load? .group(1)? .splitlines?
133 else:
134 config = "%s|%s" % (build_type, msvc_arch)
135 if config not in "".join(lines):
136 self._output.warn("***** The configuration %s does not exist in this solution *****"
137 % config)
138 self._output.warn("Use 'platforms' argument to define your architectures")
139
140 if output_binary_log:
141 msbuild_version = MSBuild.get_version(self._settings)
142 if msbuild_version >= "15.3": # http://msbuildlog.com/
143 command.append('/bl' if isinstance(output_binary_log, bool)
144 else '/bl:"%s"' % output_binary_log)
145 else:
146 raise ConanException("MSBuild version detected (%s) does not support "
147 "'output_binary_log' ('/bl')" % msbuild_version)
148
149 if use_env:
150 command.append('/p:UseEnv=true')
151
152 if msvc_arch:
153 command.append('/p:Platform="%s"' % msvc_arch)
154
155 if parallel:
156 command.append('/m:%s' % cpu_count(output=self._output))
157
158 if targets:
159 command.append("/target:%s" % ";".join(targets))
160
161 if toolset:
162 command.append('/p:PlatformToolset="%s"' % toolset)
163
164 if verbosity:
165 command.append('/verbosity:%s' % verbosity)
166
167 if props_file_path:
168 command.append('/p:ForceImportBeforeCppTargets="%s"'
169 % os.path.abspath(props_file_path))
170
171 for name, value in properties.items():
172 command.append('/p:%s="%s"' % (name, value))
173
174 return " ".join(command)
175
176 def _get_props_file_contents(self, definitions=None):
177
178 def format_macro(name, value):
179 return "%s=%s" % (name, value) if value else name
180
181 # how to specify runtime in command line:
182 # https://stackoverflow.com/questions/38840332/msbuild-overrides-properties-while-building-vc-project
183 runtime_library = {"MT": "MultiThreaded",
184 "MTd": "MultiThreadedDebug",
185 "MD": "MultiThreadedDLL",
186 "MDd": "MultiThreadedDebugDLL"}.get(
187 self._settings.get_safe("compiler.runtime"), "")
188
189 if self.build_env:
190 # Take the flags from the build env, the user was able to alter them if needed
191 flags = copy.copy(self.build_env.flags)
192 flags.append(self.build_env.std)
193 else: # To be removed when build_sln_command is deprecated
194 flags = vs_build_type_flags(self._settings, with_flags=False)
195 flags.append(vs_std_cpp(self._settings))
196
197 if definitions:
198 definitions = ";".join([format_macro(name, definitions[name]) for name in definitions])
199
200 flags_str = " ".join(list(filter(None, flags))) # Removes empty and None elements
201 additional_node = "<AdditionalOptions>" \
202 "{} %(AdditionalOptions)" \
203 "</AdditionalOptions>".format(flags_str) if flags_str else ""
204 runtime_node = "<RuntimeLibrary>" \
205 "{}" \
206 "</RuntimeLibrary>".format(runtime_library) if runtime_library else ""
207 definitions_node = "<PreprocessorDefinitions>" \
208 "{};%(PreprocessorDefinitions)" \
209 "</PreprocessorDefinitions>".format(definitions) if definitions else ""
210 template = """<?xml version="1.0" encoding="utf-8"?>
211 <Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
212 <ItemDefinitionGroup>
213 <ClCompile>
214 {runtime_node}
215 {additional_node}
216 {definitions_node}
217 </ClCompile>
218 </ItemDefinitionGroup>
219 </Project>""".format(**{"runtime_node": runtime_node,
220 "additional_node": additional_node,
221 "definitions_node": definitions_node})
222 return template
223
224 @staticmethod
225 def get_version(settings):
226 msbuild_cmd = "msbuild -version"
227 vcvars = tools_vcvars_command(settings)
228 command = "%s && %s" % (vcvars, msbuild_cmd)
229 try:
230 out, _ = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True).communicate()
231 version_line = decode_text(out).split("\n")[-1]
232 prog = re.compile("(\d+\.){2,3}\d+")
233 result = prog.match(version_line).group()
234 return Version(result)
235 except Exception as e:
236 raise ConanException("Error retrieving MSBuild version: '{}'".format(e))
237
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conans/client/build/msbuild.py b/conans/client/build/msbuild.py
--- a/conans/client/build/msbuild.py
+++ b/conans/client/build/msbuild.py
@@ -104,7 +104,8 @@
build_type = build_type or self._settings.get_safe("build_type")
arch = arch or self._settings.get_safe("arch")
- toolset = toolset or tools.msvs_toolset(self._settings)
+ if toolset is None: # False value to skip adjusting
+ toolset = tools.msvs_toolset(self._settings)
verbosity = os.getenv("CONAN_MSBUILD_VERBOSITY") or verbosity or "minimal"
if not build_type:
raise ConanException("Cannot build_sln_command, build_type not defined")
|
{"golden_diff": "diff --git a/conans/client/build/msbuild.py b/conans/client/build/msbuild.py\n--- a/conans/client/build/msbuild.py\n+++ b/conans/client/build/msbuild.py\n@@ -104,7 +104,8 @@\n \n build_type = build_type or self._settings.get_safe(\"build_type\")\n arch = arch or self._settings.get_safe(\"arch\")\n- toolset = toolset or tools.msvs_toolset(self._settings)\n+ if toolset is None: # False value to skip adjusting\n+ toolset = tools.msvs_toolset(self._settings)\n verbosity = os.getenv(\"CONAN_MSBUILD_VERBOSITY\") or verbosity or \"minimal\"\n if not build_type:\n raise ConanException(\"Cannot build_sln_command, build_type not defined\")\n", "issue": "MSBuild should allow \"do not adjust PlatformToolset\" with an argument\nHey,\r\n\r\nIn https://github.com/conan-io/conan/pull/4250 the default behavior of VisualStudio's toolset selection was changed to compile the whole solution using the same toolset (the `/p:PlatformToolset` is always set - see discussion here https://github.com/conan-io/conan/pull/4250#pullrequestreview-193994771).\r\n\r\nThis change prevents using two different toolsets in the same solution. In my case building a driver using the `WindowsKernelModeDriver10.0` (which is not even a valid option right now) toolset and user mode library using the `v141` toolset.\r\n\r\nI will be happy to write the code if this behavior change is accepted.\n", "before_files": [{"content": "import copy\nimport os\nimport re\nimport subprocess\n\nfrom conans.client import tools\nfrom conans.client.build.visual_environment import (VisualStudioBuildEnvironment,\n vs_build_type_flags, vs_std_cpp)\nfrom conans.client.tools.oss import cpu_count\nfrom conans.client.tools.win import vcvars_command\nfrom conans.errors import ConanException\nfrom conans.model.conan_file import ConanFile\nfrom conans.model.version import Version\nfrom conans.tools import vcvars_command as tools_vcvars_command\nfrom conans.util.env_reader import get_env\nfrom conans.util.files import decode_text, save\n\n\nclass MSBuild(object):\n\n def __init__(self, conanfile):\n if isinstance(conanfile, ConanFile):\n self._conanfile = conanfile\n self._settings = self._conanfile.settings\n self._output = self._conanfile.output\n self.build_env = VisualStudioBuildEnvironment(self._conanfile,\n with_build_type_flags=False)\n else: # backwards compatible with build_sln_command\n self._settings = conanfile\n self.build_env = None\n\n def build(self, project_file, targets=None, upgrade_project=True, build_type=None, arch=None,\n parallel=True, force_vcvars=False, toolset=None, platforms=None, use_env=True,\n vcvars_ver=None, winsdk_version=None, properties=None, output_binary_log=None,\n property_file_name=None, verbosity=None, definitions=None):\n \"\"\"\n :param project_file: Path to the .sln file.\n :param targets: List of targets to build.\n :param upgrade_project: Will call devenv to upgrade the solution to your current Visual Studio.\n :param build_type: Use a custom build type name instead of the default settings.build_type one.\n :param arch: Use a custom architecture name instead of the settings.arch one.\n It will be used to build the /p:Configuration= parameter of MSBuild.\n It can be used as the key of the platforms parameter. E.g. arch=\"x86\", platforms={\"x86\": \"i386\"}\n :param parallel: Will use the configured number of cores in the conan.conf file or tools.cpu_count():\n In the solution: Building the solution with the projects in parallel. (/m: parameter).\n CL compiler: Building the sources in parallel. 
(/MP: compiler flag)\n :param force_vcvars: Will ignore if the environment is already set for a different Visual Studio version.\n :param toolset: Specify a toolset. Will append a /p:PlatformToolset option.\n :param platforms: Dictionary with the mapping of archs/platforms from Conan naming to another one.\n It is useful for Visual Studio solutions that have a different naming in architectures.\n Example: platforms={\"x86\":\"Win32\"} (Visual solution uses \"Win32\" instead of \"x86\").\n This dictionary will update the default one:\n msvc_arch = {'x86': 'x86', 'x86_64': 'x64', 'armv7': 'ARM', 'armv8': 'ARM64'}\n :param use_env: Applies the argument /p:UseEnv=true to the MSBuild call.\n :param vcvars_ver: Specifies the Visual Studio compiler toolset to use.\n :param winsdk_version: Specifies the version of the Windows SDK to use.\n :param properties: Dictionary with new properties, for each element in the dictionary {name: value}\n it will append a /p:name=\"value\" option.\n :param output_binary_log: If set to True then MSBuild will output a binary log file called msbuild.binlog in\n the working directory. It can also be used to set the name of log file like this\n output_binary_log=\"my_log.binlog\".\n This parameter is only supported starting from MSBuild version 15.3 and onwards.\n :param property_file_name: When None it will generate a file named conan_build.props.\n You can specify a different name for the generated properties file.\n :param verbosity: Specifies verbosity level (/verbosity: parameter)\n :param definitions: Dictionary with additional compiler definitions to be applied during the build.\n Use value of None to set compiler definition with no value.\n :return: status code of the MSBuild command invocation\n \"\"\"\n\n property_file_name = property_file_name or \"conan_build.props\"\n self.build_env.parallel = parallel\n\n with tools.environment_append(self.build_env.vars):\n # Path for custom properties file\n props_file_contents = self._get_props_file_contents(definitions)\n property_file_name = os.path.abspath(property_file_name)\n save(property_file_name, props_file_contents)\n vcvars = vcvars_command(self._conanfile.settings, force=force_vcvars,\n vcvars_ver=vcvars_ver, winsdk_version=winsdk_version,\n output=self._output)\n command = self.get_command(project_file, property_file_name,\n targets=targets, upgrade_project=upgrade_project,\n build_type=build_type, arch=arch, parallel=parallel,\n toolset=toolset, platforms=platforms,\n use_env=use_env, properties=properties,\n output_binary_log=output_binary_log,\n verbosity=verbosity)\n command = \"%s && %s\" % (vcvars, command)\n return self._conanfile.run(command)\n\n def get_command(self, project_file, props_file_path=None, targets=None, upgrade_project=True,\n build_type=None, arch=None, parallel=True, toolset=None, platforms=None,\n use_env=False, properties=None, output_binary_log=None, verbosity=None):\n\n targets = targets or []\n properties = properties or {}\n command = []\n\n if upgrade_project and not get_env(\"CONAN_SKIP_VS_PROJECTS_UPGRADE\", False):\n command.append('devenv \"%s\" /upgrade &&' % project_file)\n else:\n self._output.info(\"Skipped sln project upgrade\")\n\n build_type = build_type or self._settings.get_safe(\"build_type\")\n arch = arch or self._settings.get_safe(\"arch\")\n toolset = toolset or tools.msvs_toolset(self._settings)\n verbosity = os.getenv(\"CONAN_MSBUILD_VERBOSITY\") or verbosity or \"minimal\"\n if not build_type:\n raise ConanException(\"Cannot build_sln_command, build_type 
not defined\")\n if not arch:\n raise ConanException(\"Cannot build_sln_command, arch not defined\")\n\n command.append('msbuild \"%s\" /p:Configuration=\"%s\"' % (project_file, build_type))\n msvc_arch = {'x86': 'x86',\n 'x86_64': 'x64',\n 'armv7': 'ARM',\n 'armv8': 'ARM64'}\n if platforms:\n msvc_arch.update(platforms)\n msvc_arch = msvc_arch.get(str(arch))\n if self._settings.get_safe(\"os\") == \"WindowsCE\":\n msvc_arch = self._settings.get_safe(\"os.platform\")\n try:\n sln = tools.load(project_file)\n pattern = re.compile(r\"GlobalSection\\(SolutionConfigurationPlatforms\\)\"\n r\"(.*?)EndGlobalSection\", re.DOTALL)\n solution_global = pattern.search(sln).group(1)\n lines = solution_global.splitlines()\n lines = [s.split(\"=\")[0].strip() for s in lines]\n except Exception:\n pass # TODO: !!! what are we catching here? tools.load? .group(1)? .splitlines?\n else:\n config = \"%s|%s\" % (build_type, msvc_arch)\n if config not in \"\".join(lines):\n self._output.warn(\"***** The configuration %s does not exist in this solution *****\"\n % config)\n self._output.warn(\"Use 'platforms' argument to define your architectures\")\n\n if output_binary_log:\n msbuild_version = MSBuild.get_version(self._settings)\n if msbuild_version >= \"15.3\": # http://msbuildlog.com/\n command.append('/bl' if isinstance(output_binary_log, bool)\n else '/bl:\"%s\"' % output_binary_log)\n else:\n raise ConanException(\"MSBuild version detected (%s) does not support \"\n \"'output_binary_log' ('/bl')\" % msbuild_version)\n\n if use_env:\n command.append('/p:UseEnv=true')\n\n if msvc_arch:\n command.append('/p:Platform=\"%s\"' % msvc_arch)\n\n if parallel:\n command.append('/m:%s' % cpu_count(output=self._output))\n\n if targets:\n command.append(\"/target:%s\" % \";\".join(targets))\n\n if toolset:\n command.append('/p:PlatformToolset=\"%s\"' % toolset)\n\n if verbosity:\n command.append('/verbosity:%s' % verbosity)\n\n if props_file_path:\n command.append('/p:ForceImportBeforeCppTargets=\"%s\"'\n % os.path.abspath(props_file_path))\n\n for name, value in properties.items():\n command.append('/p:%s=\"%s\"' % (name, value))\n\n return \" \".join(command)\n\n def _get_props_file_contents(self, definitions=None):\n\n def format_macro(name, value):\n return \"%s=%s\" % (name, value) if value else name\n\n # how to specify runtime in command line:\n # https://stackoverflow.com/questions/38840332/msbuild-overrides-properties-while-building-vc-project\n runtime_library = {\"MT\": \"MultiThreaded\",\n \"MTd\": \"MultiThreadedDebug\",\n \"MD\": \"MultiThreadedDLL\",\n \"MDd\": \"MultiThreadedDebugDLL\"}.get(\n self._settings.get_safe(\"compiler.runtime\"), \"\")\n\n if self.build_env:\n # Take the flags from the build env, the user was able to alter them if needed\n flags = copy.copy(self.build_env.flags)\n flags.append(self.build_env.std)\n else: # To be removed when build_sln_command is deprecated\n flags = vs_build_type_flags(self._settings, with_flags=False)\n flags.append(vs_std_cpp(self._settings))\n\n if definitions:\n definitions = \";\".join([format_macro(name, definitions[name]) for name in definitions])\n\n flags_str = \" \".join(list(filter(None, flags))) # Removes empty and None elements\n additional_node = \"<AdditionalOptions>\" \\\n \"{} %(AdditionalOptions)\" \\\n \"</AdditionalOptions>\".format(flags_str) if flags_str else \"\"\n runtime_node = \"<RuntimeLibrary>\" \\\n \"{}\" \\\n \"</RuntimeLibrary>\".format(runtime_library) if runtime_library else \"\"\n definitions_node = 
\"<PreprocessorDefinitions>\" \\\n \"{};%(PreprocessorDefinitions)\" \\\n \"</PreprocessorDefinitions>\".format(definitions) if definitions else \"\"\n template = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<Project xmlns=\"http://schemas.microsoft.com/developer/msbuild/2003\">\n <ItemDefinitionGroup>\n <ClCompile>\n {runtime_node}\n {additional_node}\n {definitions_node}\n </ClCompile>\n </ItemDefinitionGroup>\n</Project>\"\"\".format(**{\"runtime_node\": runtime_node,\n \"additional_node\": additional_node,\n \"definitions_node\": definitions_node})\n return template\n\n @staticmethod\n def get_version(settings):\n msbuild_cmd = \"msbuild -version\"\n vcvars = tools_vcvars_command(settings)\n command = \"%s && %s\" % (vcvars, msbuild_cmd)\n try:\n out, _ = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True).communicate()\n version_line = decode_text(out).split(\"\\n\")[-1]\n prog = re.compile(\"(\\d+\\.){2,3}\\d+\")\n result = prog.match(version_line).group()\n return Version(result)\n except Exception as e:\n raise ConanException(\"Error retrieving MSBuild version: '{}'\".format(e))\n", "path": "conans/client/build/msbuild.py"}], "after_files": [{"content": "import copy\nimport os\nimport re\nimport subprocess\n\nfrom conans.client import tools\nfrom conans.client.build.visual_environment import (VisualStudioBuildEnvironment,\n vs_build_type_flags, vs_std_cpp)\nfrom conans.client.tools.oss import cpu_count\nfrom conans.client.tools.win import vcvars_command\nfrom conans.errors import ConanException\nfrom conans.model.conan_file import ConanFile\nfrom conans.model.version import Version\nfrom conans.tools import vcvars_command as tools_vcvars_command\nfrom conans.util.env_reader import get_env\nfrom conans.util.files import decode_text, save\n\n\nclass MSBuild(object):\n\n def __init__(self, conanfile):\n if isinstance(conanfile, ConanFile):\n self._conanfile = conanfile\n self._settings = self._conanfile.settings\n self._output = self._conanfile.output\n self.build_env = VisualStudioBuildEnvironment(self._conanfile,\n with_build_type_flags=False)\n else: # backwards compatible with build_sln_command\n self._settings = conanfile\n self.build_env = None\n\n def build(self, project_file, targets=None, upgrade_project=True, build_type=None, arch=None,\n parallel=True, force_vcvars=False, toolset=None, platforms=None, use_env=True,\n vcvars_ver=None, winsdk_version=None, properties=None, output_binary_log=None,\n property_file_name=None, verbosity=None, definitions=None):\n \"\"\"\n :param project_file: Path to the .sln file.\n :param targets: List of targets to build.\n :param upgrade_project: Will call devenv to upgrade the solution to your current Visual Studio.\n :param build_type: Use a custom build type name instead of the default settings.build_type one.\n :param arch: Use a custom architecture name instead of the settings.arch one.\n It will be used to build the /p:Configuration= parameter of MSBuild.\n It can be used as the key of the platforms parameter. E.g. arch=\"x86\", platforms={\"x86\": \"i386\"}\n :param parallel: Will use the configured number of cores in the conan.conf file or tools.cpu_count():\n In the solution: Building the solution with the projects in parallel. (/m: parameter).\n CL compiler: Building the sources in parallel. (/MP: compiler flag)\n :param force_vcvars: Will ignore if the environment is already set for a different Visual Studio version.\n :param toolset: Specify a toolset. 
Will append a /p:PlatformToolset option.\n :param platforms: Dictionary with the mapping of archs/platforms from Conan naming to another one.\n It is useful for Visual Studio solutions that have a different naming in architectures.\n Example: platforms={\"x86\":\"Win32\"} (Visual solution uses \"Win32\" instead of \"x86\").\n This dictionary will update the default one:\n msvc_arch = {'x86': 'x86', 'x86_64': 'x64', 'armv7': 'ARM', 'armv8': 'ARM64'}\n :param use_env: Applies the argument /p:UseEnv=true to the MSBuild call.\n :param vcvars_ver: Specifies the Visual Studio compiler toolset to use.\n :param winsdk_version: Specifies the version of the Windows SDK to use.\n :param properties: Dictionary with new properties, for each element in the dictionary {name: value}\n it will append a /p:name=\"value\" option.\n :param output_binary_log: If set to True then MSBuild will output a binary log file called msbuild.binlog in\n the working directory. It can also be used to set the name of log file like this\n output_binary_log=\"my_log.binlog\".\n This parameter is only supported starting from MSBuild version 15.3 and onwards.\n :param property_file_name: When None it will generate a file named conan_build.props.\n You can specify a different name for the generated properties file.\n :param verbosity: Specifies verbosity level (/verbosity: parameter)\n :param definitions: Dictionary with additional compiler definitions to be applied during the build.\n Use value of None to set compiler definition with no value.\n :return: status code of the MSBuild command invocation\n \"\"\"\n\n property_file_name = property_file_name or \"conan_build.props\"\n self.build_env.parallel = parallel\n\n with tools.environment_append(self.build_env.vars):\n # Path for custom properties file\n props_file_contents = self._get_props_file_contents(definitions)\n property_file_name = os.path.abspath(property_file_name)\n save(property_file_name, props_file_contents)\n vcvars = vcvars_command(self._conanfile.settings, force=force_vcvars,\n vcvars_ver=vcvars_ver, winsdk_version=winsdk_version,\n output=self._output)\n command = self.get_command(project_file, property_file_name,\n targets=targets, upgrade_project=upgrade_project,\n build_type=build_type, arch=arch, parallel=parallel,\n toolset=toolset, platforms=platforms,\n use_env=use_env, properties=properties,\n output_binary_log=output_binary_log,\n verbosity=verbosity)\n command = \"%s && %s\" % (vcvars, command)\n return self._conanfile.run(command)\n\n def get_command(self, project_file, props_file_path=None, targets=None, upgrade_project=True,\n build_type=None, arch=None, parallel=True, toolset=None, platforms=None,\n use_env=False, properties=None, output_binary_log=None, verbosity=None):\n\n targets = targets or []\n properties = properties or {}\n command = []\n\n if upgrade_project and not get_env(\"CONAN_SKIP_VS_PROJECTS_UPGRADE\", False):\n command.append('devenv \"%s\" /upgrade &&' % project_file)\n else:\n self._output.info(\"Skipped sln project upgrade\")\n\n build_type = build_type or self._settings.get_safe(\"build_type\")\n arch = arch or self._settings.get_safe(\"arch\")\n if toolset is None: # False value to skip adjusting\n toolset = tools.msvs_toolset(self._settings)\n verbosity = os.getenv(\"CONAN_MSBUILD_VERBOSITY\") or verbosity or \"minimal\"\n if not build_type:\n raise ConanException(\"Cannot build_sln_command, build_type not defined\")\n if not arch:\n raise ConanException(\"Cannot build_sln_command, arch not defined\")\n\n 
command.append('msbuild \"%s\" /p:Configuration=\"%s\"' % (project_file, build_type))\n msvc_arch = {'x86': 'x86',\n 'x86_64': 'x64',\n 'armv7': 'ARM',\n 'armv8': 'ARM64'}\n if platforms:\n msvc_arch.update(platforms)\n msvc_arch = msvc_arch.get(str(arch))\n if self._settings.get_safe(\"os\") == \"WindowsCE\":\n msvc_arch = self._settings.get_safe(\"os.platform\")\n try:\n sln = tools.load(project_file)\n pattern = re.compile(r\"GlobalSection\\(SolutionConfigurationPlatforms\\)\"\n r\"(.*?)EndGlobalSection\", re.DOTALL)\n solution_global = pattern.search(sln).group(1)\n lines = solution_global.splitlines()\n lines = [s.split(\"=\")[0].strip() for s in lines]\n except Exception:\n pass # TODO: !!! what are we catching here? tools.load? .group(1)? .splitlines?\n else:\n config = \"%s|%s\" % (build_type, msvc_arch)\n if config not in \"\".join(lines):\n self._output.warn(\"***** The configuration %s does not exist in this solution *****\"\n % config)\n self._output.warn(\"Use 'platforms' argument to define your architectures\")\n\n if output_binary_log:\n msbuild_version = MSBuild.get_version(self._settings)\n if msbuild_version >= \"15.3\": # http://msbuildlog.com/\n command.append('/bl' if isinstance(output_binary_log, bool)\n else '/bl:\"%s\"' % output_binary_log)\n else:\n raise ConanException(\"MSBuild version detected (%s) does not support \"\n \"'output_binary_log' ('/bl')\" % msbuild_version)\n\n if use_env:\n command.append('/p:UseEnv=true')\n\n if msvc_arch:\n command.append('/p:Platform=\"%s\"' % msvc_arch)\n\n if parallel:\n command.append('/m:%s' % cpu_count(output=self._output))\n\n if targets:\n command.append(\"/target:%s\" % \";\".join(targets))\n\n if toolset:\n command.append('/p:PlatformToolset=\"%s\"' % toolset)\n\n if verbosity:\n command.append('/verbosity:%s' % verbosity)\n\n if props_file_path:\n command.append('/p:ForceImportBeforeCppTargets=\"%s\"'\n % os.path.abspath(props_file_path))\n\n for name, value in properties.items():\n command.append('/p:%s=\"%s\"' % (name, value))\n\n return \" \".join(command)\n\n def _get_props_file_contents(self, definitions=None):\n\n def format_macro(name, value):\n return \"%s=%s\" % (name, value) if value else name\n\n # how to specify runtime in command line:\n # https://stackoverflow.com/questions/38840332/msbuild-overrides-properties-while-building-vc-project\n runtime_library = {\"MT\": \"MultiThreaded\",\n \"MTd\": \"MultiThreadedDebug\",\n \"MD\": \"MultiThreadedDLL\",\n \"MDd\": \"MultiThreadedDebugDLL\"}.get(\n self._settings.get_safe(\"compiler.runtime\"), \"\")\n\n if self.build_env:\n # Take the flags from the build env, the user was able to alter them if needed\n flags = copy.copy(self.build_env.flags)\n flags.append(self.build_env.std)\n else: # To be removed when build_sln_command is deprecated\n flags = vs_build_type_flags(self._settings, with_flags=False)\n flags.append(vs_std_cpp(self._settings))\n\n if definitions:\n definitions = \";\".join([format_macro(name, definitions[name]) for name in definitions])\n\n flags_str = \" \".join(list(filter(None, flags))) # Removes empty and None elements\n additional_node = \"<AdditionalOptions>\" \\\n \"{} %(AdditionalOptions)\" \\\n \"</AdditionalOptions>\".format(flags_str) if flags_str else \"\"\n runtime_node = \"<RuntimeLibrary>\" \\\n \"{}\" \\\n \"</RuntimeLibrary>\".format(runtime_library) if runtime_library else \"\"\n definitions_node = \"<PreprocessorDefinitions>\" \\\n \"{};%(PreprocessorDefinitions)\" \\\n \"</PreprocessorDefinitions>\".format(definitions) if 
definitions else \"\"\n template = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<Project xmlns=\"http://schemas.microsoft.com/developer/msbuild/2003\">\n <ItemDefinitionGroup>\n <ClCompile>\n {runtime_node}\n {additional_node}\n {definitions_node}\n </ClCompile>\n </ItemDefinitionGroup>\n</Project>\"\"\".format(**{\"runtime_node\": runtime_node,\n \"additional_node\": additional_node,\n \"definitions_node\": definitions_node})\n return template\n\n @staticmethod\n def get_version(settings):\n msbuild_cmd = \"msbuild -version\"\n vcvars = tools_vcvars_command(settings)\n command = \"%s && %s\" % (vcvars, msbuild_cmd)\n try:\n out, _ = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True).communicate()\n version_line = decode_text(out).split(\"\\n\")[-1]\n prog = re.compile(\"(\\d+\\.){2,3}\\d+\")\n result = prog.match(version_line).group()\n return Version(result)\n except Exception as e:\n raise ConanException(\"Error retrieving MSBuild version: '{}'\".format(e))\n", "path": "conans/client/build/msbuild.py"}]}
| 3,634 | 176 |
gh_patches_debug_7438
|
rasdani/github-patches
|
git_diff
|
ranaroussi__yfinance-1237
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
fix(sec): upgrade lxml to 4.9.1
### What happened?
There is 1 security vulnerability found in lxml 4.5.1
- [CVE-2022-2309](https://www.oscs1024.com/hd/CVE-2022-2309)
### What did I do?
Upgrade lxml from 4.5.1 to 4.9.1 for vulnerability fix
### What did you expect to happen?
Ideally, no insecure libs should be used.
### The specification of the pull request
[PR Specification](https://www.oscs1024.com/docs/pr-specification/) from OSCS
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: UTF-8 -*-
3 #
4 # yfinance - market data downloader
5 # https://github.com/ranaroussi/yfinance
6
7 """yfinance - market data downloader"""
8
9 from setuptools import setup, find_packages
10 # from codecs import open
11 import io
12 from os import path
13
14 # --- get version ---
15 version = "unknown"
16 with open("yfinance/version.py") as f:
17 line = f.read().strip()
18 version = line.replace("version = ", "").replace('"', '')
19 # --- /get version ---
20
21
22 here = path.abspath(path.dirname(__file__))
23
24 # Get the long description from the README file
25 with io.open(path.join(here, 'README.md'), encoding='utf-8') as f:
26 long_description = f.read()
27
28 setup(
29 name='yfinance',
30 version=version,
31 description='Download market data from Yahoo! Finance API',
32 long_description=long_description,
33 long_description_content_type='text/markdown',
34 url='https://github.com/ranaroussi/yfinance',
35 author='Ran Aroussi',
36 author_email='[email protected]',
37 license='Apache',
38 classifiers=[
39 'License :: OSI Approved :: Apache Software License',
40 # 'Development Status :: 3 - Alpha',
41 # 'Development Status :: 4 - Beta',
42 'Development Status :: 5 - Production/Stable',
43
44
45 'Operating System :: OS Independent',
46 'Intended Audience :: Developers',
47 'Topic :: Office/Business :: Financial',
48 'Topic :: Office/Business :: Financial :: Investment',
49 'Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator',
50 'Topic :: Software Development :: Libraries',
51 'Topic :: Software Development :: Libraries :: Python Modules',
52
53 'Programming Language :: Python :: 2.7',
54 'Programming Language :: Python :: 3.4',
55 'Programming Language :: Python :: 3.5',
56 # 'Programming Language :: Python :: 3.6',
57 'Programming Language :: Python :: 3.7',
58 'Programming Language :: Python :: 3.8',
59 'Programming Language :: Python :: 3.9',
60 ],
61 platforms=['any'],
62 keywords='pandas, yahoo finance, pandas datareader',
63 packages=find_packages(exclude=['contrib', 'docs', 'tests', 'examples']),
64 install_requires=['pandas>=1.3.0', 'numpy>=1.16.5',
65 'requests>=2.26', 'multitasking>=0.0.7',
66 'appdirs>=1.4.4'],
67 entry_points={
68 'console_scripts': [
69 'sample=sample:main',
70 ],
71 },
72 )
73
74 print("""
75 NOTE: yfinance is not affiliated, endorsed, or vetted by Yahoo, Inc.
76
77 You should refer to Yahoo!'s terms of use for details on your rights
78 to use the actual data downloaded.""")
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -63,7 +63,7 @@
packages=find_packages(exclude=['contrib', 'docs', 'tests', 'examples']),
install_requires=['pandas>=1.3.0', 'numpy>=1.16.5',
'requests>=2.26', 'multitasking>=0.0.7',
- 'appdirs>=1.4.4'],
+ 'lxml>=4.9.1', 'appdirs>=1.4.4'],
entry_points={
'console_scripts': [
'sample=sample:main',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -63,7 +63,7 @@\n packages=find_packages(exclude=['contrib', 'docs', 'tests', 'examples']),\n install_requires=['pandas>=1.3.0', 'numpy>=1.16.5',\n 'requests>=2.26', 'multitasking>=0.0.7',\n- 'appdirs>=1.4.4'],\n+ 'lxml>=4.9.1', 'appdirs>=1.4.4'],\n entry_points={\n 'console_scripts': [\n 'sample=sample:main',\n", "issue": "fix(sec): upgrade lxml to 4.9.1\n### What happened\uff1f\nThere are 1 security vulnerabilities found in lxml 4.5.1\n- [CVE-2022-2309](https://www.oscs1024.com/hd/CVE-2022-2309)\n\n\n### What did I do\uff1f\nUpgrade lxml from 4.5.1 to 4.9.1 for vulnerability fix\n\n### What did you expect to happen\uff1f\nIdeally, no insecure libs should be used.\n\n### The specification of the pull request\n[PR Specification](https://www.oscs1024.com/docs/pr-specification/) from OSCS\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n#\n# yfinance - market data downloader\n# https://github.com/ranaroussi/yfinance\n\n\"\"\"yfinance - market data downloader\"\"\"\n\nfrom setuptools import setup, find_packages\n# from codecs import open\nimport io\nfrom os import path\n\n# --- get version ---\nversion = \"unknown\"\nwith open(\"yfinance/version.py\") as f:\n line = f.read().strip()\n version = line.replace(\"version = \", \"\").replace('\"', '')\n# --- /get version ---\n\n\nhere = path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\nwith io.open(path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='yfinance',\n version=version,\n description='Download market data from Yahoo! Finance API',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/ranaroussi/yfinance',\n author='Ran Aroussi',\n author_email='[email protected]',\n license='Apache',\n classifiers=[\n 'License :: OSI Approved :: Apache Software License',\n # 'Development Status :: 3 - Alpha',\n # 'Development Status :: 4 - Beta',\n 'Development Status :: 5 - Production/Stable',\n\n\n 'Operating System :: OS Independent',\n 'Intended Audience :: Developers',\n 'Topic :: Office/Business :: Financial',\n 'Topic :: Office/Business :: Financial :: Investment',\n 'Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n # 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n platforms=['any'],\n keywords='pandas, yahoo finance, pandas datareader',\n packages=find_packages(exclude=['contrib', 'docs', 'tests', 'examples']),\n install_requires=['pandas>=1.3.0', 'numpy>=1.16.5',\n 'requests>=2.26', 'multitasking>=0.0.7',\n 'appdirs>=1.4.4'],\n entry_points={\n 'console_scripts': [\n 'sample=sample:main',\n ],\n },\n)\n\nprint(\"\"\"\nNOTE: yfinance is not affiliated, endorsed, or vetted by Yahoo, Inc.\n\nYou should refer to Yahoo!'s terms of use for details on your rights\nto use the actual data downloaded.\"\"\")\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n#\n# yfinance - market data downloader\n# 
https://github.com/ranaroussi/yfinance\n\n\"\"\"yfinance - market data downloader\"\"\"\n\nfrom setuptools import setup, find_packages\n# from codecs import open\nimport io\nfrom os import path\n\n# --- get version ---\nversion = \"unknown\"\nwith open(\"yfinance/version.py\") as f:\n line = f.read().strip()\n version = line.replace(\"version = \", \"\").replace('\"', '')\n# --- /get version ---\n\n\nhere = path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\nwith io.open(path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='yfinance',\n version=version,\n description='Download market data from Yahoo! Finance API',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/ranaroussi/yfinance',\n author='Ran Aroussi',\n author_email='[email protected]',\n license='Apache',\n classifiers=[\n 'License :: OSI Approved :: Apache Software License',\n # 'Development Status :: 3 - Alpha',\n # 'Development Status :: 4 - Beta',\n 'Development Status :: 5 - Production/Stable',\n\n\n 'Operating System :: OS Independent',\n 'Intended Audience :: Developers',\n 'Topic :: Office/Business :: Financial',\n 'Topic :: Office/Business :: Financial :: Investment',\n 'Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n # 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n platforms=['any'],\n keywords='pandas, yahoo finance, pandas datareader',\n packages=find_packages(exclude=['contrib', 'docs', 'tests', 'examples']),\n install_requires=['pandas>=1.3.0', 'numpy>=1.16.5',\n 'requests>=2.26', 'multitasking>=0.0.7',\n 'lxml>=4.9.1', 'appdirs>=1.4.4'],\n entry_points={\n 'console_scripts': [\n 'sample=sample:main',\n ],\n },\n)\n\nprint(\"\"\"\nNOTE: yfinance is not affiliated, endorsed, or vetted by Yahoo, Inc.\n\nYou should refer to Yahoo!'s terms of use for details on your rights\nto use the actual data downloaded.\"\"\")\n", "path": "setup.py"}]}
| 1,174 | 145 |
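The lxml patch above only tightens the declared requirement; a quick runtime check makes it easy to confirm that a given environment actually picked up the fixed lxml line. The sketch below is illustrative rather than part of yfinance: it assumes Python 3.8+ for `importlib.metadata` and the third-party `packaging` helper, and the function name is made up for the example.

```python
from importlib.metadata import version   # stdlib on Python 3.8+
from packaging.version import Version    # third-party 'packaging' package

def lxml_is_patched(minimum: str = "4.9.1") -> bool:
    """True if the installed lxml is at or above the CVE-2022-2309 fix line."""
    return Version(version("lxml")) >= Version(minimum)

if __name__ == "__main__":
    print("lxml OK:", lxml_is_patched())
```

Since pip already enforces the `lxml>=4.9.1` pin at install time, a check like this is only a belt-and-braces diagnostic for environments installed before the pin existed.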
gh_patches_debug_12394
|
rasdani/github-patches
|
git_diff
|
aws__aws-cli-341
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
argparse dependency is only needed for Python 2.6
We currently have a dependency on argparse because it's not in stdlib for Python 2.6. We should make this dependency specific to 2.6 and not install it for other Python versions.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 import os
3 import sys
4
5 from setuptools import setup, find_packages
6
7 import awscli
8
9
10 requires = ['botocore>=0.16.0,<0.17.0',
11 'bcdoc>=0.9.0,<0.10.0',
12 'six>=1.1.0',
13 'colorama==0.2.5',
14 'argparse>=1.1',
15 'docutils>=0.10',
16 'rsa==3.1.1']
17
18
19 setup_options = dict(
20 name='awscli',
21 version=awscli.__version__,
22 description='Universal Command Line Environment for AWS.',
23 long_description=open('README.rst').read(),
24 author='Mitch Garnaat',
25 author_email='[email protected]',
26 url='http://aws.amazon.com/cli/',
27 scripts=['bin/aws', 'bin/aws.cmd',
28 'bin/aws_completer', 'bin/aws_zsh_completer.sh'],
29 packages=find_packages('.', exclude=['tests*']),
30 package_dir={'awscli': 'awscli'},
31 package_data={'awscli': ['data/*.json', 'examples/*/*']},
32 install_requires=requires,
33 license=open("LICENSE.txt").read(),
34 classifiers=(
35 'Development Status :: 5 - Production/Stable',
36 'Intended Audience :: Developers',
37 'Intended Audience :: System Administrators',
38 'Natural Language :: English',
39 'License :: OSI Approved :: Apache Software License',
40 'Programming Language :: Python',
41 'Programming Language :: Python :: 2.6',
42 'Programming Language :: Python :: 2.7',
43 'Programming Language :: Python :: 3',
44 'Programming Language :: Python :: 3.3',
45 ),
46 )
47
48 if 'py2exe' in sys.argv:
49 # This will actually give us a py2exe command.
50 import py2exe
51 # And we have some py2exe specific options.
52 setup_options['options'] = {
53 'py2exe': {
54 'optimize': 0,
55 'skip_archive': True,
56 'includes': ['ConfigParser', 'urllib', 'httplib',
57 'docutils.readers.standalone',
58 'docutils.parsers.rst',
59 'docutils.languages.en',
60 'xml.etree.ElementTree', 'HTMLParser',
61 'awscli.handlers'],
62 }
63 }
64 setup_options['console'] = ['bin/aws']
65
66
67 setup(**setup_options)
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python
-import os
import sys
from setuptools import setup, find_packages
@@ -11,10 +10,14 @@
'bcdoc>=0.9.0,<0.10.0',
'six>=1.1.0',
'colorama==0.2.5',
- 'argparse>=1.1',
'docutils>=0.10',
'rsa==3.1.1']
+if sys.version_info[:2] == (2, 6):
+ # For python2.6 we have to require argparse since it
+ # was not in stdlib until 2.7.
+ requires.append('argparse>=1.1')
+
setup_options = dict(
name='awscli',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,5 +1,4 @@\n #!/usr/bin/env python\n-import os\n import sys\n \n from setuptools import setup, find_packages\n@@ -11,10 +10,14 @@\n 'bcdoc>=0.9.0,<0.10.0',\n 'six>=1.1.0',\n 'colorama==0.2.5',\n- 'argparse>=1.1',\n 'docutils>=0.10',\n 'rsa==3.1.1']\n \n+if sys.version_info[:2] == (2, 6):\n+ # For python2.6 we have to require argparse since it\n+ # was not in stdlib until 2.7.\n+ requires.append('argparse>=1.1')\n+\n \n setup_options = dict(\n name='awscli',\n", "issue": "argparse dependency is only needed for Python 2.6\nWe currently have a dependency on argparse because it's not in stdlib for Python 2.6. We should make this dependency specific to 2.6 and not install it for other Python versions.\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\nimport awscli\n\n\nrequires = ['botocore>=0.16.0,<0.17.0',\n 'bcdoc>=0.9.0,<0.10.0',\n 'six>=1.1.0',\n 'colorama==0.2.5',\n 'argparse>=1.1',\n 'docutils>=0.10',\n 'rsa==3.1.1']\n\n\nsetup_options = dict(\n name='awscli',\n version=awscli.__version__,\n description='Universal Command Line Environment for AWS.',\n long_description=open('README.rst').read(),\n author='Mitch Garnaat',\n author_email='[email protected]',\n url='http://aws.amazon.com/cli/',\n scripts=['bin/aws', 'bin/aws.cmd',\n 'bin/aws_completer', 'bin/aws_zsh_completer.sh'],\n packages=find_packages('.', exclude=['tests*']),\n package_dir={'awscli': 'awscli'},\n package_data={'awscli': ['data/*.json', 'examples/*/*']},\n install_requires=requires,\n license=open(\"LICENSE.txt\").read(),\n classifiers=(\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n ),\n)\n\nif 'py2exe' in sys.argv:\n # This will actually give us a py2exe command.\n import py2exe\n # And we have some py2exe specific options.\n setup_options['options'] = {\n 'py2exe': {\n 'optimize': 0,\n 'skip_archive': True,\n 'includes': ['ConfigParser', 'urllib', 'httplib',\n 'docutils.readers.standalone',\n 'docutils.parsers.rst',\n 'docutils.languages.en',\n 'xml.etree.ElementTree', 'HTMLParser',\n 'awscli.handlers'],\n }\n }\n setup_options['console'] = ['bin/aws']\n\n\nsetup(**setup_options)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport sys\n\nfrom setuptools import setup, find_packages\n\nimport awscli\n\n\nrequires = ['botocore>=0.16.0,<0.17.0',\n 'bcdoc>=0.9.0,<0.10.0',\n 'six>=1.1.0',\n 'colorama==0.2.5',\n 'docutils>=0.10',\n 'rsa==3.1.1']\n\nif sys.version_info[:2] == (2, 6):\n # For python2.6 we have to require argparse since it\n # was not in stdlib until 2.7.\n requires.append('argparse>=1.1')\n\n\nsetup_options = dict(\n name='awscli',\n version=awscli.__version__,\n description='Universal Command Line Environment for AWS.',\n long_description=open('README.rst').read(),\n author='Mitch Garnaat',\n author_email='[email protected]',\n url='http://aws.amazon.com/cli/',\n scripts=['bin/aws', 'bin/aws.cmd',\n 'bin/aws_completer', 'bin/aws_zsh_completer.sh'],\n packages=find_packages('.', exclude=['tests*']),\n package_dir={'awscli': 
'awscli'},\n package_data={'awscli': ['data/*.json', 'examples/*/*']},\n install_requires=requires,\n license=open(\"LICENSE.txt\").read(),\n classifiers=(\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n ),\n)\n\nif 'py2exe' in sys.argv:\n # This will actually give us a py2exe command.\n import py2exe\n # And we have some py2exe specific options.\n setup_options['options'] = {\n 'py2exe': {\n 'optimize': 0,\n 'skip_archive': True,\n 'includes': ['ConfigParser', 'urllib', 'httplib',\n 'docutils.readers.standalone',\n 'docutils.parsers.rst',\n 'docutils.languages.en',\n 'xml.etree.ElementTree', 'HTMLParser',\n 'awscli.handlers'],\n }\n }\n setup_options['console'] = ['bin/aws']\n\n\nsetup(**setup_options)\n", "path": "setup.py"}]}
| 966 | 205 |
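The fix above branches on `sys.version_info` at build time; on packaging toolchains that understand PEP 508 environment markers, the same intent can be declared instead of computed. A minimal sketch with a hypothetical package name, assuming a setuptools/pip new enough to honour markers (the awscli of that era could not rely on this, which is why the runtime check was the right call there):

```python
from setuptools import setup

setup(
    name="example-cli",          # hypothetical; not the real awscli metadata
    version="0.0.1",
    install_requires=[
        "six>=1.1.0",
        'argparse>=1.1; python_version == "2.6"',  # only installed on Python 2.6
    ],
)
```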
gh_patches_debug_31754
|
rasdani/github-patches
|
git_diff
|
dask__distributed-4530
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Accessing `ipython` on workers through `client.start_ipython_workers()`
I am trying to see if I can run ipython on workers interactively. The first example in the documentation (https://distributed.dask.org/en/latest/ipython.html) works, while the second one does not work for me. Here's the minimal code to reproduce:
```python
from dask.distributed import Client
client = Client()
info = client.start_ipython_workers(magic_names="w*")
%w0 1+1
# this gives TimeoutError: Timeout waiting for IPython output
```
This is more of a curiosity for me, just trying to explore what is possible. I also posted it as a question on StackOverflow: https://stackoverflow.com/questions/66167619/how-to-use-client-start-ipython-workers-in-dask-distributed
**Environment**:
- Dask version: 2021.02.0
- Python version: Python 3.7.9
- Operating System: MacOS Catalina
- Install method (conda, pip, source): conda
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `distributed/_ipython_utils.py`
Content:
```
1 """Utilities for integrating with IPython
2
3 These functions should probably reside in Jupyter and IPython repositories,
4 after which we can import them instead of having our own definitions.
5 """
6
7 import atexit
8 import os
9
10 try:
11 import queue
12 except ImportError:
13 # Python 2
14 import Queue as queue
15 from subprocess import Popen
16 import sys
17 from threading import Thread
18 from uuid import uuid4
19
20 from tornado.gen import TimeoutError
21 from tornado.ioloop import IOLoop
22 from threading import Event
23
24 from IPython import get_ipython
25 from jupyter_client import BlockingKernelClient, write_connection_file
26 from jupyter_core.paths import jupyter_runtime_dir
27
28
29 OUTPUT_TIMEOUT = 10
30
31
32 def run_cell_remote(ip, kc, cell):
33 """Run a cell on a KernelClient
34
35 Any output from the cell will be redisplayed in the local session.
36 """
37 msg_id = kc.execute(cell)
38
39 in_kernel = getattr(ip, "kernel", False)
40 if in_kernel:
41 socket = ip.display_pub.pub_socket
42 session = ip.display_pub.session
43 parent_header = ip.display_pub.parent_header
44
45 while True:
46 try:
47 msg = kc.get_iopub_msg(timeout=OUTPUT_TIMEOUT)
48 except queue.Empty:
49 raise TimeoutError("Timeout waiting for IPython output")
50
51 if msg["parent_header"].get("msg_id") != msg_id:
52 continue
53 msg_type = msg["header"]["msg_type"]
54 content = msg["content"]
55 if msg_type == "status":
56 if content["execution_state"] == "idle":
57 # idle means output is done
58 break
59 elif msg_type == "stream":
60 stream = getattr(sys, content["name"])
61 stream.write(content["text"])
62 elif msg_type in ("display_data", "execute_result", "error"):
63 if in_kernel:
64 session.send(socket, msg_type, content, parent=parent_header)
65 else:
66 if msg_type == "error":
67 print("\n".join(content["traceback"]), file=sys.stderr)
68 else:
69 sys.stdout.write(content["data"].get("text/plain", ""))
70 else:
71 pass
72
73
74 def register_worker_magic(connection_info, magic_name="worker"):
75 """Register a %worker magic, given connection_info.
76
77 Both a line and cell magic are registered,
78 which run the given cell in a remote kernel.
79 """
80 ip = get_ipython()
81 info = dict(connection_info) # copy
82 key = info.pop("key")
83 kc = BlockingKernelClient(**connection_info)
84 kc.session.key = key
85 kc.start_channels()
86
87 def remote(line, cell=None):
88 """Run the current cell on a remote IPython kernel"""
89 if cell is None:
90 # both line and cell magic
91 cell = line
92 run_cell_remote(ip, kc, cell)
93
94 remote.client = kc # preserve reference on kc, largely for mocking
95 ip.register_magic_function(remote, magic_kind="line", magic_name=magic_name)
96 ip.register_magic_function(remote, magic_kind="cell", magic_name=magic_name)
97
98
99 def remote_magic(line, cell=None):
100 """A magic for running code on a specified remote worker
101
102 The connection_info dict of the worker will be looked up
103 as the first positional arg to the magic.
104 The rest of the line (or the entire cell for a %%cell magic)
105 will be passed to the remote kernel.
106
107 Usage:
108
109 info = e.start_ipython(worker)[worker]
110 %remote info print(worker.data)
111 """
112 # get connection info from IPython's user namespace
113 ip = get_ipython()
114 split_line = line.split(None, 1)
115 info_name = split_line[0]
116 if info_name not in ip.user_ns:
117 raise NameError(info_name)
118 connection_info = dict(ip.user_ns[info_name])
119
120 if not cell: # line magic, use the rest of the line
121 if len(split_line) == 1:
122 raise ValueError("I need some code to run!")
123 cell = split_line[1]
124
125 # turn info dict to hashable str for use as lookup key in _clients cache
126 key = ",".join(map(str, sorted(connection_info.items())))
127 session_key = connection_info.pop("key")
128
129 if key in remote_magic._clients:
130 kc = remote_magic._clients[key]
131 else:
132 kc = BlockingKernelClient(**connection_info)
133 kc.session.key = session_key
134 kc.start_channels()
135 kc.wait_for_ready(timeout=10)
136 remote_magic._clients[key] = kc
137
138 # actually run the code
139 run_cell_remote(ip, kc, cell)
140
141
142 # cache clients for re-use in remote magic
143 remote_magic._clients = {}
144
145
146 def register_remote_magic(magic_name="remote"):
147 """Define the parameterized %remote magic
148
149 See remote_magic above for details.
150 """
151 ip = get_ipython()
152 if ip is None:
153 return # do nothing if IPython's not running
154 ip.register_magic_function(remote_magic, magic_kind="line", magic_name=magic_name)
155 ip.register_magic_function(remote_magic, magic_kind="cell", magic_name=magic_name)
156
157
158 def connect_qtconsole(connection_info, name=None, extra_args=None):
159 """Open a QtConsole connected to a worker who has the given future
160
161 - identify worker with who_has
162 - start IPython kernel on the worker
163 - start qtconsole connected to the kernel
164 """
165 runtime_dir = jupyter_runtime_dir()
166 if name is None:
167 name = uuid4().hex
168
169 path = os.path.join(runtime_dir, name + ".json")
170 write_connection_file(path, **connection_info)
171 cmd = ["jupyter", "qtconsole", "--existing", path]
172 if extra_args:
173 cmd.extend(extra_args)
174 Popen(cmd)
175
176 @atexit.register
177 def _cleanup_connection_file():
178 """Cleanup our connection file when we exit."""
179 try:
180 os.remove(path)
181 except OSError:
182 pass
183
184
185 def start_ipython(ip=None, ns=None, log=None):
186 """Start an IPython kernel in a thread
187
188 Parameters
189 ----------
190 ip : str
191 The IP address to listen on (likely the parent object's ip).
192 ns : dict
193 Any names that should be injected into the IPython namespace.
194 log : logger instance
195 Hook up IPython's logging to an existing logger instead of the default.
196 """
197 from IPython import get_ipython
198
199 if get_ipython() is not None:
200 raise RuntimeError("Cannot start IPython, it's already running.")
201
202 from zmq.eventloop.ioloop import ZMQIOLoop
203 from ipykernel.kernelapp import IPKernelApp
204
205 # save the global IOLoop instance
206 # since IPython relies on it, but we are going to put it in a thread.
207 save_inst = IOLoop.instance()
208 IOLoop.clear_instance()
209 zmq_loop = ZMQIOLoop()
210 zmq_loop.install()
211
212 # start IPython, disabling its signal handlers that won't work due to running in a thread:
213 app = IPKernelApp.instance(log=log)
214 # Don't connect to the history database
215 app.config.HistoryManager.hist_file = ":memory:"
216 # listen on all interfaces, so remote clients can connect:
217 if ip:
218 app.ip = ip
219 # disable some signal handling, logging
220
221 def noop():
222 return None
223
224 app.init_signal = noop
225 app.log_connection_info = noop
226
227 # start IPython in a thread
228 # initialization happens in the thread to avoid threading problems
229 # with the sqlite history
230 evt = Event()
231
232 def _start():
233 app.initialize([])
234 app.kernel.pre_handler_hook = noop
235 app.kernel.post_handler_hook = noop
236 app.kernel.start()
237 app.kernel.loop = IOLoop.instance()
238 # save self in the IPython namespace as 'worker'
239 # inject things into the IPython namespace
240 if ns:
241 app.kernel.shell.user_ns.update(ns)
242 evt.set()
243 zmq_loop.start()
244
245 zmq_loop_thread = Thread(target=_start)
246 zmq_loop_thread.daemon = True
247 zmq_loop_thread.start()
248 assert evt.wait(timeout=5), "IPython didn't start in a reasonable amount of time."
249
250 # put the global IOLoop instance back:
251 IOLoop.clear_instance()
252 save_inst.install()
253 return app
254
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/distributed/_ipython_utils.py b/distributed/_ipython_utils.py
--- a/distributed/_ipython_utils.py
+++ b/distributed/_ipython_utils.py
@@ -199,16 +199,8 @@
if get_ipython() is not None:
raise RuntimeError("Cannot start IPython, it's already running.")
- from zmq.eventloop.ioloop import ZMQIOLoop
from ipykernel.kernelapp import IPKernelApp
- # save the global IOLoop instance
- # since IPython relies on it, but we are going to put it in a thread.
- save_inst = IOLoop.instance()
- IOLoop.clear_instance()
- zmq_loop = ZMQIOLoop()
- zmq_loop.install()
-
# start IPython, disabling its signal handlers that won't work due to running in a thread:
app = IPKernelApp.instance(log=log)
# Don't connect to the history database
@@ -234,20 +226,17 @@
app.kernel.pre_handler_hook = noop
app.kernel.post_handler_hook = noop
app.kernel.start()
- app.kernel.loop = IOLoop.instance()
# save self in the IPython namespace as 'worker'
# inject things into the IPython namespace
if ns:
app.kernel.shell.user_ns.update(ns)
evt.set()
- zmq_loop.start()
+ # start the app's IOLoop in its thread
+ IOLoop.current().start()
zmq_loop_thread = Thread(target=_start)
zmq_loop_thread.daemon = True
zmq_loop_thread.start()
assert evt.wait(timeout=5), "IPython didn't start in a reasonable amount of time."
- # put the global IOLoop instance back:
- IOLoop.clear_instance()
- save_inst.install()
return app
|
{"golden_diff": "diff --git a/distributed/_ipython_utils.py b/distributed/_ipython_utils.py\n--- a/distributed/_ipython_utils.py\n+++ b/distributed/_ipython_utils.py\n@@ -199,16 +199,8 @@\n if get_ipython() is not None:\n raise RuntimeError(\"Cannot start IPython, it's already running.\")\n \n- from zmq.eventloop.ioloop import ZMQIOLoop\n from ipykernel.kernelapp import IPKernelApp\n \n- # save the global IOLoop instance\n- # since IPython relies on it, but we are going to put it in a thread.\n- save_inst = IOLoop.instance()\n- IOLoop.clear_instance()\n- zmq_loop = ZMQIOLoop()\n- zmq_loop.install()\n-\n # start IPython, disabling its signal handlers that won't work due to running in a thread:\n app = IPKernelApp.instance(log=log)\n # Don't connect to the history database\n@@ -234,20 +226,17 @@\n app.kernel.pre_handler_hook = noop\n app.kernel.post_handler_hook = noop\n app.kernel.start()\n- app.kernel.loop = IOLoop.instance()\n # save self in the IPython namespace as 'worker'\n # inject things into the IPython namespace\n if ns:\n app.kernel.shell.user_ns.update(ns)\n evt.set()\n- zmq_loop.start()\n+ # start the app's IOLoop in its thread\n+ IOLoop.current().start()\n \n zmq_loop_thread = Thread(target=_start)\n zmq_loop_thread.daemon = True\n zmq_loop_thread.start()\n assert evt.wait(timeout=5), \"IPython didn't start in a reasonable amount of time.\"\n \n- # put the global IOLoop instance back:\n- IOLoop.clear_instance()\n- save_inst.install()\n return app\n", "issue": "Accessing `ipython` on workers through `client.start_ipython_workers()`\nI am trying to see if I can run ipython on workers interactively. The first example in the documentation (https://distributed.dask.org/en/latest/ipython.html) works, while the second one is not working for me. Here's the minimal code to reproduce:\r\n\r\n```python\r\nfrom dask.distributed import Client\r\nclient = Client()\r\n\r\ninfo = client.start_ipython_workers(magic_names=\"w*\")\r\n%w0 1+1\r\n# this gives TimeoutError: Timeout waiting for IPython output\r\n```\r\n\r\nThis is more of a curiosity for me, just trying to explore what is possible. 
I posted it also as a question on StackOverflow: https://stackoverflow.com/questions/66167619/how-to-use-client-start-ipython-workers-in-dask-distributed\r\n\r\n**Environment**:\r\n\r\n- Dask version: 2021.02.0\r\n- Python version: Python 3.7.9\r\n- Operating System: MacOS Catalina\r\n- Install method (conda, pip, source): conda\r\n\n", "before_files": [{"content": "\"\"\"Utilities for integrating with IPython\n\nThese functions should probably reside in Jupyter and IPython repositories,\nafter which we can import them instead of having our own definitions.\n\"\"\"\n\nimport atexit\nimport os\n\ntry:\n import queue\nexcept ImportError:\n # Python 2\n import Queue as queue\nfrom subprocess import Popen\nimport sys\nfrom threading import Thread\nfrom uuid import uuid4\n\nfrom tornado.gen import TimeoutError\nfrom tornado.ioloop import IOLoop\nfrom threading import Event\n\nfrom IPython import get_ipython\nfrom jupyter_client import BlockingKernelClient, write_connection_file\nfrom jupyter_core.paths import jupyter_runtime_dir\n\n\nOUTPUT_TIMEOUT = 10\n\n\ndef run_cell_remote(ip, kc, cell):\n \"\"\"Run a cell on a KernelClient\n\n Any output from the cell will be redisplayed in the local session.\n \"\"\"\n msg_id = kc.execute(cell)\n\n in_kernel = getattr(ip, \"kernel\", False)\n if in_kernel:\n socket = ip.display_pub.pub_socket\n session = ip.display_pub.session\n parent_header = ip.display_pub.parent_header\n\n while True:\n try:\n msg = kc.get_iopub_msg(timeout=OUTPUT_TIMEOUT)\n except queue.Empty:\n raise TimeoutError(\"Timeout waiting for IPython output\")\n\n if msg[\"parent_header\"].get(\"msg_id\") != msg_id:\n continue\n msg_type = msg[\"header\"][\"msg_type\"]\n content = msg[\"content\"]\n if msg_type == \"status\":\n if content[\"execution_state\"] == \"idle\":\n # idle means output is done\n break\n elif msg_type == \"stream\":\n stream = getattr(sys, content[\"name\"])\n stream.write(content[\"text\"])\n elif msg_type in (\"display_data\", \"execute_result\", \"error\"):\n if in_kernel:\n session.send(socket, msg_type, content, parent=parent_header)\n else:\n if msg_type == \"error\":\n print(\"\\n\".join(content[\"traceback\"]), file=sys.stderr)\n else:\n sys.stdout.write(content[\"data\"].get(\"text/plain\", \"\"))\n else:\n pass\n\n\ndef register_worker_magic(connection_info, magic_name=\"worker\"):\n \"\"\"Register a %worker magic, given connection_info.\n\n Both a line and cell magic are registered,\n which run the given cell in a remote kernel.\n \"\"\"\n ip = get_ipython()\n info = dict(connection_info) # copy\n key = info.pop(\"key\")\n kc = BlockingKernelClient(**connection_info)\n kc.session.key = key\n kc.start_channels()\n\n def remote(line, cell=None):\n \"\"\"Run the current cell on a remote IPython kernel\"\"\"\n if cell is None:\n # both line and cell magic\n cell = line\n run_cell_remote(ip, kc, cell)\n\n remote.client = kc # preserve reference on kc, largely for mocking\n ip.register_magic_function(remote, magic_kind=\"line\", magic_name=magic_name)\n ip.register_magic_function(remote, magic_kind=\"cell\", magic_name=magic_name)\n\n\ndef remote_magic(line, cell=None):\n \"\"\"A magic for running code on a specified remote worker\n\n The connection_info dict of the worker will be looked up\n as the first positional arg to the magic.\n The rest of the line (or the entire cell for a %%cell magic)\n will be passed to the remote kernel.\n\n Usage:\n\n info = e.start_ipython(worker)[worker]\n %remote info print(worker.data)\n \"\"\"\n # get connection info from 
IPython's user namespace\n ip = get_ipython()\n split_line = line.split(None, 1)\n info_name = split_line[0]\n if info_name not in ip.user_ns:\n raise NameError(info_name)\n connection_info = dict(ip.user_ns[info_name])\n\n if not cell: # line magic, use the rest of the line\n if len(split_line) == 1:\n raise ValueError(\"I need some code to run!\")\n cell = split_line[1]\n\n # turn info dict to hashable str for use as lookup key in _clients cache\n key = \",\".join(map(str, sorted(connection_info.items())))\n session_key = connection_info.pop(\"key\")\n\n if key in remote_magic._clients:\n kc = remote_magic._clients[key]\n else:\n kc = BlockingKernelClient(**connection_info)\n kc.session.key = session_key\n kc.start_channels()\n kc.wait_for_ready(timeout=10)\n remote_magic._clients[key] = kc\n\n # actually run the code\n run_cell_remote(ip, kc, cell)\n\n\n# cache clients for re-use in remote magic\nremote_magic._clients = {}\n\n\ndef register_remote_magic(magic_name=\"remote\"):\n \"\"\"Define the parameterized %remote magic\n\n See remote_magic above for details.\n \"\"\"\n ip = get_ipython()\n if ip is None:\n return # do nothing if IPython's not running\n ip.register_magic_function(remote_magic, magic_kind=\"line\", magic_name=magic_name)\n ip.register_magic_function(remote_magic, magic_kind=\"cell\", magic_name=magic_name)\n\n\ndef connect_qtconsole(connection_info, name=None, extra_args=None):\n \"\"\"Open a QtConsole connected to a worker who has the given future\n\n - identify worker with who_has\n - start IPython kernel on the worker\n - start qtconsole connected to the kernel\n \"\"\"\n runtime_dir = jupyter_runtime_dir()\n if name is None:\n name = uuid4().hex\n\n path = os.path.join(runtime_dir, name + \".json\")\n write_connection_file(path, **connection_info)\n cmd = [\"jupyter\", \"qtconsole\", \"--existing\", path]\n if extra_args:\n cmd.extend(extra_args)\n Popen(cmd)\n\n @atexit.register\n def _cleanup_connection_file():\n \"\"\"Cleanup our connection file when we exit.\"\"\"\n try:\n os.remove(path)\n except OSError:\n pass\n\n\ndef start_ipython(ip=None, ns=None, log=None):\n \"\"\"Start an IPython kernel in a thread\n\n Parameters\n ----------\n ip : str\n The IP address to listen on (likely the parent object's ip).\n ns : dict\n Any names that should be injected into the IPython namespace.\n log : logger instance\n Hook up IPython's logging to an existing logger instead of the default.\n \"\"\"\n from IPython import get_ipython\n\n if get_ipython() is not None:\n raise RuntimeError(\"Cannot start IPython, it's already running.\")\n\n from zmq.eventloop.ioloop import ZMQIOLoop\n from ipykernel.kernelapp import IPKernelApp\n\n # save the global IOLoop instance\n # since IPython relies on it, but we are going to put it in a thread.\n save_inst = IOLoop.instance()\n IOLoop.clear_instance()\n zmq_loop = ZMQIOLoop()\n zmq_loop.install()\n\n # start IPython, disabling its signal handlers that won't work due to running in a thread:\n app = IPKernelApp.instance(log=log)\n # Don't connect to the history database\n app.config.HistoryManager.hist_file = \":memory:\"\n # listen on all interfaces, so remote clients can connect:\n if ip:\n app.ip = ip\n # disable some signal handling, logging\n\n def noop():\n return None\n\n app.init_signal = noop\n app.log_connection_info = noop\n\n # start IPython in a thread\n # initialization happens in the thread to avoid threading problems\n # with the sqlite history\n evt = Event()\n\n def _start():\n app.initialize([])\n 
app.kernel.pre_handler_hook = noop\n app.kernel.post_handler_hook = noop\n app.kernel.start()\n app.kernel.loop = IOLoop.instance()\n # save self in the IPython namespace as 'worker'\n # inject things into the IPython namespace\n if ns:\n app.kernel.shell.user_ns.update(ns)\n evt.set()\n zmq_loop.start()\n\n zmq_loop_thread = Thread(target=_start)\n zmq_loop_thread.daemon = True\n zmq_loop_thread.start()\n assert evt.wait(timeout=5), \"IPython didn't start in a reasonable amount of time.\"\n\n # put the global IOLoop instance back:\n IOLoop.clear_instance()\n save_inst.install()\n return app\n", "path": "distributed/_ipython_utils.py"}], "after_files": [{"content": "\"\"\"Utilities for integrating with IPython\n\nThese functions should probably reside in Jupyter and IPython repositories,\nafter which we can import them instead of having our own definitions.\n\"\"\"\n\nimport atexit\nimport os\n\ntry:\n import queue\nexcept ImportError:\n # Python 2\n import Queue as queue\nfrom subprocess import Popen\nimport sys\nfrom threading import Thread\nfrom uuid import uuid4\n\nfrom tornado.gen import TimeoutError\nfrom tornado.ioloop import IOLoop\nfrom threading import Event\n\nfrom IPython import get_ipython\nfrom jupyter_client import BlockingKernelClient, write_connection_file\nfrom jupyter_core.paths import jupyter_runtime_dir\n\n\nOUTPUT_TIMEOUT = 10\n\n\ndef run_cell_remote(ip, kc, cell):\n \"\"\"Run a cell on a KernelClient\n\n Any output from the cell will be redisplayed in the local session.\n \"\"\"\n msg_id = kc.execute(cell)\n\n in_kernel = getattr(ip, \"kernel\", False)\n if in_kernel:\n socket = ip.display_pub.pub_socket\n session = ip.display_pub.session\n parent_header = ip.display_pub.parent_header\n\n while True:\n try:\n msg = kc.get_iopub_msg(timeout=OUTPUT_TIMEOUT)\n except queue.Empty:\n raise TimeoutError(\"Timeout waiting for IPython output\")\n\n if msg[\"parent_header\"].get(\"msg_id\") != msg_id:\n continue\n msg_type = msg[\"header\"][\"msg_type\"]\n content = msg[\"content\"]\n if msg_type == \"status\":\n if content[\"execution_state\"] == \"idle\":\n # idle means output is done\n break\n elif msg_type == \"stream\":\n stream = getattr(sys, content[\"name\"])\n stream.write(content[\"text\"])\n elif msg_type in (\"display_data\", \"execute_result\", \"error\"):\n if in_kernel:\n session.send(socket, msg_type, content, parent=parent_header)\n else:\n if msg_type == \"error\":\n print(\"\\n\".join(content[\"traceback\"]), file=sys.stderr)\n else:\n sys.stdout.write(content[\"data\"].get(\"text/plain\", \"\"))\n else:\n pass\n\n\ndef register_worker_magic(connection_info, magic_name=\"worker\"):\n \"\"\"Register a %worker magic, given connection_info.\n\n Both a line and cell magic are registered,\n which run the given cell in a remote kernel.\n \"\"\"\n ip = get_ipython()\n info = dict(connection_info) # copy\n key = info.pop(\"key\")\n kc = BlockingKernelClient(**connection_info)\n kc.session.key = key\n kc.start_channels()\n\n def remote(line, cell=None):\n \"\"\"Run the current cell on a remote IPython kernel\"\"\"\n if cell is None:\n # both line and cell magic\n cell = line\n run_cell_remote(ip, kc, cell)\n\n remote.client = kc # preserve reference on kc, largely for mocking\n ip.register_magic_function(remote, magic_kind=\"line\", magic_name=magic_name)\n ip.register_magic_function(remote, magic_kind=\"cell\", magic_name=magic_name)\n\n\ndef remote_magic(line, cell=None):\n \"\"\"A magic for running code on a specified remote worker\n\n The connection_info dict 
of the worker will be looked up\n as the first positional arg to the magic.\n The rest of the line (or the entire cell for a %%cell magic)\n will be passed to the remote kernel.\n\n Usage:\n\n info = e.start_ipython(worker)[worker]\n %remote info print(worker.data)\n \"\"\"\n # get connection info from IPython's user namespace\n ip = get_ipython()\n split_line = line.split(None, 1)\n info_name = split_line[0]\n if info_name not in ip.user_ns:\n raise NameError(info_name)\n connection_info = dict(ip.user_ns[info_name])\n\n if not cell: # line magic, use the rest of the line\n if len(split_line) == 1:\n raise ValueError(\"I need some code to run!\")\n cell = split_line[1]\n\n # turn info dict to hashable str for use as lookup key in _clients cache\n key = \",\".join(map(str, sorted(connection_info.items())))\n session_key = connection_info.pop(\"key\")\n\n if key in remote_magic._clients:\n kc = remote_magic._clients[key]\n else:\n kc = BlockingKernelClient(**connection_info)\n kc.session.key = session_key\n kc.start_channels()\n kc.wait_for_ready(timeout=10)\n remote_magic._clients[key] = kc\n\n # actually run the code\n run_cell_remote(ip, kc, cell)\n\n\n# cache clients for re-use in remote magic\nremote_magic._clients = {}\n\n\ndef register_remote_magic(magic_name=\"remote\"):\n \"\"\"Define the parameterized %remote magic\n\n See remote_magic above for details.\n \"\"\"\n ip = get_ipython()\n if ip is None:\n return # do nothing if IPython's not running\n ip.register_magic_function(remote_magic, magic_kind=\"line\", magic_name=magic_name)\n ip.register_magic_function(remote_magic, magic_kind=\"cell\", magic_name=magic_name)\n\n\ndef connect_qtconsole(connection_info, name=None, extra_args=None):\n \"\"\"Open a QtConsole connected to a worker who has the given future\n\n - identify worker with who_has\n - start IPython kernel on the worker\n - start qtconsole connected to the kernel\n \"\"\"\n runtime_dir = jupyter_runtime_dir()\n if name is None:\n name = uuid4().hex\n\n path = os.path.join(runtime_dir, name + \".json\")\n write_connection_file(path, **connection_info)\n cmd = [\"jupyter\", \"qtconsole\", \"--existing\", path]\n if extra_args:\n cmd.extend(extra_args)\n Popen(cmd)\n\n @atexit.register\n def _cleanup_connection_file():\n \"\"\"Cleanup our connection file when we exit.\"\"\"\n try:\n os.remove(path)\n except OSError:\n pass\n\n\ndef start_ipython(ip=None, ns=None, log=None):\n \"\"\"Start an IPython kernel in a thread\n\n Parameters\n ----------\n ip : str\n The IP address to listen on (likely the parent object's ip).\n ns : dict\n Any names that should be injected into the IPython namespace.\n log : logger instance\n Hook up IPython's logging to an existing logger instead of the default.\n \"\"\"\n from IPython import get_ipython\n\n if get_ipython() is not None:\n raise RuntimeError(\"Cannot start IPython, it's already running.\")\n\n from ipykernel.kernelapp import IPKernelApp\n\n # start IPython, disabling its signal handlers that won't work due to running in a thread:\n app = IPKernelApp.instance(log=log)\n # Don't connect to the history database\n app.config.HistoryManager.hist_file = \":memory:\"\n # listen on all interfaces, so remote clients can connect:\n if ip:\n app.ip = ip\n # disable some signal handling, logging\n\n def noop():\n return None\n\n app.init_signal = noop\n app.log_connection_info = noop\n\n # start IPython in a thread\n # initialization happens in the thread to avoid threading problems\n # with the sqlite history\n evt = Event()\n\n def 
_start():\n app.initialize([])\n app.kernel.pre_handler_hook = noop\n app.kernel.post_handler_hook = noop\n app.kernel.start()\n # save self in the IPython namespace as 'worker'\n # inject things into the IPython namespace\n if ns:\n app.kernel.shell.user_ns.update(ns)\n evt.set()\n # start the app's IOLoop in its thread\n IOLoop.current().start()\n\n zmq_loop_thread = Thread(target=_start)\n zmq_loop_thread.daemon = True\n zmq_loop_thread.start()\n assert evt.wait(timeout=5), \"IPython didn't start in a reasonable amount of time.\"\n\n return app\n", "path": "distributed/_ipython_utils.py"}]}
| 2,979 | 414 |
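The core of the patch above is letting the kernel's IOLoop run inside the worker thread instead of installing a global ZMQIOLoop. Because current Tornado IOLoops are backed by asyncio, the underlying "run an event loop in a daemon thread and hand work to it" pattern can be shown with the standard library alone; the sketch below is a generic illustration, not distributed's code.

```python
import asyncio
import threading

def start_loop_in_thread() -> asyncio.AbstractEventLoop:
    """Run an event loop in a daemon thread and return it for scheduling work."""
    loop = asyncio.new_event_loop()
    threading.Thread(target=loop.run_forever, daemon=True).start()
    return loop

loop = start_loop_in_thread()
fut = asyncio.run_coroutine_threadsafe(asyncio.sleep(0.1, result="ready"), loop)
print(fut.result(timeout=5))          # -> ready
loop.call_soon_threadsafe(loop.stop)  # shut the background loop down cleanly
```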
gh_patches_debug_11235
|
rasdani/github-patches
|
git_diff
|
saleor__saleor-5311
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Broken multiple interface notation in schema
### What I'm trying to achieve
To use Apollo tooling to generate TS types for the application queries. However, it fails because Saleor's schema uses comma as a separator instead of ampersand. More: https://github.com/apollographql/apollo-tooling/issues/434
### Steps to reproduce the problem
1. Go to mirumee/saleor-dashboard repository and clone it
2. Copy schema from core to dashboard
3. `npm run build-types`
4. Notice that it fails at multiple interface implementation.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/graphql/management/commands/get_graphql_schema.py`
Content:
```
1 from django.core.management.base import BaseCommand
2 from graphql import print_schema
3
4 from ...api import schema
5
6
7 class Command(BaseCommand):
8 help = "Writes SDL for GraphQL API schema to stdout"
9
10 def handle(self, *args, **options):
11 self.stdout.write(print_schema(schema))
12
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/saleor/graphql/management/commands/get_graphql_schema.py b/saleor/graphql/management/commands/get_graphql_schema.py
--- a/saleor/graphql/management/commands/get_graphql_schema.py
+++ b/saleor/graphql/management/commands/get_graphql_schema.py
@@ -8,4 +8,14 @@
help = "Writes SDL for GraphQL API schema to stdout"
def handle(self, *args, **options):
- self.stdout.write(print_schema(schema))
+ """Support multiple interface notation in schema for Apollo tooling.
+
+ In `graphql-core` V2 separator for interaces is `,`.
+ Apollo tooling to generate TypeScript types using `&` as interfaces separator.
+ https://github.com/graphql-python/graphql-core/pull/258
+ """
+ printed_schema = print_schema(schema)
+ for line in printed_schema.splitlines():
+ if "implements" in line:
+ line = line.replace(",", " &")
+ self.stdout.write(f"{line}\n")
|
{"golden_diff": "diff --git a/saleor/graphql/management/commands/get_graphql_schema.py b/saleor/graphql/management/commands/get_graphql_schema.py\n--- a/saleor/graphql/management/commands/get_graphql_schema.py\n+++ b/saleor/graphql/management/commands/get_graphql_schema.py\n@@ -8,4 +8,14 @@\n help = \"Writes SDL for GraphQL API schema to stdout\"\n \n def handle(self, *args, **options):\n- self.stdout.write(print_schema(schema))\n+ \"\"\"Support multiple interface notation in schema for Apollo tooling.\n+\n+ In `graphql-core` V2 separator for interaces is `,`.\n+ Apollo tooling to generate TypeScript types using `&` as interfaces separator.\n+ https://github.com/graphql-python/graphql-core/pull/258\n+ \"\"\"\n+ printed_schema = print_schema(schema)\n+ for line in printed_schema.splitlines():\n+ if \"implements\" in line:\n+ line = line.replace(\",\", \" &\")\n+ self.stdout.write(f\"{line}\\n\")\n", "issue": "Broken multiple interface notation in schema\n### What I'm trying to achieve\r\nTo use Apollo tooling to generate TS types for the application queries. However, it fails because Saleor's schema uses comma as a separator instead of ampersand. More: https://github.com/apollographql/apollo-tooling/issues/434 \r\n\r\n### Steps to reproduce the problem\r\n1. Go to mirumee/saleor-dashboard repository and clone it\r\n2. Copy schema from core to dashboard\r\n3. `npm run build-types`\r\n4. Notice that it fails at multiple interface implementation.\n", "before_files": [{"content": "from django.core.management.base import BaseCommand\nfrom graphql import print_schema\n\nfrom ...api import schema\n\n\nclass Command(BaseCommand):\n help = \"Writes SDL for GraphQL API schema to stdout\"\n\n def handle(self, *args, **options):\n self.stdout.write(print_schema(schema))\n", "path": "saleor/graphql/management/commands/get_graphql_schema.py"}], "after_files": [{"content": "from django.core.management.base import BaseCommand\nfrom graphql import print_schema\n\nfrom ...api import schema\n\n\nclass Command(BaseCommand):\n help = \"Writes SDL for GraphQL API schema to stdout\"\n\n def handle(self, *args, **options):\n \"\"\"Support multiple interface notation in schema for Apollo tooling.\n\n In `graphql-core` V2 separator for interaces is `,`.\n Apollo tooling to generate TypeScript types using `&` as interfaces separator.\n https://github.com/graphql-python/graphql-core/pull/258\n \"\"\"\n printed_schema = print_schema(schema)\n for line in printed_schema.splitlines():\n if \"implements\" in line:\n line = line.replace(\",\", \" &\")\n self.stdout.write(f\"{line}\\n\")\n", "path": "saleor/graphql/management/commands/get_graphql_schema.py"}]}
| 464 | 226 |
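The replacement logic in the patch above is small enough to pull out and exercise on its own, which also documents the intended behaviour for Apollo tooling. A sketch of that helper with an illustrative function name, not anything from the Saleor codebase:

```python
def to_apollo_interfaces(sdl: str) -> str:
    """Rewrite 'implements A, B' lines as 'implements A & B' for Apollo tooling."""
    out = []
    for line in sdl.splitlines():
        if "implements" in line:
            line = line.replace(",", " &")
        out.append(line)
    return "\n".join(out)

print(to_apollo_interfaces("type Order implements Node, ObjectWithMetadata {"))
# -> type Order implements Node & ObjectWithMetadata {
```

Like the management command it mirrors, this rewrites every comma on an `implements` line, which is fine for SDL produced by `print_schema`.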
gh_patches_debug_10332
|
rasdani/github-patches
|
git_diff
|
ipython__ipython-1882
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cython_magic uses importlib, which doesn't ship with py2.6
Sorry, I don't have time to fix this right now, but wanted to leave a report.
`importlib` was not in the standard library for python2.6, though [it has been backported](http://pypi.python.org/pypi/importlib/1.0.1).
Trying to run `%load_ext cythonmagic` results in this traceback:
```
/ipython/IPython/extensions/cythonmagic.py in <module>()
18 import io
19 import os, sys
---> 20 from importlib import import_module
21 import imp
22
ImportError: No module named importlib
```
not sure if we should make people install it themselves, or if it's better to just put a copy in `IPython.external`
--- END ISSUE ---
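For reference, the usual way extensions of that era kept a single code path across Python 2.6 and 2.7 was a guarded import with a tiny fallback. The sketch below (not from the repository files that follow, and not necessarily the fix the IPython maintainers chose) illustrates that pattern for the absolute-import case used here:

```python
import sys

try:
    from importlib import import_module          # Python >= 2.7
except ImportError:                              # Python 2.6: no stdlib importlib
    def import_module(name):
        """Minimal stand-in: import by dotted name and return the module."""
        __import__(name)
        return sys.modules[name]
```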
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `IPython/extensions/cythonmagic.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 Cython related magics.
4
5 Author:
6 * Brian Granger
7
8 Parts of this code were taken from Cython.inline.
9 """
10 #-----------------------------------------------------------------------------
11 # Copyright (C) 2010-2011, IPython Development Team.
12 #
13 # Distributed under the terms of the Modified BSD License.
14 #
15 # The full license is in the file COPYING.txt, distributed with this software.
16 #-----------------------------------------------------------------------------
17
18 import io
19 import os, sys
20 from importlib import import_module
21 import imp
22
23 try:
24 import hashlib
25 except ImportError:
26 import md5 as hashlib
27
28 from distutils.core import Distribution, Extension
29 from distutils.command.build_ext import build_ext
30
31 from IPython.core.magic import Magics, magics_class, cell_magic
32 from IPython.testing.skipdoctest import skip_doctest
33 from IPython.core.magic_arguments import (
34 argument, magic_arguments, parse_argstring
35 )
36 from IPython.utils import py3compat
37
38 import Cython
39 from Cython.Compiler.Errors import CompileError
40 from Cython.Compiler.Main import Context, default_options
41 from Cython.Build.Dependencies import cythonize
42
43
44 @magics_class
45 class CythonMagics(Magics):
46
47 def __init__(self, shell):
48 super(CythonMagics,self).__init__(shell)
49 self._reloads = {}
50 self._code_cache = {}
51
52 def _import_all(self, module):
53 for k,v in module.__dict__.items():
54 if not k.startswith('__'):
55 self.shell.push({k:v})
56
57 @cell_magic
58 def cython_inline(self, line, cell):
59 """Compile and run a Cython code cell using Cython.inline.
60
61 This magic simply passes the body of the cell to Cython.inline
62 and returns the result. If the variables `a` and `b` are defined
63 in the user's namespace, here is a simple example that returns
64 their sum::
65
66 %%cython_inline
67 return a+b
68
69 For most purposes, we recommend the usage of the `%%cython` magic.
70 """
71 locs = self.shell.user_global_ns
72 globs = self.shell.user_ns
73 return Cython.inline(cell, locals=locs, globals=globs)
74
75 @cell_magic
76 def cython_pyximport(self, line, cell):
77 """Compile and import a Cython code cell using pyximport.
78
79 The contents of the cell are written to a `.pyx` file in the current
80 working directory, which is then imported using `pyximport`. This
81 magic requires a module name to be passed::
82
83 %%cython_pyximport modulename
84 def f(x):
85 return 2.0*x
86
87 The compiled module is then imported and all of its symbols are injected into
88 the user's namespace. For most purposes, we recommend the usage of the
89 `%%cython` magic.
90 """
91 module_name = line.strip()
92 if not module_name:
93 raise ValueError('module name must be given')
94 fname = module_name + '.pyx'
95 with io.open(fname, 'w', encoding='utf-8') as f:
96 f.write(cell)
97 if 'pyximport' not in sys.modules:
98 import pyximport
99 pyximport.install(reload_support=True)
100 if module_name in self._reloads:
101 module = self._reloads[module_name]
102 reload(module)
103 else:
104 module = import_module(module_name)
105 self._reloads[module_name] = module
106 self._import_all(module)
107
108 @magic_arguments()
109 @argument(
110 '-f', '--force', action='store_true', default=False,
111 help="Force the compilation of the pyx module even if it hasn't changed"
112 )
113 @cell_magic
114 def cython(self, line, cell):
115 """Compile and import everything from a Cython code cell.
116
117 The contents of the cell are written to a `.pyx` file in the
118 directory `IPYTHONDIR/cython` using a filename with the hash of the code.
119 This file is then cythonized and compiled. The resulting module
120 is imported and all of its symbols are injected into the user's
121 namespace. The usage is similar to that of `%%cython_pyximport` but
122 you don't have to pass a module name::
123
124 %%cython
125 def f(x):
126 return 2.0*x
127 """
128 args = parse_argstring(self.cython, line)
129 code = cell if cell.endswith('\n') else cell+'\n'
130 lib_dir=os.path.join(self.shell.ipython_dir, 'cython')
131 cython_include_dirs=['.']
132 force=args.force
133 quiet=True
134 ctx = Context(cython_include_dirs, default_options)
135 key = code, sys.version_info, sys.executable, Cython.__version__
136 module_name = "_cython_magic_" + hashlib.md5(str(key).encode('utf-8')).hexdigest()
137 so_ext = [ ext for ext,_,mod_type in imp.get_suffixes() if mod_type == imp.C_EXTENSION ][0]
138 module_path = os.path.join(lib_dir, module_name+so_ext)
139
140 if not os.path.exists(lib_dir):
141 os.makedirs(lib_dir)
142
143 if force or not os.path.isfile(module_path):
144 cflags = []
145 c_include_dirs = []
146 if 'numpy' in code:
147 import numpy
148 c_include_dirs.append(numpy.get_include())
149 pyx_file = os.path.join(lib_dir, module_name + '.pyx')
150 pyx_file = py3compat.cast_bytes_py2(pyx_file, encoding=sys.getfilesystemencoding())
151 with io.open(pyx_file, 'w', encoding='utf-8') as f:
152 f.write(code)
153 extension = Extension(
154 name = module_name,
155 sources = [pyx_file],
156 include_dirs = c_include_dirs,
157 extra_compile_args = cflags
158 )
159 dist = Distribution()
160 config_files = dist.find_config_files()
161 try:
162 config_files.remove('setup.cfg')
163 except ValueError:
164 pass
165 dist.parse_config_files(config_files)
166 build_extension = build_ext(dist)
167 build_extension.finalize_options()
168 try:
169 build_extension.extensions = cythonize([extension], ctx=ctx, quiet=quiet)
170 except CompileError:
171 return
172 build_extension.build_temp = os.path.dirname(pyx_file)
173 build_extension.build_lib = lib_dir
174 build_extension.run()
175 self._code_cache[key] = module_name
176
177 module = imp.load_dynamic(module_name, module_path)
178 self._import_all(module)
179
180
181 _loaded = False
182
183 def load_ipython_extension(ip):
184 """Load the extension in IPython."""
185 global _loaded
186 if not _loaded:
187 ip.register_magics(CythonMagics)
188 _loaded = True
189
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/IPython/extensions/cythonmagic.py b/IPython/extensions/cythonmagic.py
--- a/IPython/extensions/cythonmagic.py
+++ b/IPython/extensions/cythonmagic.py
@@ -17,7 +17,6 @@
import io
import os, sys
-from importlib import import_module
import imp
try:
@@ -101,7 +100,8 @@
module = self._reloads[module_name]
reload(module)
else:
- module = import_module(module_name)
+ __import__(module_name)
+ module = sys.modules[module_name]
self._reloads[module_name] = module
self._import_all(module)
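The patch removes the `importlib` dependency by pairing `__import__` with a `sys.modules` lookup. A quick stdlib-only check of why that pairing is the safe pattern (independent of Cython):

```python
import sys

# For dotted names, __import__ returns the top-level package, so the actual
# submodule has to be read back out of sys.modules.
__import__("os.path")
assert sys.modules["os.path"] is __import__("os").path

# For flat names, such as the generated _cython_magic_* modules here,
# the return value and the sys.modules entry are the same object anyway.
assert __import__("json") is sys.modules["json"]
```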
|
{"golden_diff": "diff --git a/IPython/extensions/cythonmagic.py b/IPython/extensions/cythonmagic.py\n--- a/IPython/extensions/cythonmagic.py\n+++ b/IPython/extensions/cythonmagic.py\n@@ -17,7 +17,6 @@\n \n import io\n import os, sys\n-from importlib import import_module\n import imp\n \n try:\n@@ -101,7 +100,8 @@\n module = self._reloads[module_name]\n reload(module)\n else:\n- module = import_module(module_name)\n+ __import__(module_name)\n+ module = sys.modules[module_name]\n self._reloads[module_name] = module\n self._import_all(module)\n", "issue": "cython_magic uses importlib, which doesn't ship with py2.6\nSorry, I don't have time to fix this right now, but wanted to leave a report.\n\n`importlib` was not in the standard library for python2.6, though [it has been backported](http://pypi.python.org/pypi/importlib/1.0.1).\n\nTrying to run `%load_ext cythonmagic` results in this traceback:\n\n```\n/ipython/IPython/extensions/cythonmagic.py in <module>()\n 18 import io\n 19 import os, sys\n---> 20 from importlib import import_module\n 21 import imp\n 22 \n\nImportError: No module named importlib\n```\n\nnot sure if we should make people install it themselves, or if it's better to just put a copy in `IPython.external`\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nCython related magics.\n\nAuthor:\n* Brian Granger\n\nParts of this code were taken from Cython.inline.\n\"\"\"\n#-----------------------------------------------------------------------------\n# Copyright (C) 2010-2011, IPython Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\nimport io\nimport os, sys\nfrom importlib import import_module\nimport imp\n\ntry:\n import hashlib\nexcept ImportError:\n import md5 as hashlib\n\nfrom distutils.core import Distribution, Extension\nfrom distutils.command.build_ext import build_ext\n\nfrom IPython.core.magic import Magics, magics_class, cell_magic\nfrom IPython.testing.skipdoctest import skip_doctest\nfrom IPython.core.magic_arguments import (\n argument, magic_arguments, parse_argstring\n)\nfrom IPython.utils import py3compat\n\nimport Cython\nfrom Cython.Compiler.Errors import CompileError\nfrom Cython.Compiler.Main import Context, default_options\nfrom Cython.Build.Dependencies import cythonize\n\n\n@magics_class\nclass CythonMagics(Magics):\n\n def __init__(self, shell):\n super(CythonMagics,self).__init__(shell)\n self._reloads = {}\n self._code_cache = {}\n\n def _import_all(self, module):\n for k,v in module.__dict__.items():\n if not k.startswith('__'):\n self.shell.push({k:v})\n\n @cell_magic\n def cython_inline(self, line, cell):\n \"\"\"Compile and run a Cython code cell using Cython.inline.\n\n This magic simply passes the body of the cell to Cython.inline\n and returns the result. 
If the variables `a` and `b` are defined\n in the user's namespace, here is a simple example that returns\n their sum::\n \n %%cython_inline\n return a+b\n\n For most purposes, we recommend the usage of the `%%cython` magic.\n \"\"\"\n locs = self.shell.user_global_ns\n globs = self.shell.user_ns\n return Cython.inline(cell, locals=locs, globals=globs)\n\n @cell_magic\n def cython_pyximport(self, line, cell):\n \"\"\"Compile and import a Cython code cell using pyximport.\n\n The contents of the cell are written to a `.pyx` file in the current\n working directory, which is then imported using `pyximport`. This\n magic requires a module name to be passed::\n \n %%cython_pyximport modulename\n def f(x):\n return 2.0*x\n\n The compiled module is then imported and all of its symbols are injected into\n the user's namespace. For most purposes, we recommend the usage of the\n `%%cython` magic.\n \"\"\"\n module_name = line.strip()\n if not module_name:\n raise ValueError('module name must be given')\n fname = module_name + '.pyx'\n with io.open(fname, 'w', encoding='utf-8') as f:\n f.write(cell)\n if 'pyximport' not in sys.modules:\n import pyximport\n pyximport.install(reload_support=True)\n if module_name in self._reloads:\n module = self._reloads[module_name]\n reload(module)\n else:\n module = import_module(module_name)\n self._reloads[module_name] = module\n self._import_all(module)\n\n @magic_arguments()\n @argument(\n '-f', '--force', action='store_true', default=False,\n help=\"Force the compilation of the pyx module even if it hasn't changed\"\n )\n @cell_magic\n def cython(self, line, cell):\n \"\"\"Compile and import everything from a Cython code cell.\n\n The contents of the cell are written to a `.pyx` file in the\n directory `IPYTHONDIR/cython` using a filename with the hash of the code.\n This file is then cythonized and compiled. The resulting module\n is imported and all of its symbols are injected into the user's\n namespace. 
The usage is similar to that of `%%cython_pyximport` but\n you don't have to pass a module name::\n\n %%cython\n def f(x):\n return 2.0*x\n \"\"\"\n args = parse_argstring(self.cython, line)\n code = cell if cell.endswith('\\n') else cell+'\\n'\n lib_dir=os.path.join(self.shell.ipython_dir, 'cython')\n cython_include_dirs=['.']\n force=args.force\n quiet=True\n ctx = Context(cython_include_dirs, default_options)\n key = code, sys.version_info, sys.executable, Cython.__version__\n module_name = \"_cython_magic_\" + hashlib.md5(str(key).encode('utf-8')).hexdigest()\n so_ext = [ ext for ext,_,mod_type in imp.get_suffixes() if mod_type == imp.C_EXTENSION ][0]\n module_path = os.path.join(lib_dir, module_name+so_ext)\n\n if not os.path.exists(lib_dir):\n os.makedirs(lib_dir)\n\n if force or not os.path.isfile(module_path):\n cflags = []\n c_include_dirs = []\n if 'numpy' in code:\n import numpy\n c_include_dirs.append(numpy.get_include())\n pyx_file = os.path.join(lib_dir, module_name + '.pyx')\n pyx_file = py3compat.cast_bytes_py2(pyx_file, encoding=sys.getfilesystemencoding())\n with io.open(pyx_file, 'w', encoding='utf-8') as f:\n f.write(code)\n extension = Extension(\n name = module_name,\n sources = [pyx_file],\n include_dirs = c_include_dirs,\n extra_compile_args = cflags\n )\n dist = Distribution()\n config_files = dist.find_config_files()\n try: \n config_files.remove('setup.cfg')\n except ValueError:\n pass\n dist.parse_config_files(config_files)\n build_extension = build_ext(dist)\n build_extension.finalize_options()\n try:\n build_extension.extensions = cythonize([extension], ctx=ctx, quiet=quiet)\n except CompileError:\n return\n build_extension.build_temp = os.path.dirname(pyx_file)\n build_extension.build_lib = lib_dir\n build_extension.run()\n self._code_cache[key] = module_name\n\n module = imp.load_dynamic(module_name, module_path)\n self._import_all(module)\n\n\n_loaded = False\n\ndef load_ipython_extension(ip):\n \"\"\"Load the extension in IPython.\"\"\"\n global _loaded\n if not _loaded:\n ip.register_magics(CythonMagics)\n _loaded = True\n", "path": "IPython/extensions/cythonmagic.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nCython related magics.\n\nAuthor:\n* Brian Granger\n\nParts of this code were taken from Cython.inline.\n\"\"\"\n#-----------------------------------------------------------------------------\n# Copyright (C) 2010-2011, IPython Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\nimport io\nimport os, sys\nimport imp\n\ntry:\n import hashlib\nexcept ImportError:\n import md5 as hashlib\n\nfrom distutils.core import Distribution, Extension\nfrom distutils.command.build_ext import build_ext\n\nfrom IPython.core.magic import Magics, magics_class, cell_magic\nfrom IPython.testing.skipdoctest import skip_doctest\nfrom IPython.core.magic_arguments import (\n argument, magic_arguments, parse_argstring\n)\nfrom IPython.utils import py3compat\n\nimport Cython\nfrom Cython.Compiler.Errors import CompileError\nfrom Cython.Compiler.Main import Context, default_options\nfrom Cython.Build.Dependencies import cythonize\n\n\n@magics_class\nclass CythonMagics(Magics):\n\n def __init__(self, shell):\n super(CythonMagics,self).__init__(shell)\n self._reloads = {}\n self._code_cache = {}\n\n def _import_all(self, module):\n for k,v in 
module.__dict__.items():\n if not k.startswith('__'):\n self.shell.push({k:v})\n\n @cell_magic\n def cython_inline(self, line, cell):\n \"\"\"Compile and run a Cython code cell using Cython.inline.\n\n This magic simply passes the body of the cell to Cython.inline\n and returns the result. If the variables `a` and `b` are defined\n in the user's namespace, here is a simple example that returns\n their sum::\n \n %%cython_inline\n return a+b\n\n For most purposes, we recommend the usage of the `%%cython` magic.\n \"\"\"\n locs = self.shell.user_global_ns\n globs = self.shell.user_ns\n return Cython.inline(cell, locals=locs, globals=globs)\n\n @cell_magic\n def cython_pyximport(self, line, cell):\n \"\"\"Compile and import a Cython code cell using pyximport.\n\n The contents of the cell are written to a `.pyx` file in the current\n working directory, which is then imported using `pyximport`. This\n magic requires a module name to be passed::\n \n %%cython_pyximport modulename\n def f(x):\n return 2.0*x\n\n The compiled module is then imported and all of its symbols are injected into\n the user's namespace. For most purposes, we recommend the usage of the\n `%%cython` magic.\n \"\"\"\n module_name = line.strip()\n if not module_name:\n raise ValueError('module name must be given')\n fname = module_name + '.pyx'\n with io.open(fname, 'w', encoding='utf-8') as f:\n f.write(cell)\n if 'pyximport' not in sys.modules:\n import pyximport\n pyximport.install(reload_support=True)\n if module_name in self._reloads:\n module = self._reloads[module_name]\n reload(module)\n else:\n __import__(module_name)\n module = sys.modules[module_name]\n self._reloads[module_name] = module\n self._import_all(module)\n\n @magic_arguments()\n @argument(\n '-f', '--force', action='store_true', default=False,\n help=\"Force the compilation of the pyx module even if it hasn't changed\"\n )\n @cell_magic\n def cython(self, line, cell):\n \"\"\"Compile and import everything from a Cython code cell.\n\n The contents of the cell are written to a `.pyx` file in the\n directory `IPYTHONDIR/cython` using a filename with the hash of the code.\n This file is then cythonized and compiled. The resulting module\n is imported and all of its symbols are injected into the user's\n namespace. 
The usage is similar to that of `%%cython_pyximport` but\n you don't have to pass a module name::\n\n %%cython\n def f(x):\n return 2.0*x\n \"\"\"\n args = parse_argstring(self.cython, line)\n code = cell if cell.endswith('\\n') else cell+'\\n'\n lib_dir=os.path.join(self.shell.ipython_dir, 'cython')\n cython_include_dirs=['.']\n force=args.force\n quiet=True\n ctx = Context(cython_include_dirs, default_options)\n key = code, sys.version_info, sys.executable, Cython.__version__\n module_name = \"_cython_magic_\" + hashlib.md5(str(key).encode('utf-8')).hexdigest()\n so_ext = [ ext for ext,_,mod_type in imp.get_suffixes() if mod_type == imp.C_EXTENSION ][0]\n module_path = os.path.join(lib_dir, module_name+so_ext)\n\n if not os.path.exists(lib_dir):\n os.makedirs(lib_dir)\n\n if force or not os.path.isfile(module_path):\n cflags = []\n c_include_dirs = []\n if 'numpy' in code:\n import numpy\n c_include_dirs.append(numpy.get_include())\n pyx_file = os.path.join(lib_dir, module_name + '.pyx')\n pyx_file = py3compat.cast_bytes_py2(pyx_file, encoding=sys.getfilesystemencoding())\n with io.open(pyx_file, 'w', encoding='utf-8') as f:\n f.write(code)\n extension = Extension(\n name = module_name,\n sources = [pyx_file],\n include_dirs = c_include_dirs,\n extra_compile_args = cflags\n )\n dist = Distribution()\n config_files = dist.find_config_files()\n try: \n config_files.remove('setup.cfg')\n except ValueError:\n pass\n dist.parse_config_files(config_files)\n build_extension = build_ext(dist)\n build_extension.finalize_options()\n try:\n build_extension.extensions = cythonize([extension], ctx=ctx, quiet=quiet)\n except CompileError:\n return\n build_extension.build_temp = os.path.dirname(pyx_file)\n build_extension.build_lib = lib_dir\n build_extension.run()\n self._code_cache[key] = module_name\n\n module = imp.load_dynamic(module_name, module_path)\n self._import_all(module)\n\n\n_loaded = False\n\ndef load_ipython_extension(ip):\n \"\"\"Load the extension in IPython.\"\"\"\n global _loaded\n if not _loaded:\n ip.register_magics(CythonMagics)\n _loaded = True\n", "path": "IPython/extensions/cythonmagic.py"}]}
| 2,383 | 151 |
gh_patches_debug_6839
|
rasdani/github-patches
|
git_diff
|
sunpy__sunpy-3255
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
test_mapsequence_solar_derotate fails under 32bit with Astropy 3.2
When `test_mapsequence_solar_derotate` is run with the `clip` keyword (the second stage of that test), it fails with this output:
```python
# Test that the shape of data is correct when clipped
clipped_shape = (25, 18)
for m in tmc:
> assert(m.data.shape == clipped_shape)
E assert (25, 19) == (25, 18)
E At index 1 diff: 19 != 18
E Use -v to get the full diff
```
It turns out that the computed clip positions differ between the two platforms.
32bit
```python
[_lower_clip(x.value), _upper_clip(x.value)] * u.pix
<Quantity [0., 2.] pix>
```
64bit
```python
[_lower_clip(x.value), _upper_clip(x.value)] * u.pix
<Quantity [1., 2.] pix>
```
The `x.value` is not the same.
32bit
```
array([-2.96311832e-15, 5.46585361e-01, 1.09445035e+00])
```
64bit
```
array([2.96311832e-15, 5.46585361e-01, 1.09445035e+00])
```
So it comes from `calculate_solar_rotate_shift`
32bit
```
{'x': <Quantity [-5.68434189e-14, -1.04855012e+01, -2.09955502e+01] arcsec>, 'y': <Quantity [5.68434189e-14, 2.68536149e-01, 5.23060756e-01] arcsec>}
```
64bit
```
{'x': <Quantity [ 5.68434189e-14, -1.04855012e+01, -2.09955502e+01] arcsec>, 'y': <Quantity [1.13686838e-13, 2.68536149e-01, 5.23060756e-01] arcsec>}
```
It would seem that the sign is the issue.
Doing a git bisect on astropy got me to this PR that "broke" it.
https://github.com/astropy/astropy/pull/8594
For now, the test is skipped on 32bit.
However it seems that https://github.com/sunpy/sunpy/pull/3223 fixed it?!
--- END ISSUE ---
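The mechanism behind the one-pixel difference is ordinary ceiling/floor arithmetic applied to a residual that should be exactly zero. The snippet below is not the actual code in `sunpy.image.coalignment`; it only shows how a platform-dependent sign flip on a ~1e-15 value can move a rounded clip boundary by a whole pixel:

```python
import numpy as np

eps = 2.96311832e-15                    # the residual reported above
print(np.ceil(+eps), np.ceil(-eps))     # 1.0 -0.0  -> a full pixel apart
print(np.floor(+eps), np.floor(-eps))   # 0.0 -1.0  -> likewise
```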
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sunpy/physics/solar_rotation.py`
Content:
```
1 """
2 This module provides routines for applying solar rotation functions to
3 map sequences.
4 """
5
6 import numpy as np
7
8 import astropy.units as u
9
10 from sunpy.physics.differential_rotation import solar_rotate_coordinate
11 from sunpy.image.coalignment import apply_shifts
12
13 __author__ = 'J. Ireland'
14
15 __all__ = ['calculate_solar_rotate_shift', 'mapsequence_solar_derotate']
16
17
18 def calculate_solar_rotate_shift(mc, layer_index=0, **kwargs):
19 """
20 Calculate the shift that must be applied to each map contained in a mapsequence
21 in order to compensate for solar rotation.
22
23 The center of the map is used to calculate the position of each mapsequence
24 layer. Shifts are calculated relative to a specified layer in the mapsequence.
25 When using this functionality, it is a good idea to check that the shifts
26 that were applied to were reasonable and expected. One way of checking this
27 is to animate the original mapsequence, animate the derotated mapsequence, and
28 compare the differences you see to the calculated shifts. An example use is
29 as follows. If you select data from the SDO cutout service, it is common to
30 not use the solar tracking implemented by this service. This is because (at
31 time of writing) the solar tracking implemented by that service moves the
32 image by single pixels at a time. This is not optimal for many use cases,
33 as it introduces artificial jumps in the data. So with solar tracking not
34 chosen, the selected area is like a window through which you can see the
35 Sun rotating underneath.
36
37 Parameters
38 ----------
39 mc : `sunpy.map.MapSequence`
40 The input mapsequence.
41 layer_index : int
42 The index layer. Shifts are calculated relative to the time of
43 this layer.
44 ``**kwargs``
45 These keywords are passed to the function
46 `sunpy.physics.differential_rotation.solar_rotate_coordinate`.
47 Returns
48 -------
49 x, y : `~astropy.units.Quantity`, ~astropy.units.Quantity`
50 The shifts relative to the index layer that can be applied
51 to the input mapsequence in order to compensate for solar rotation.
52 The shifts are given in arcseconds as understood in helioprojective
53 coordinates systems.
54 """
55 # Size of the data
56 nt = len(mc.maps)
57
58 # Storage for the shifts in arcseconds
59 xshift_arcseconds = np.zeros(nt) * u.arcsec
60 yshift_arcseconds = np.zeros_like(xshift_arcseconds)
61
62 # Layer that
63 rotate_to_this_layer = mc.maps[layer_index]
64
65 # Calculate the rotations and the shifts
66 for i, m in enumerate(mc):
67 # Calculate the rotation of the center of the map 'm' at its
68 # observation time to the observation time of the reference layer
69 # indicated by "layer_index".
70 new_coordinate = solar_rotate_coordinate(m.center,
71 observer=rotate_to_this_layer.observer_coordinate,
72 **kwargs)
73
74 # Calculate the shift in arcseconds
75 xshift_arcseconds[i] = new_coordinate.Tx - rotate_to_this_layer.center.Tx
76 yshift_arcseconds[i] = new_coordinate.Ty - rotate_to_this_layer.center.Ty
77
78 return {"x": xshift_arcseconds, "y": yshift_arcseconds}
79
80
81 def mapsequence_solar_derotate(mc, layer_index=0, clip=True, shift=None, **kwargs):
82 """
83 Move the layers in a mapsequence according to the input shifts.
84 If an input shift is not given, the shifts due to
85 solar rotation relative to an index layer is calculated and
86 applied. When using this functionality, it is a good idea to check
87 that the shifts that were applied to were reasonable and expected.
88 One way of checking this is to animate the original mapsequence, animate
89 the derotated mapsequence, and compare the differences you see to the
90 calculated shifts.
91
92 Parameters
93 ----------
94 mc : `sunpy.map.MapSequence`
95 A mapsequence of shape (ny, nx, nt), where nt is the number of layers in
96 the mapsequence.
97 layer_index : int
98 Solar derotation shifts of all maps in the mapsequence are assumed
99 to be relative to the layer in the mapsequence indexed by layer_index.
100 clip : bool
101 If True, then clip off x, y edges in the datasequence that are potentially
102 affected by edges effects.
103 ``**kwargs``
104 These keywords are passed to the function
105 `sunpy.physics.solar_rotation.calculate_solar_rotate_shift`.
106
107 Returns
108 -------
109 output : `sunpy.map.MapSequence`
110 The results of the shifts applied to the input mapsequence.
111
112 Examples
113 --------
114
115 >>> import sunpy.data.sample # doctest: +REMOTE_DATA
116 >>> from sunpy.physics.solar_rotation import mapsequence_solar_derotate
117 >>> map1 = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE) # doctest: +REMOTE_DATA
118 >>> map2 = sunpy.map.Map(sunpy.data.sample.EIT_195_IMAGE) # doctest: +REMOTE_DATA
119 >>> mc = sunpy.map.Map([map1, map2], sequence=True) # doctest: +REMOTE_DATA
120 >>> derotated_mc = mapsequence_solar_derotate(mc) # doctest: +REMOTE_DATA
121 >>> derotated_mc = mapsequence_solar_derotate(mc, layer_index=-1) # doctest: +REMOTE_DATA
122 >>> derotated_mc = mapsequence_solar_derotate(mc, clip=False) # doctest: +REMOTE_DATA
123 """
124
125 # Size of the data
126 nt = len(mc.maps)
127
128 # Storage for the pixel shifts and the shifts in arcseconds
129 xshift_keep = np.zeros(nt) * u.pix
130 yshift_keep = np.zeros_like(xshift_keep)
131
132 # If no shifts are passed in, calculate them. Otherwise,
133 # use the shifts passed in.
134 if shift is None:
135 shift = calculate_solar_rotate_shift(mc, layer_index=layer_index, **kwargs)
136 xshift_arcseconds = shift['x']
137 yshift_arcseconds = shift['y']
138
139 # Calculate the pixel shifts
140 for i, m in enumerate(mc):
141 xshift_keep[i] = xshift_arcseconds[i] / m.scale[0]
142 yshift_keep[i] = yshift_arcseconds[i] / m.scale[1]
143
144 # Apply the pixel shifts and return the mapsequence
145 return apply_shifts(mc, yshift_keep, xshift_keep, clip=clip)
146
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sunpy/physics/solar_rotation.py b/sunpy/physics/solar_rotation.py
--- a/sunpy/physics/solar_rotation.py
+++ b/sunpy/physics/solar_rotation.py
@@ -64,6 +64,10 @@
# Calculate the rotations and the shifts
for i, m in enumerate(mc):
+ # Skip the reference layer
+ if i == layer_index:
+ continue
+
# Calculate the rotation of the center of the map 'm' at its
# observation time to the observation time of the reference layer
# indicated by "layer_index".
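With the reference layer skipped, its entry in the shift arrays keeps the preallocated value, so the shift of layer `layer_index` is exactly zero regardless of round-off or platform. A minimal illustration of that invariant (assuming astropy is available):

```python
import numpy as np
import astropy.units as u

nt, layer_index = 3, 0
xshift_arcseconds = np.zeros(nt) * u.arcsec
# ... the loop now fills every index except layer_index ...
assert xshift_arcseconds[layer_index] == 0 * u.arcsec   # exact, not ~1e-14
```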
|
{"golden_diff": "diff --git a/sunpy/physics/solar_rotation.py b/sunpy/physics/solar_rotation.py\n--- a/sunpy/physics/solar_rotation.py\n+++ b/sunpy/physics/solar_rotation.py\n@@ -64,6 +64,10 @@\n \n # Calculate the rotations and the shifts\n for i, m in enumerate(mc):\n+ # Skip the reference layer\n+ if i == layer_index:\n+ continue\n+\n # Calculate the rotation of the center of the map 'm' at its\n # observation time to the observation time of the reference layer\n # indicated by \"layer_index\".\n", "issue": "test_mapsequence_solar_derotate fails under 32bit with Astropy 3.2\nWhen `test_mapsequence_solar_derotate` is run with the `clip` keyword (second stage of that test).\r\n\r\nIt fails with this output:\r\n\r\n```python\r\n # Test that the shape of data is correct when clipped\r\n clipped_shape = (25, 18)\r\n for m in tmc:\r\n> assert(m.data.shape == clipped_shape)\r\nE assert (25, 19) == (25, 18)\r\nE At index 1 diff: 19 != 18\r\nE Use -v to get the full diff\r\n```\r\n\r\nTurns out that the return for where to clip is different.\r\n \r\n32bit\r\n```python\r\n[_lower_clip(x.value), _upper_clip(x.value)] * u.pix\r\n<Quantity [0., 2.] pix>\r\n```\r\n\r\n64bit\r\n```python\r\n[_lower_clip(x.value), _upper_clip(x.value)] * u.pix\r\n<Quantity [1., 2.] pix>\r\n```\r\n\r\nThe `x.value` is not the same.\r\n\r\n32bit\r\n```\r\narray([-2.96311832e-15, 5.46585361e-01, 1.09445035e+00])\r\n```\r\n\r\n64bit\r\n```\r\narray([2.96311832e-15, 5.46585361e-01, 1.09445035e+00])\r\n```\r\n\r\nSo it comes from `calculate_solar_rotate_shift`\r\n\r\n32bit\r\n```\r\n{'x': <Quantity [-5.68434189e-14, -1.04855012e+01, -2.09955502e+01] arcsec>, 'y': <Quantity [5.68434189e-14, 2.68536149e-01, 5.23060756e-01] arcsec>}\r\n```\r\n\r\n64bit\r\n```\r\n{'x': <Quantity [ 5.68434189e-14, -1.04855012e+01, -2.09955502e+01] arcsec>, 'y': <Quantity [1.13686838e-13, 2.68536149e-01, 5.23060756e-01] arcsec>}\r\n```\r\n\r\nIt would seem that the sign is the issue. \r\n\r\nDoing a git bisect on astropy got me to this PR that \"broke\" it.\r\n\r\nhttps://github.com/astropy/astropy/pull/8594\r\n\r\nFor now, the test is skipped on 32bit. \r\nHowever it seems that https://github.com/sunpy/sunpy/pull/3223 fixed it?!\n", "before_files": [{"content": "\"\"\"\nThis module provides routines for applying solar rotation functions to\nmap sequences.\n\"\"\"\n\nimport numpy as np\n\nimport astropy.units as u\n\nfrom sunpy.physics.differential_rotation import solar_rotate_coordinate\nfrom sunpy.image.coalignment import apply_shifts\n\n__author__ = 'J. Ireland'\n\n__all__ = ['calculate_solar_rotate_shift', 'mapsequence_solar_derotate']\n\n\ndef calculate_solar_rotate_shift(mc, layer_index=0, **kwargs):\n \"\"\"\n Calculate the shift that must be applied to each map contained in a mapsequence\n in order to compensate for solar rotation.\n\n The center of the map is used to calculate the position of each mapsequence\n layer. Shifts are calculated relative to a specified layer in the mapsequence.\n When using this functionality, it is a good idea to check that the shifts\n that were applied to were reasonable and expected. One way of checking this\n is to animate the original mapsequence, animate the derotated mapsequence, and\n compare the differences you see to the calculated shifts. An example use is\n as follows. If you select data from the SDO cutout service, it is common to\n not use the solar tracking implemented by this service. 
This is because (at\n time of writing) the solar tracking implemented by that service moves the\n image by single pixels at a time. This is not optimal for many use cases,\n as it introduces artificial jumps in the data. So with solar tracking not\n chosen, the selected area is like a window through which you can see the\n Sun rotating underneath.\n\n Parameters\n ----------\n mc : `sunpy.map.MapSequence`\n The input mapsequence.\n layer_index : int\n The index layer. Shifts are calculated relative to the time of\n this layer.\n ``**kwargs``\n These keywords are passed to the function\n `sunpy.physics.differential_rotation.solar_rotate_coordinate`.\n Returns\n -------\n x, y : `~astropy.units.Quantity`, ~astropy.units.Quantity`\n The shifts relative to the index layer that can be applied\n to the input mapsequence in order to compensate for solar rotation.\n The shifts are given in arcseconds as understood in helioprojective\n coordinates systems.\n \"\"\"\n # Size of the data\n nt = len(mc.maps)\n\n # Storage for the shifts in arcseconds\n xshift_arcseconds = np.zeros(nt) * u.arcsec\n yshift_arcseconds = np.zeros_like(xshift_arcseconds)\n\n # Layer that\n rotate_to_this_layer = mc.maps[layer_index]\n\n # Calculate the rotations and the shifts\n for i, m in enumerate(mc):\n # Calculate the rotation of the center of the map 'm' at its\n # observation time to the observation time of the reference layer\n # indicated by \"layer_index\".\n new_coordinate = solar_rotate_coordinate(m.center,\n observer=rotate_to_this_layer.observer_coordinate,\n **kwargs)\n\n # Calculate the shift in arcseconds\n xshift_arcseconds[i] = new_coordinate.Tx - rotate_to_this_layer.center.Tx\n yshift_arcseconds[i] = new_coordinate.Ty - rotate_to_this_layer.center.Ty\n\n return {\"x\": xshift_arcseconds, \"y\": yshift_arcseconds}\n\n\ndef mapsequence_solar_derotate(mc, layer_index=0, clip=True, shift=None, **kwargs):\n \"\"\"\n Move the layers in a mapsequence according to the input shifts.\n If an input shift is not given, the shifts due to\n solar rotation relative to an index layer is calculated and\n applied. 
When using this functionality, it is a good idea to check\n that the shifts that were applied to were reasonable and expected.\n One way of checking this is to animate the original mapsequence, animate\n the derotated mapsequence, and compare the differences you see to the\n calculated shifts.\n\n Parameters\n ----------\n mc : `sunpy.map.MapSequence`\n A mapsequence of shape (ny, nx, nt), where nt is the number of layers in\n the mapsequence.\n layer_index : int\n Solar derotation shifts of all maps in the mapsequence are assumed\n to be relative to the layer in the mapsequence indexed by layer_index.\n clip : bool\n If True, then clip off x, y edges in the datasequence that are potentially\n affected by edges effects.\n ``**kwargs``\n These keywords are passed to the function\n `sunpy.physics.solar_rotation.calculate_solar_rotate_shift`.\n\n Returns\n -------\n output : `sunpy.map.MapSequence`\n The results of the shifts applied to the input mapsequence.\n\n Examples\n --------\n\n >>> import sunpy.data.sample # doctest: +REMOTE_DATA\n >>> from sunpy.physics.solar_rotation import mapsequence_solar_derotate\n >>> map1 = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE) # doctest: +REMOTE_DATA\n >>> map2 = sunpy.map.Map(sunpy.data.sample.EIT_195_IMAGE) # doctest: +REMOTE_DATA\n >>> mc = sunpy.map.Map([map1, map2], sequence=True) # doctest: +REMOTE_DATA\n >>> derotated_mc = mapsequence_solar_derotate(mc) # doctest: +REMOTE_DATA\n >>> derotated_mc = mapsequence_solar_derotate(mc, layer_index=-1) # doctest: +REMOTE_DATA\n >>> derotated_mc = mapsequence_solar_derotate(mc, clip=False) # doctest: +REMOTE_DATA\n \"\"\"\n\n # Size of the data\n nt = len(mc.maps)\n\n # Storage for the pixel shifts and the shifts in arcseconds\n xshift_keep = np.zeros(nt) * u.pix\n yshift_keep = np.zeros_like(xshift_keep)\n\n # If no shifts are passed in, calculate them. Otherwise,\n # use the shifts passed in.\n if shift is None:\n shift = calculate_solar_rotate_shift(mc, layer_index=layer_index, **kwargs)\n xshift_arcseconds = shift['x']\n yshift_arcseconds = shift['y']\n\n # Calculate the pixel shifts\n for i, m in enumerate(mc):\n xshift_keep[i] = xshift_arcseconds[i] / m.scale[0]\n yshift_keep[i] = yshift_arcseconds[i] / m.scale[1]\n\n # Apply the pixel shifts and return the mapsequence\n return apply_shifts(mc, yshift_keep, xshift_keep, clip=clip)\n", "path": "sunpy/physics/solar_rotation.py"}], "after_files": [{"content": "\"\"\"\nThis module provides routines for applying solar rotation functions to\nmap sequences.\n\"\"\"\n\nimport numpy as np\n\nimport astropy.units as u\n\nfrom sunpy.physics.differential_rotation import solar_rotate_coordinate\nfrom sunpy.image.coalignment import apply_shifts\n\n__author__ = 'J. Ireland'\n\n__all__ = ['calculate_solar_rotate_shift', 'mapsequence_solar_derotate']\n\n\ndef calculate_solar_rotate_shift(mc, layer_index=0, **kwargs):\n \"\"\"\n Calculate the shift that must be applied to each map contained in a mapsequence\n in order to compensate for solar rotation.\n\n The center of the map is used to calculate the position of each mapsequence\n layer. Shifts are calculated relative to a specified layer in the mapsequence.\n When using this functionality, it is a good idea to check that the shifts\n that were applied to were reasonable and expected. One way of checking this\n is to animate the original mapsequence, animate the derotated mapsequence, and\n compare the differences you see to the calculated shifts. An example use is\n as follows. 
If you select data from the SDO cutout service, it is common to\n not use the solar tracking implemented by this service. This is because (at\n time of writing) the solar tracking implemented by that service moves the\n image by single pixels at a time. This is not optimal for many use cases,\n as it introduces artificial jumps in the data. So with solar tracking not\n chosen, the selected area is like a window through which you can see the\n Sun rotating underneath.\n\n Parameters\n ----------\n mc : `sunpy.map.MapSequence`\n The input mapsequence.\n layer_index : int\n The index layer. Shifts are calculated relative to the time of\n this layer.\n ``**kwargs``\n These keywords are passed to the function\n `sunpy.physics.differential_rotation.solar_rotate_coordinate`.\n Returns\n -------\n x, y : `~astropy.units.Quantity`, ~astropy.units.Quantity`\n The shifts relative to the index layer that can be applied\n to the input mapsequence in order to compensate for solar rotation.\n The shifts are given in arcseconds as understood in helioprojective\n coordinates systems.\n \"\"\"\n # Size of the data\n nt = len(mc.maps)\n\n # Storage for the shifts in arcseconds\n xshift_arcseconds = np.zeros(nt) * u.arcsec\n yshift_arcseconds = np.zeros_like(xshift_arcseconds)\n\n # Layer that\n rotate_to_this_layer = mc.maps[layer_index]\n\n # Calculate the rotations and the shifts\n for i, m in enumerate(mc):\n # Skip the reference layer\n if i == layer_index:\n continue\n\n # Calculate the rotation of the center of the map 'm' at its\n # observation time to the observation time of the reference layer\n # indicated by \"layer_index\".\n new_coordinate = solar_rotate_coordinate(m.center,\n observer=rotate_to_this_layer.observer_coordinate,\n **kwargs)\n\n # Calculate the shift in arcseconds\n xshift_arcseconds[i] = new_coordinate.Tx - rotate_to_this_layer.center.Tx\n yshift_arcseconds[i] = new_coordinate.Ty - rotate_to_this_layer.center.Ty\n\n return {\"x\": xshift_arcseconds, \"y\": yshift_arcseconds}\n\n\ndef mapsequence_solar_derotate(mc, layer_index=0, clip=True, shift=None, **kwargs):\n \"\"\"\n Move the layers in a mapsequence according to the input shifts.\n If an input shift is not given, the shifts due to\n solar rotation relative to an index layer is calculated and\n applied. 
When using this functionality, it is a good idea to check\n that the shifts that were applied to were reasonable and expected.\n One way of checking this is to animate the original mapsequence, animate\n the derotated mapsequence, and compare the differences you see to the\n calculated shifts.\n\n Parameters\n ----------\n mc : `sunpy.map.MapSequence`\n A mapsequence of shape (ny, nx, nt), where nt is the number of layers in\n the mapsequence.\n layer_index : int\n Solar derotation shifts of all maps in the mapsequence are assumed\n to be relative to the layer in the mapsequence indexed by layer_index.\n clip : bool\n If True, then clip off x, y edges in the datasequence that are potentially\n affected by edges effects.\n ``**kwargs``\n These keywords are passed to the function\n `sunpy.physics.solar_rotation.calculate_solar_rotate_shift`.\n\n Returns\n -------\n output : `sunpy.map.MapSequence`\n The results of the shifts applied to the input mapsequence.\n\n Examples\n --------\n\n >>> import sunpy.data.sample # doctest: +REMOTE_DATA\n >>> from sunpy.physics.solar_rotation import mapsequence_solar_derotate\n >>> map1 = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE) # doctest: +REMOTE_DATA\n >>> map2 = sunpy.map.Map(sunpy.data.sample.EIT_195_IMAGE) # doctest: +REMOTE_DATA\n >>> mc = sunpy.map.Map([map1, map2], sequence=True) # doctest: +REMOTE_DATA\n >>> derotated_mc = mapsequence_solar_derotate(mc) # doctest: +REMOTE_DATA\n >>> derotated_mc = mapsequence_solar_derotate(mc, layer_index=-1) # doctest: +REMOTE_DATA\n >>> derotated_mc = mapsequence_solar_derotate(mc, clip=False) # doctest: +REMOTE_DATA\n \"\"\"\n\n # Size of the data\n nt = len(mc.maps)\n\n # Storage for the pixel shifts and the shifts in arcseconds\n xshift_keep = np.zeros(nt) * u.pix\n yshift_keep = np.zeros_like(xshift_keep)\n\n # If no shifts are passed in, calculate them. Otherwise,\n # use the shifts passed in.\n if shift is None:\n shift = calculate_solar_rotate_shift(mc, layer_index=layer_index, **kwargs)\n xshift_arcseconds = shift['x']\n yshift_arcseconds = shift['y']\n\n # Calculate the pixel shifts\n for i, m in enumerate(mc):\n xshift_keep[i] = xshift_arcseconds[i] / m.scale[0]\n yshift_keep[i] = yshift_arcseconds[i] / m.scale[1]\n\n # Apply the pixel shifts and return the mapsequence\n return apply_shifts(mc, yshift_keep, xshift_keep, clip=clip)\n", "path": "sunpy/physics/solar_rotation.py"}]}
| 2,726 | 139 |
gh_patches_debug_7107
|
rasdani/github-patches
|
git_diff
|
pytorch__text-65
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Possible bug in LanguageModelingDataset
In the code for [`LanguageModelingDataset`](https://github.com/pytorch/text/blob/master/torchtext/datasets/language_modeling.py), the original text seems to be pre-processed twice, viz.:
- `text += text_field.preprocess(line)` [at line 22](https://github.com/pytorch/text/blob/master/torchtext/datasets/language_modeling.py#L22)
- `examples = [data.Example.fromlist([text], fields)]` [at line 26](https://github.com/pytorch/text/blob/master/torchtext/datasets/language_modeling.py#L26), which in turn calls
`setattr(ex, name, field.preprocess(val))` [at line 44 of example.py](https://github.com/pytorch/text/blob/master/torchtext/data/example.py#L44)
In fact, if I try to create a simple LanguageModelingDataset, I get an error as follows:
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/riddasgu/.local/lib/python2.7/site-packages/torchtext/datasets/language_modeling.py", line 28, in __init__
examples = [data.Example.fromlist([text], fields)]
File "/home/riddasgu/.local/lib/python2.7/site-packages/torchtext/data/example.py", line 44, in fromlist
setattr(ex, name, field.preprocess(val))
File "/home/riddasgu/.local/lib/python2.7/site-packages/torchtext/data/field.py", line 91, in preprocess
x = self.tokenize(x)
File "/home/riddasgu/.local/lib/python2.7/site-packages/torchtext/data/field.py", line 63, in <lambda>
tokenize=(lambda s: s.split()), include_lengths=False,
AttributeError: 'list' object has no attribute 'split'
```
--- END ISSUE ---
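The failure can be reproduced without a dataset at all, by calling `preprocess` twice the way `LanguageModelingDataset` plus `Example.fromlist` effectively do. This assumes a torchtext version from this era in which `data.Field` still exists:

```python
from torchtext import data

TEXT = data.Field()                              # default tokenizer: str.split
tokens = TEXT.preprocess("The quick brown fox")  # first pass -> list of tokens

try:
    TEXT.preprocess(tokens)                      # second pass, as Example.fromlist does
except AttributeError as err:
    print(err)                                   # 'list' object has no attribute 'split'
```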
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchtext/data/field.py`
Content:
```
1 from collections import Counter, OrderedDict
2 import six
3 import torch
4 from torch.autograd import Variable
5
6 from .dataset import Dataset
7 from .pipeline import Pipeline
8 from .utils import get_tokenizer
9 from ..vocab import Vocab
10
11
12 class Field(object):
13 """Defines a datatype together with instructions for converting to Tensor.
14
15 Every dataset consists of one or more types of data. For instance, a text
16 classification dataset contains sentences and their classes, while a
17 machine translation dataset contains paired examples of text in two
18 languages. Each of these types of data is represented by a Field object,
19 which holds a Vocab object that defines the set of possible values for
20 elements of the field and their corresponding numerical representations.
21 The Field object also holds other parameters relating to how a datatype
22 should be numericalized, such as a tokenization method and the kind of
23 Tensor that should be produced.
24
25 If a Field is shared between two columns in a dataset (e.g., question and
26 answer in a QA dataset), then they will have a shared vocabulary.
27
28 Attributes:
29 sequential: Whether the datatype represents sequential data. If False,
30 no tokenization is applied. Default: True.
31 use_vocab: Whether to use a Vocab object. If False, the data in this
32 field should already be numerical. Default: True.
33 init_token: A token that will be prepended to every example using this
34 field, or None for no initial token. Default: None.
35 eos_token: A token that will be appended to every example using this
36 field, or None for no end-of-sentence token. Default: None.
37 fix_length: A fixed length that all examples using this field will be
38 padded to, or None for flexible sequence lengths. Default: None.
39 tensor_type: The torch.Tensor class that represents a batch of examples
40 of this kind of data. Default: torch.LongTensor.
41 preprocessing: The Pipeline that will be applied to examples
42 using this field after tokenizing but before numericalizing. Many
43 Datasets replace this attribute with a custom preprocessor.
44 Default: the identity Pipeline.
45 postprocessing: A Pipeline that will be applied to examples using
46 this field after numericalizing but before the numbers are turned
47 into a Tensor. Default: the identity Pipeline.
48 lower: Whether to lowercase the text in this field. Default: False.
49 tokenize: The function used to tokenize strings using this field into
50 sequential examples. Default: str.split.
51 include_lengths: Whether to return a tuple of a padded minibatch and
52 a list containing the lengths of each examples, or just a padded
53 minibatch. Default: False.
54 batch_first: Whether to produce tensors with the batch dimension first.
55 Default: False.
56 pad_token: The string token used as padding. Default: "<pad>".
57 """
58
59 def __init__(
60 self, sequential=True, use_vocab=True, init_token=None,
61 eos_token=None, fix_length=None, tensor_type=torch.LongTensor,
62 preprocessing=None, postprocessing=None, lower=False,
63 tokenize=(lambda s: s.split()), include_lengths=False,
64 batch_first=False, pad_token="<pad>"):
65 self.sequential = sequential
66 self.use_vocab = use_vocab
67 self.init_token = init_token
68 self.eos_token = eos_token
69 self.fix_length = fix_length
70 self.tensor_type = tensor_type
71 self.preprocessing = (Pipeline() if preprocessing
72 is None else preprocessing)
73 self.postprocessing = (Pipeline() if postprocessing
74 is None else postprocessing)
75 self.lower = lower
76 self.tokenize = get_tokenizer(tokenize)
77 self.include_lengths = include_lengths
78 self.batch_first = batch_first
79 self.pad_token = pad_token if self.sequential else None
80
81 def preprocess(self, x):
82 """Load a single example using this field, tokenizing if necessary.
83
84 If the input is a Python 2 `str`, it will be converted to Unicode
85 first. If `sequential=True`, it will be tokenized. Then the input
86 will be optionally lowercased and passed to the user-provided
87 `preprocessing` Pipeline."""
88 if six.PY2 and isinstance(x, six.string_types):
89 x = Pipeline(lambda s: unicode(s, encoding='utf-8'))(x)
90 if self.sequential:
91 x = self.tokenize(x)
92 if self.lower:
93 x = Pipeline(six.text_type.lower)(x)
94 return self.preprocessing(x)
95
96 def pad(self, minibatch):
97 """Pad a batch of examples using this field.
98
99 Pads to self.fix_length if provided, otherwise pads to the length of
100 the longest example in the batch. Prepends self.init_token and appends
101 self.eos_token if those attributes are not None. Returns a tuple of the
102 padded list and a list containing lengths of each example if
103 `self.include_lengths` is `True`, else just returns the padded list.
104 """
105 minibatch = list(minibatch)
106 if not self.sequential:
107 return minibatch
108 if self.fix_length is None:
109 max_len = max(len(x) for x in minibatch)
110 else:
111 max_len = self.fix_length + (
112 self.init_token, self.eos_token).count(None) - 2
113 padded, lengths = [], []
114 for x in minibatch:
115 padded.append(
116 ([] if self.init_token is None else [self.init_token]) +
117 list(x[:max_len]) +
118 ([] if self.eos_token is None else [self.eos_token]) +
119 [self.pad_token] * max(0, max_len - len(x)))
120 lengths.append(len(padded[-1]) - max(0, max_len - len(x)))
121 if self.include_lengths:
122 return (padded, lengths)
123 return padded
124
125 def build_vocab(self, *args, **kwargs):
126 """Construct the Vocab object for this field from one or more datasets.
127
128 Arguments:
129 Positional arguments: Dataset objects or other iterable data
130 sources from which to construct the Vocab object that
131 represents the set of possible values for this field. If
132 a Dataset object is provided, all columns corresponding
133 to this field are used; individual columns can also be
134 provided directly.
135 Remaining keyword arguments: Passed to the constructor of Vocab.
136 """
137 counter = Counter()
138 sources = []
139 for arg in args:
140 if isinstance(arg, Dataset):
141 sources += [getattr(arg, name) for name, field in
142 arg.fields.items() if field is self]
143 else:
144 sources.append(arg)
145 for data in sources:
146 for x in data:
147 if not self.sequential:
148 x = [x]
149 counter.update(x)
150 specials = list(OrderedDict.fromkeys(
151 tok for tok in [self.pad_token, self.init_token, self.eos_token]
152 if tok is not None))
153 self.vocab = Vocab(counter, specials=specials, **kwargs)
154
155 def numericalize(self, arr, device=None, train=True):
156 """Turn a batch of examples that use this field into a Variable.
157
158 If the field has include_lengths=True, a tensor of lengths will be
159 included in the return value.
160
161 Arguments:
162 arr: List of tokenized and padded examples, or tuple of a padded
163 list and a list of lengths if self.include_lengths is True.
164 device: Device to create the Variable's Tensor on. Use -1 for
165 CPU and None for the currently active GPU device. Default:
166 None.
167 train: Whether the batch is for a training set. If False, the
168 Variable will be created with volatile=True. Default: True.
169 """
170 if isinstance(arr, tuple):
171 arr, lengths = arr
172 if self.use_vocab:
173 if self.sequential:
174 arr = [[self.vocab.stoi[x] for x in ex] for ex in arr]
175 else:
176 arr = [self.vocab.stoi[x] for x in arr]
177 arr = self.postprocessing(arr, self.vocab, train)
178 else:
179 arr = self.postprocessing(arr, train)
180 arr = self.tensor_type(arr)
181 if self.include_lengths:
182 lengths = torch.LongTensor(lengths)
183 if self.sequential and not self.batch_first:
184 arr.t_()
185 if device == -1:
186 if self.sequential:
187 arr = arr.contiguous()
188 else:
189 arr = arr.cuda(device)
190 if self.include_lengths:
191 lengths = lengths.cuda(device)
192 if self.include_lengths:
193 return Variable(arr, volatile=not train), lengths
194 return Variable(arr, volatile=not train)
195
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/torchtext/data/field.py b/torchtext/data/field.py
--- a/torchtext/data/field.py
+++ b/torchtext/data/field.py
@@ -87,7 +87,7 @@
`preprocessing` Pipeline."""
if six.PY2 and isinstance(x, six.string_types):
x = Pipeline(lambda s: unicode(s, encoding='utf-8'))(x)
- if self.sequential:
+ if self.sequential and isinstance(x, six.text_type):
x = self.tokenize(x)
if self.lower:
x = Pipeline(six.text_type.lower)(x)
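With the `isinstance(x, six.text_type)` guard in place, an already-tokenized list skips `str.split` and (with the default identity preprocessing Pipeline) comes back unchanged, so callers that pre-tokenize, such as `LanguageModelingDataset`, no longer crash. A small check of that behaviour with the patch applied:

```python
from torchtext import data

TEXT = data.Field()
tokens = TEXT.preprocess("The quick brown fox")
# A second pass is now a no-op for sequential fields fed a token list.
assert TEXT.preprocess(tokens) == tokens
```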
|
{"golden_diff": "diff --git a/torchtext/data/field.py b/torchtext/data/field.py\n--- a/torchtext/data/field.py\n+++ b/torchtext/data/field.py\n@@ -87,7 +87,7 @@\n `preprocessing` Pipeline.\"\"\"\n if six.PY2 and isinstance(x, six.string_types):\n x = Pipeline(lambda s: unicode(s, encoding='utf-8'))(x)\n- if self.sequential:\n+ if self.sequential and isinstance(x, six.text_type):\n x = self.tokenize(x)\n if self.lower:\n x = Pipeline(six.text_type.lower)(x)\n", "issue": "Possible bug in LanguageModelingDataset\nIn the code for [`LanguageModelingDataset`](https://github.com/pytorch/text/blob/master/torchtext/datasets/language_modeling.py), the original text seems to be pre-processed twice, viz.:\r\n\r\n- `text += text_field.preprocess(line)` [at line 22](https://github.com/pytorch/text/blob/master/torchtext/datasets/language_modeling.py#L22)\r\n- `examples = [data.Example.fromlist([text], fields)]` [at line 26](https://github.com/pytorch/text/blob/master/torchtext/datasets/language_modeling.py#L26), which in turn calls \r\n`setattr(ex, name, field.preprocess(val))` [at line 44 of example.py](https://github.com/pytorch/text/blob/master/torchtext/data/example.py#L44)\r\n\r\nIn fact, if I try to create a simple LanguageModelingDataset, I am getting an error as follows:\r\n```\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/riddasgu/.local/lib/python2.7/site-packages/torchtext/datasets/language_modeling.py\", line 28, in __init__\r\n examples = [data.Example.fromlist([text], fields)]\r\n File \"/home/riddasgu/.local/lib/python2.7/site-packages/torchtext/data/example.py\", line 44, in fromlist\r\n setattr(ex, name, field.preprocess(val))\r\n File \"/home/riddasgu/.local/lib/python2.7/site-packages/torchtext/data/field.py\", line 91, in preprocess\r\n x = self.tokenize(x)\r\n File \"/home/riddasgu/.local/lib/python2.7/site-packages/torchtext/data/field.py\", line 63, in <lambda>\r\n tokenize=(lambda s: s.split()), include_lengths=False,\r\nAttributeError: 'list' object has no attribute 'split'\r\n```\n", "before_files": [{"content": "from collections import Counter, OrderedDict\nimport six\nimport torch\nfrom torch.autograd import Variable\n\nfrom .dataset import Dataset\nfrom .pipeline import Pipeline\nfrom .utils import get_tokenizer\nfrom ..vocab import Vocab\n\n\nclass Field(object):\n \"\"\"Defines a datatype together with instructions for converting to Tensor.\n\n Every dataset consists of one or more types of data. For instance, a text\n classification dataset contains sentences and their classes, while a\n machine translation dataset contains paired examples of text in two\n languages. Each of these types of data is represented by a Field object,\n which holds a Vocab object that defines the set of possible values for\n elements of the field and their corresponding numerical representations.\n The Field object also holds other parameters relating to how a datatype\n should be numericalized, such as a tokenization method and the kind of\n Tensor that should be produced.\n\n If a Field is shared between two columns in a dataset (e.g., question and\n answer in a QA dataset), then they will have a shared vocabulary.\n\n Attributes:\n sequential: Whether the datatype represents sequential data. If False,\n no tokenization is applied. Default: True.\n use_vocab: Whether to use a Vocab object. If False, the data in this\n field should already be numerical. 
Default: True.\n init_token: A token that will be prepended to every example using this\n field, or None for no initial token. Default: None.\n eos_token: A token that will be appended to every example using this\n field, or None for no end-of-sentence token. Default: None.\n fix_length: A fixed length that all examples using this field will be\n padded to, or None for flexible sequence lengths. Default: None.\n tensor_type: The torch.Tensor class that represents a batch of examples\n of this kind of data. Default: torch.LongTensor.\n preprocessing: The Pipeline that will be applied to examples\n using this field after tokenizing but before numericalizing. Many\n Datasets replace this attribute with a custom preprocessor.\n Default: the identity Pipeline.\n postprocessing: A Pipeline that will be applied to examples using\n this field after numericalizing but before the numbers are turned\n into a Tensor. Default: the identity Pipeline.\n lower: Whether to lowercase the text in this field. Default: False.\n tokenize: The function used to tokenize strings using this field into\n sequential examples. Default: str.split.\n include_lengths: Whether to return a tuple of a padded minibatch and\n a list containing the lengths of each examples, or just a padded\n minibatch. Default: False.\n batch_first: Whether to produce tensors with the batch dimension first.\n Default: False.\n pad_token: The string token used as padding. Default: \"<pad>\".\n \"\"\"\n\n def __init__(\n self, sequential=True, use_vocab=True, init_token=None,\n eos_token=None, fix_length=None, tensor_type=torch.LongTensor,\n preprocessing=None, postprocessing=None, lower=False,\n tokenize=(lambda s: s.split()), include_lengths=False,\n batch_first=False, pad_token=\"<pad>\"):\n self.sequential = sequential\n self.use_vocab = use_vocab\n self.init_token = init_token\n self.eos_token = eos_token\n self.fix_length = fix_length\n self.tensor_type = tensor_type\n self.preprocessing = (Pipeline() if preprocessing\n is None else preprocessing)\n self.postprocessing = (Pipeline() if postprocessing\n is None else postprocessing)\n self.lower = lower\n self.tokenize = get_tokenizer(tokenize)\n self.include_lengths = include_lengths\n self.batch_first = batch_first\n self.pad_token = pad_token if self.sequential else None\n\n def preprocess(self, x):\n \"\"\"Load a single example using this field, tokenizing if necessary.\n\n If the input is a Python 2 `str`, it will be converted to Unicode\n first. If `sequential=True`, it will be tokenized. Then the input\n will be optionally lowercased and passed to the user-provided\n `preprocessing` Pipeline.\"\"\"\n if six.PY2 and isinstance(x, six.string_types):\n x = Pipeline(lambda s: unicode(s, encoding='utf-8'))(x)\n if self.sequential:\n x = self.tokenize(x)\n if self.lower:\n x = Pipeline(six.text_type.lower)(x)\n return self.preprocessing(x)\n\n def pad(self, minibatch):\n \"\"\"Pad a batch of examples using this field.\n\n Pads to self.fix_length if provided, otherwise pads to the length of\n the longest example in the batch. Prepends self.init_token and appends\n self.eos_token if those attributes are not None. 
Returns a tuple of the\n padded list and a list containing lengths of each example if\n `self.include_lengths` is `True`, else just returns the padded list.\n \"\"\"\n minibatch = list(minibatch)\n if not self.sequential:\n return minibatch\n if self.fix_length is None:\n max_len = max(len(x) for x in minibatch)\n else:\n max_len = self.fix_length + (\n self.init_token, self.eos_token).count(None) - 2\n padded, lengths = [], []\n for x in minibatch:\n padded.append(\n ([] if self.init_token is None else [self.init_token]) +\n list(x[:max_len]) +\n ([] if self.eos_token is None else [self.eos_token]) +\n [self.pad_token] * max(0, max_len - len(x)))\n lengths.append(len(padded[-1]) - max(0, max_len - len(x)))\n if self.include_lengths:\n return (padded, lengths)\n return padded\n\n def build_vocab(self, *args, **kwargs):\n \"\"\"Construct the Vocab object for this field from one or more datasets.\n\n Arguments:\n Positional arguments: Dataset objects or other iterable data\n sources from which to construct the Vocab object that\n represents the set of possible values for this field. If\n a Dataset object is provided, all columns corresponding\n to this field are used; individual columns can also be\n provided directly.\n Remaining keyword arguments: Passed to the constructor of Vocab.\n \"\"\"\n counter = Counter()\n sources = []\n for arg in args:\n if isinstance(arg, Dataset):\n sources += [getattr(arg, name) for name, field in\n arg.fields.items() if field is self]\n else:\n sources.append(arg)\n for data in sources:\n for x in data:\n if not self.sequential:\n x = [x]\n counter.update(x)\n specials = list(OrderedDict.fromkeys(\n tok for tok in [self.pad_token, self.init_token, self.eos_token]\n if tok is not None))\n self.vocab = Vocab(counter, specials=specials, **kwargs)\n\n def numericalize(self, arr, device=None, train=True):\n \"\"\"Turn a batch of examples that use this field into a Variable.\n\n If the field has include_lengths=True, a tensor of lengths will be\n included in the return value.\n\n Arguments:\n arr: List of tokenized and padded examples, or tuple of a padded\n list and a list of lengths if self.include_lengths is True.\n device: Device to create the Variable's Tensor on. Use -1 for\n CPU and None for the currently active GPU device. Default:\n None.\n train: Whether the batch is for a training set. If False, the\n Variable will be created with volatile=True. 
Default: True.\n \"\"\"\n if isinstance(arr, tuple):\n arr, lengths = arr\n if self.use_vocab:\n if self.sequential:\n arr = [[self.vocab.stoi[x] for x in ex] for ex in arr]\n else:\n arr = [self.vocab.stoi[x] for x in arr]\n arr = self.postprocessing(arr, self.vocab, train)\n else:\n arr = self.postprocessing(arr, train)\n arr = self.tensor_type(arr)\n if self.include_lengths:\n lengths = torch.LongTensor(lengths)\n if self.sequential and not self.batch_first:\n arr.t_()\n if device == -1:\n if self.sequential:\n arr = arr.contiguous()\n else:\n arr = arr.cuda(device)\n if self.include_lengths:\n lengths = lengths.cuda(device)\n if self.include_lengths:\n return Variable(arr, volatile=not train), lengths\n return Variable(arr, volatile=not train)\n", "path": "torchtext/data/field.py"}], "after_files": [{"content": "from collections import Counter, OrderedDict\nimport six\nimport torch\nfrom torch.autograd import Variable\n\nfrom .dataset import Dataset\nfrom .pipeline import Pipeline\nfrom .utils import get_tokenizer\nfrom ..vocab import Vocab\n\n\nclass Field(object):\n \"\"\"Defines a datatype together with instructions for converting to Tensor.\n\n Every dataset consists of one or more types of data. For instance, a text\n classification dataset contains sentences and their classes, while a\n machine translation dataset contains paired examples of text in two\n languages. Each of these types of data is represented by a Field object,\n which holds a Vocab object that defines the set of possible values for\n elements of the field and their corresponding numerical representations.\n The Field object also holds other parameters relating to how a datatype\n should be numericalized, such as a tokenization method and the kind of\n Tensor that should be produced.\n\n If a Field is shared between two columns in a dataset (e.g., question and\n answer in a QA dataset), then they will have a shared vocabulary.\n\n Attributes:\n sequential: Whether the datatype represents sequential data. If False,\n no tokenization is applied. Default: True.\n use_vocab: Whether to use a Vocab object. If False, the data in this\n field should already be numerical. Default: True.\n init_token: A token that will be prepended to every example using this\n field, or None for no initial token. Default: None.\n eos_token: A token that will be appended to every example using this\n field, or None for no end-of-sentence token. Default: None.\n fix_length: A fixed length that all examples using this field will be\n padded to, or None for flexible sequence lengths. Default: None.\n tensor_type: The torch.Tensor class that represents a batch of examples\n of this kind of data. Default: torch.LongTensor.\n preprocessing: The Pipeline that will be applied to examples\n using this field after tokenizing but before numericalizing. Many\n Datasets replace this attribute with a custom preprocessor.\n Default: the identity Pipeline.\n postprocessing: A Pipeline that will be applied to examples using\n this field after numericalizing but before the numbers are turned\n into a Tensor. Default: the identity Pipeline.\n lower: Whether to lowercase the text in this field. Default: False.\n tokenize: The function used to tokenize strings using this field into\n sequential examples. Default: str.split.\n include_lengths: Whether to return a tuple of a padded minibatch and\n a list containing the lengths of each examples, or just a padded\n minibatch. 
Default: False.\n batch_first: Whether to produce tensors with the batch dimension first.\n Default: False.\n pad_token: The string token used as padding. Default: \"<pad>\".\n \"\"\"\n\n def __init__(\n self, sequential=True, use_vocab=True, init_token=None,\n eos_token=None, fix_length=None, tensor_type=torch.LongTensor,\n preprocessing=None, postprocessing=None, lower=False,\n tokenize=(lambda s: s.split()), include_lengths=False,\n batch_first=False, pad_token=\"<pad>\"):\n self.sequential = sequential\n self.use_vocab = use_vocab\n self.init_token = init_token\n self.eos_token = eos_token\n self.fix_length = fix_length\n self.tensor_type = tensor_type\n self.preprocessing = (Pipeline() if preprocessing\n is None else preprocessing)\n self.postprocessing = (Pipeline() if postprocessing\n is None else postprocessing)\n self.lower = lower\n self.tokenize = get_tokenizer(tokenize)\n self.include_lengths = include_lengths\n self.batch_first = batch_first\n self.pad_token = pad_token if self.sequential else None\n\n def preprocess(self, x):\n \"\"\"Load a single example using this field, tokenizing if necessary.\n\n If the input is a Python 2 `str`, it will be converted to Unicode\n first. If `sequential=True`, it will be tokenized. Then the input\n will be optionally lowercased and passed to the user-provided\n `preprocessing` Pipeline.\"\"\"\n if six.PY2 and isinstance(x, six.string_types):\n x = Pipeline(lambda s: unicode(s, encoding='utf-8'))(x)\n if self.sequential and isinstance(x, six.text_type):\n x = self.tokenize(x)\n if self.lower:\n x = Pipeline(six.text_type.lower)(x)\n return self.preprocessing(x)\n\n def pad(self, minibatch):\n \"\"\"Pad a batch of examples using this field.\n\n Pads to self.fix_length if provided, otherwise pads to the length of\n the longest example in the batch. Prepends self.init_token and appends\n self.eos_token if those attributes are not None. Returns a tuple of the\n padded list and a list containing lengths of each example if\n `self.include_lengths` is `True`, else just returns the padded list.\n \"\"\"\n minibatch = list(minibatch)\n if not self.sequential:\n return minibatch\n if self.fix_length is None:\n max_len = max(len(x) for x in minibatch)\n else:\n max_len = self.fix_length + (\n self.init_token, self.eos_token).count(None) - 2\n padded, lengths = [], []\n for x in minibatch:\n padded.append(\n ([] if self.init_token is None else [self.init_token]) +\n list(x[:max_len]) +\n ([] if self.eos_token is None else [self.eos_token]) +\n [self.pad_token] * max(0, max_len - len(x)))\n lengths.append(len(padded[-1]) - max(0, max_len - len(x)))\n if self.include_lengths:\n return (padded, lengths)\n return padded\n\n def build_vocab(self, *args, **kwargs):\n \"\"\"Construct the Vocab object for this field from one or more datasets.\n\n Arguments:\n Positional arguments: Dataset objects or other iterable data\n sources from which to construct the Vocab object that\n represents the set of possible values for this field. 
If\n a Dataset object is provided, all columns corresponding\n to this field are used; individual columns can also be\n provided directly.\n Remaining keyword arguments: Passed to the constructor of Vocab.\n \"\"\"\n counter = Counter()\n sources = []\n for arg in args:\n if isinstance(arg, Dataset):\n sources += [getattr(arg, name) for name, field in\n arg.fields.items() if field is self]\n else:\n sources.append(arg)\n for data in sources:\n for x in data:\n if not self.sequential:\n x = [x]\n counter.update(x)\n specials = list(OrderedDict.fromkeys(\n tok for tok in [self.pad_token, self.init_token, self.eos_token]\n if tok is not None))\n self.vocab = Vocab(counter, specials=specials, **kwargs)\n\n def numericalize(self, arr, device=None, train=True):\n \"\"\"Turn a batch of examples that use this field into a Variable.\n\n If the field has include_lengths=True, a tensor of lengths will be\n included in the return value.\n\n Arguments:\n arr: List of tokenized and padded examples, or tuple of a padded\n list and a list of lengths if self.include_lengths is True.\n device: Device to create the Variable's Tensor on. Use -1 for\n CPU and None for the currently active GPU device. Default:\n None.\n train: Whether the batch is for a training set. If False, the\n Variable will be created with volatile=True. Default: True.\n \"\"\"\n if isinstance(arr, tuple):\n arr, lengths = arr\n if self.use_vocab:\n if self.sequential:\n arr = [[self.vocab.stoi[x] for x in ex] for ex in arr]\n else:\n arr = [self.vocab.stoi[x] for x in arr]\n arr = self.postprocessing(arr, self.vocab, train)\n else:\n arr = self.postprocessing(arr, train)\n arr = self.tensor_type(arr)\n if self.include_lengths:\n lengths = torch.LongTensor(lengths)\n if self.sequential and not self.batch_first:\n arr.t_()\n if device == -1:\n if self.sequential:\n arr = arr.contiguous()\n else:\n arr = arr.cuda(device)\n if self.include_lengths:\n lengths = lengths.cuda(device)\n if self.include_lengths:\n return Variable(arr, volatile=not train), lengths\n return Variable(arr, volatile=not train)\n", "path": "torchtext/data/field.py"}]}
| 3,013 | 137 |
gh_patches_debug_17697
|
rasdani/github-patches
|
git_diff
|
Cog-Creators__Red-DiscordBot-1277
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[V3] Instance deletion requires making a backup
Please be sure to read through other issues as well to make sure what you are suggesting/reporting has not already
been suggested/reported
### Type:
- [ ] Suggestion
- [x] Bug
### Brief description of the problem
Instance deletion doesn't work if you don't want to make a backup
### Expected behavior
Should just remove the instance if the user opts not to take a backup
### Actual behavior
It just exits
### Steps to reproduce
1. Run `redbot-setup --delete`
2. Select an instance
3. Answer `n` to the question about making a backup
### Temporary workaround
Have it make a backup and just delete the backup manually if you don't want it
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redbot/setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 import argparse
4 import os
5 import shutil
6 import sys
7 import tarfile
8 from copy import deepcopy
9 from datetime import datetime as dt
10 from pathlib import Path
11
12 import appdirs
13 from redbot.core.cli import confirm
14 from redbot.core.data_manager import basic_config_default
15 from redbot.core.json_io import JsonIO
16
17 config_dir = None
18 appdir = appdirs.AppDirs("Red-DiscordBot")
19 if sys.platform == 'linux':
20 if 0 < os.getuid() < 1000:
21 config_dir = Path(appdir.site_data_dir)
22 if not config_dir:
23 config_dir = Path(appdir.user_config_dir)
24 try:
25 config_dir.mkdir(parents=True, exist_ok=True)
26 except PermissionError:
27 print(
28 "You don't have permission to write to "
29 "'{}'\nExiting...".format(config_dir))
30 sys.exit(1)
31 config_file = config_dir / 'config.json'
32
33
34 def parse_cli_args():
35 parser = argparse.ArgumentParser(
36 description="Red - Discord Bot's instance manager (V3)"
37 )
38 parser.add_argument(
39 "--delete", "-d",
40 help="Interactively delete an instance",
41 action="store_true"
42 )
43 return parser.parse_known_args()
44
45
46 def load_existing_config():
47 if not config_file.exists():
48 return {}
49
50 return JsonIO(config_file)._load_json()
51
52
53 def save_config(name, data, remove=False):
54 config = load_existing_config()
55 if remove and name in config:
56 config.pop(name)
57 else:
58 config[name] = data
59 JsonIO(config_file)._save_json(config)
60
61
62 def basic_setup():
63 """
64 Creates the data storage folder.
65 :return:
66 """
67
68 default_data_dir = Path(appdir.user_data_dir)
69
70 print("Hello! Before we begin the full configuration process we need to"
71 " gather some initial information about where you'd like us"
72 " to store your bot's data. We've attempted to figure out a"
73 " sane default data location which is printed below. If you don't"
74 " want to change this default please press [ENTER], otherwise"
75 " input your desired data location.")
76 print()
77 print("Default: {}".format(default_data_dir))
78
79 new_path = input('> ')
80
81 if new_path != '':
82 new_path = Path(new_path)
83 default_data_dir = new_path
84
85 if not default_data_dir.exists():
86 try:
87 default_data_dir.mkdir(parents=True, exist_ok=True)
88 except OSError:
89 print("We were unable to create your chosen directory."
90 " You may need to restart this process with admin"
91 " privileges.")
92 sys.exit(1)
93
94 print("You have chosen {} to be your data directory."
95 "".format(default_data_dir))
96 if not confirm("Please confirm (y/n):"):
97 print("Please start the process over.")
98 sys.exit(0)
99
100 default_dirs = deepcopy(basic_config_default)
101 default_dirs['DATA_PATH'] = str(default_data_dir.resolve())
102
103 storage_dict = {
104 1: "JSON",
105 2: "MongoDB"
106 }
107 storage = None
108 while storage is None:
109 print()
110 print("Please choose your storage backend (if you're unsure, choose 1).")
111 print("1. JSON (file storage, requires no database).")
112 print("2. MongoDB")
113 storage = input("> ")
114 try:
115 storage = int(storage)
116 except ValueError:
117 storage = None
118 else:
119 if storage not in storage_dict:
120 storage = None
121
122 default_dirs['STORAGE_TYPE'] = storage_dict.get(storage, 1)
123
124 if storage_dict.get(storage, 1) == "MongoDB":
125 from redbot.core.drivers.red_mongo import get_config_details
126 default_dirs['STORAGE_DETAILS'] = get_config_details()
127 else:
128 default_dirs['STORAGE_DETAILS'] = {}
129
130 name = ""
131 while len(name) == 0:
132 print()
133 print("Please enter a name for your instance, this name cannot include spaces"
134 " and it will be used to run your bot from here on out.")
135 name = input("> ")
136 if " " in name:
137 name = ""
138
139 save_config(name, default_dirs)
140
141 print()
142 print("Your basic configuration has been saved. Please run `redbot <name>` to"
143 " continue your setup process and to run the bot.")
144
145
146 def remove_instance():
147 instance_list = load_existing_config()
148 if not instance_list:
149 print("No instances have been set up!")
150 return
151
152 print(
153 "You have chosen to remove an instance. The following "
154 "is a list of instances that currently exist:\n"
155 )
156 for instance in instance_list.keys():
157 print("{}\n".format(instance))
158 print("Please select one of the above by entering its name")
159 selected = input("> ")
160
161 if selected not in instance_list.keys():
162 print("That isn't a valid instance!")
163 return
164 instance_data = instance_list[selected]
165 print(
166 "Would you like to make a backup of "
167 "the data for this instance (y/n)?"
168 )
169 yesno = input("> ")
170 if yesno.lower() == "y":
171 if instance_data["STORAGE_TYPE"] == "MongoDB":
172 raise NotImplementedError(
173 "Support for removing instances with MongoDB as the storage "
174 "is not implemented at this time due to backup support."
175 )
176 else:
177 print("Backing up the instance's data...")
178 backup_filename = "redv3-{}-{}.tar.gz".format(
179 selected, dt.utcnow().strftime("%Y-%m-%d %H:%M:%S")
180 )
181 pth = Path(instance_data["DATA_PATH"])
182 home = pth.home()
183 backup_file = home / backup_filename
184 os.chdir(str(pth.parent)) # str is used here because 3.5 support
185 with tarfile.open(str(backup_file), "w:gz") as tar:
186 tar.add(pth.stem) # add all files in that directory
187 print(
188 "A backup of {} has been made. It is at {}".format(
189 selected, backup_file
190 )
191 )
192 print("Removing the instance...")
193 shutil.rmtree(str(pth))
194 save_config(selected, {}, remove=True)
195 print("The instance has been removed")
196 return
197 elif yesno.lower() == "n":
198 print("Ok then")
199 return
200 else:
201 print("That's not a valid option!")
202 return
203
204
205 def main():
206 if args.delete:
207 try:
208 remove_instance()
209 except NotImplementedError as e:
210 print(str(e))
211 else:
212 basic_setup()
213
214 args, _ = parse_cli_args()
215
216 if __name__ == "__main__":
217 try:
218 main()
219 except KeyboardInterrupt:
220 print("Exiting...")
221 else:
222 print("Exiting...")
223
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/redbot/setup.py b/redbot/setup.py
--- a/redbot/setup.py
+++ b/redbot/setup.py
@@ -190,12 +190,22 @@
)
)
print("Removing the instance...")
- shutil.rmtree(str(pth))
+ try:
+ shutil.rmtree(str(pth))
+ except FileNotFoundError:
+ pass # data dir was removed manually
save_config(selected, {}, remove=True)
print("The instance has been removed")
return
elif yesno.lower() == "n":
- print("Ok then")
+ pth = Path(instance_data["DATA_PATH"])
+ print("Removing the instance...")
+ try:
+ shutil.rmtree(str(pth))
+ except FileNotFoundError:
+ pass # data dir was removed manually
+ save_config(selected, {}, remove=True)
+ print("The instance has been removed")
return
else:
print("That's not a valid option!")
|
{"golden_diff": "diff --git a/redbot/setup.py b/redbot/setup.py\n--- a/redbot/setup.py\n+++ b/redbot/setup.py\n@@ -190,12 +190,22 @@\n )\n )\n print(\"Removing the instance...\")\n- shutil.rmtree(str(pth))\n+ try:\n+ shutil.rmtree(str(pth))\n+ except FileNotFoundError:\n+ pass # data dir was removed manually\n save_config(selected, {}, remove=True)\n print(\"The instance has been removed\")\n return\n elif yesno.lower() == \"n\":\n- print(\"Ok then\")\n+ pth = Path(instance_data[\"DATA_PATH\"])\n+ print(\"Removing the instance...\")\n+ try:\n+ shutil.rmtree(str(pth))\n+ except FileNotFoundError:\n+ pass # data dir was removed manually\n+ save_config(selected, {}, remove=True)\n+ print(\"The instance has been removed\")\n return\n else:\n print(\"That's not a valid option!\")\n", "issue": "[V3] Instance deletion requires making a backup\nPlease be sure to read through other issues as well to make sure what you are suggesting/reporting has not already\r\nbeen suggested/reported\r\n\r\n### Type:\r\n\r\n- [ ] Suggestion\r\n- [x] Bug\r\n\r\n### Brief description of the problem\r\nInstance deletion doesn't work if you don't want to make a backup\r\n### Expected behavior\r\nShould just remove the instance if the user opts not to take a backup\r\n### Actual behavior\r\nIt just exits\r\n### Steps to reproduce\r\n\r\n1. Run `redbot-setup --delete`\r\n2. Select an instance\r\n3. Answer `n` to the question about making a backup\r\n\r\n### Temporary workaround\r\nHave it make a backup and just delete the backup manually if you don't want it\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport argparse\nimport os\nimport shutil\nimport sys\nimport tarfile\nfrom copy import deepcopy\nfrom datetime import datetime as dt\nfrom pathlib import Path\n\nimport appdirs\nfrom redbot.core.cli import confirm\nfrom redbot.core.data_manager import basic_config_default\nfrom redbot.core.json_io import JsonIO\n\nconfig_dir = None\nappdir = appdirs.AppDirs(\"Red-DiscordBot\")\nif sys.platform == 'linux':\n if 0 < os.getuid() < 1000:\n config_dir = Path(appdir.site_data_dir)\nif not config_dir:\n config_dir = Path(appdir.user_config_dir)\ntry:\n config_dir.mkdir(parents=True, exist_ok=True)\nexcept PermissionError:\n print(\n \"You don't have permission to write to \"\n \"'{}'\\nExiting...\".format(config_dir))\n sys.exit(1)\nconfig_file = config_dir / 'config.json'\n\n\ndef parse_cli_args():\n parser = argparse.ArgumentParser(\n description=\"Red - Discord Bot's instance manager (V3)\"\n )\n parser.add_argument(\n \"--delete\", \"-d\",\n help=\"Interactively delete an instance\",\n action=\"store_true\"\n )\n return parser.parse_known_args()\n\n\ndef load_existing_config():\n if not config_file.exists():\n return {}\n\n return JsonIO(config_file)._load_json()\n\n\ndef save_config(name, data, remove=False):\n config = load_existing_config()\n if remove and name in config:\n config.pop(name)\n else:\n config[name] = data\n JsonIO(config_file)._save_json(config)\n\n\ndef basic_setup():\n \"\"\"\n Creates the data storage folder.\n :return:\n \"\"\"\n\n default_data_dir = Path(appdir.user_data_dir)\n\n print(\"Hello! Before we begin the full configuration process we need to\"\n \" gather some initial information about where you'd like us\"\n \" to store your bot's data. We've attempted to figure out a\"\n \" sane default data location which is printed below. 
If you don't\"\n \" want to change this default please press [ENTER], otherwise\"\n \" input your desired data location.\")\n print()\n print(\"Default: {}\".format(default_data_dir))\n\n new_path = input('> ')\n\n if new_path != '':\n new_path = Path(new_path)\n default_data_dir = new_path\n\n if not default_data_dir.exists():\n try:\n default_data_dir.mkdir(parents=True, exist_ok=True)\n except OSError:\n print(\"We were unable to create your chosen directory.\"\n \" You may need to restart this process with admin\"\n \" privileges.\")\n sys.exit(1)\n\n print(\"You have chosen {} to be your data directory.\"\n \"\".format(default_data_dir))\n if not confirm(\"Please confirm (y/n):\"):\n print(\"Please start the process over.\")\n sys.exit(0)\n\n default_dirs = deepcopy(basic_config_default)\n default_dirs['DATA_PATH'] = str(default_data_dir.resolve())\n\n storage_dict = {\n 1: \"JSON\",\n 2: \"MongoDB\"\n }\n storage = None\n while storage is None:\n print()\n print(\"Please choose your storage backend (if you're unsure, choose 1).\")\n print(\"1. JSON (file storage, requires no database).\")\n print(\"2. MongoDB\")\n storage = input(\"> \")\n try:\n storage = int(storage)\n except ValueError:\n storage = None\n else:\n if storage not in storage_dict:\n storage = None\n\n default_dirs['STORAGE_TYPE'] = storage_dict.get(storage, 1)\n\n if storage_dict.get(storage, 1) == \"MongoDB\":\n from redbot.core.drivers.red_mongo import get_config_details\n default_dirs['STORAGE_DETAILS'] = get_config_details()\n else:\n default_dirs['STORAGE_DETAILS'] = {}\n\n name = \"\"\n while len(name) == 0:\n print()\n print(\"Please enter a name for your instance, this name cannot include spaces\"\n \" and it will be used to run your bot from here on out.\")\n name = input(\"> \")\n if \" \" in name:\n name = \"\"\n\n save_config(name, default_dirs)\n\n print()\n print(\"Your basic configuration has been saved. Please run `redbot <name>` to\"\n \" continue your setup process and to run the bot.\")\n\n\ndef remove_instance():\n instance_list = load_existing_config()\n if not instance_list:\n print(\"No instances have been set up!\")\n return\n\n print(\n \"You have chosen to remove an instance. The following \"\n \"is a list of instances that currently exist:\\n\"\n )\n for instance in instance_list.keys():\n print(\"{}\\n\".format(instance))\n print(\"Please select one of the above by entering its name\")\n selected = input(\"> \")\n \n if selected not in instance_list.keys():\n print(\"That isn't a valid instance!\")\n return\n instance_data = instance_list[selected]\n print(\n \"Would you like to make a backup of \"\n \"the data for this instance (y/n)?\"\n )\n yesno = input(\"> \")\n if yesno.lower() == \"y\":\n if instance_data[\"STORAGE_TYPE\"] == \"MongoDB\":\n raise NotImplementedError(\n \"Support for removing instances with MongoDB as the storage \"\n \"is not implemented at this time due to backup support.\"\n )\n else:\n print(\"Backing up the instance's data...\")\n backup_filename = \"redv3-{}-{}.tar.gz\".format(\n selected, dt.utcnow().strftime(\"%Y-%m-%d %H:%M:%S\")\n )\n pth = Path(instance_data[\"DATA_PATH\"])\n home = pth.home()\n backup_file = home / backup_filename\n os.chdir(str(pth.parent)) # str is used here because 3.5 support\n with tarfile.open(str(backup_file), \"w:gz\") as tar:\n tar.add(pth.stem) # add all files in that directory\n print(\n \"A backup of {} has been made. 
It is at {}\".format(\n selected, backup_file\n )\n )\n print(\"Removing the instance...\")\n shutil.rmtree(str(pth))\n save_config(selected, {}, remove=True)\n print(\"The instance has been removed\")\n return\n elif yesno.lower() == \"n\":\n print(\"Ok then\")\n return\n else:\n print(\"That's not a valid option!\")\n return\n\n\ndef main():\n if args.delete:\n try:\n remove_instance()\n except NotImplementedError as e:\n print(str(e))\n else:\n basic_setup()\n\nargs, _ = parse_cli_args()\n\nif __name__ == \"__main__\":\n try:\n main()\n except KeyboardInterrupt:\n print(\"Exiting...\")\n else:\n print(\"Exiting...\")\n", "path": "redbot/setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport argparse\nimport os\nimport shutil\nimport sys\nimport tarfile\nfrom copy import deepcopy\nfrom datetime import datetime as dt\nfrom pathlib import Path\n\nimport appdirs\nfrom redbot.core.cli import confirm\nfrom redbot.core.data_manager import basic_config_default\nfrom redbot.core.json_io import JsonIO\n\nconfig_dir = None\nappdir = appdirs.AppDirs(\"Red-DiscordBot\")\nif sys.platform == 'linux':\n if 0 < os.getuid() < 1000:\n config_dir = Path(appdir.site_data_dir)\nif not config_dir:\n config_dir = Path(appdir.user_config_dir)\ntry:\n config_dir.mkdir(parents=True, exist_ok=True)\nexcept PermissionError:\n print(\n \"You don't have permission to write to \"\n \"'{}'\\nExiting...\".format(config_dir))\n sys.exit(1)\nconfig_file = config_dir / 'config.json'\n\n\ndef parse_cli_args():\n parser = argparse.ArgumentParser(\n description=\"Red - Discord Bot's instance manager (V3)\"\n )\n parser.add_argument(\n \"--delete\", \"-d\",\n help=\"Interactively delete an instance\",\n action=\"store_true\"\n )\n return parser.parse_known_args()\n\n\ndef load_existing_config():\n if not config_file.exists():\n return {}\n\n return JsonIO(config_file)._load_json()\n\n\ndef save_config(name, data, remove=False):\n config = load_existing_config()\n if remove and name in config:\n config.pop(name)\n else:\n config[name] = data\n JsonIO(config_file)._save_json(config)\n\n\ndef basic_setup():\n \"\"\"\n Creates the data storage folder.\n :return:\n \"\"\"\n\n default_data_dir = Path(appdir.user_data_dir)\n\n print(\"Hello! Before we begin the full configuration process we need to\"\n \" gather some initial information about where you'd like us\"\n \" to store your bot's data. We've attempted to figure out a\"\n \" sane default data location which is printed below. 
If you don't\"\n \" want to change this default please press [ENTER], otherwise\"\n \" input your desired data location.\")\n print()\n print(\"Default: {}\".format(default_data_dir))\n\n new_path = input('> ')\n\n if new_path != '':\n new_path = Path(new_path)\n default_data_dir = new_path\n\n if not default_data_dir.exists():\n try:\n default_data_dir.mkdir(parents=True, exist_ok=True)\n except OSError:\n print(\"We were unable to create your chosen directory.\"\n \" You may need to restart this process with admin\"\n \" privileges.\")\n sys.exit(1)\n\n print(\"You have chosen {} to be your data directory.\"\n \"\".format(default_data_dir))\n if not confirm(\"Please confirm (y/n):\"):\n print(\"Please start the process over.\")\n sys.exit(0)\n\n default_dirs = deepcopy(basic_config_default)\n default_dirs['DATA_PATH'] = str(default_data_dir.resolve())\n\n storage_dict = {\n 1: \"JSON\",\n 2: \"MongoDB\"\n }\n storage = None\n while storage is None:\n print()\n print(\"Please choose your storage backend (if you're unsure, choose 1).\")\n print(\"1. JSON (file storage, requires no database).\")\n print(\"2. MongoDB\")\n storage = input(\"> \")\n try:\n storage = int(storage)\n except ValueError:\n storage = None\n else:\n if storage not in storage_dict:\n storage = None\n\n default_dirs['STORAGE_TYPE'] = storage_dict.get(storage, 1)\n\n if storage_dict.get(storage, 1) == \"MongoDB\":\n from redbot.core.drivers.red_mongo import get_config_details\n default_dirs['STORAGE_DETAILS'] = get_config_details()\n else:\n default_dirs['STORAGE_DETAILS'] = {}\n\n name = \"\"\n while len(name) == 0:\n print()\n print(\"Please enter a name for your instance, this name cannot include spaces\"\n \" and it will be used to run your bot from here on out.\")\n name = input(\"> \")\n if \" \" in name:\n name = \"\"\n\n save_config(name, default_dirs)\n\n print()\n print(\"Your basic configuration has been saved. Please run `redbot <name>` to\"\n \" continue your setup process and to run the bot.\")\n\n\ndef remove_instance():\n instance_list = load_existing_config()\n if not instance_list:\n print(\"No instances have been set up!\")\n return\n\n print(\n \"You have chosen to remove an instance. The following \"\n \"is a list of instances that currently exist:\\n\"\n )\n for instance in instance_list.keys():\n print(\"{}\\n\".format(instance))\n print(\"Please select one of the above by entering its name\")\n selected = input(\"> \")\n \n if selected not in instance_list.keys():\n print(\"That isn't a valid instance!\")\n return\n instance_data = instance_list[selected]\n print(\n \"Would you like to make a backup of \"\n \"the data for this instance (y/n)?\"\n )\n yesno = input(\"> \")\n if yesno.lower() == \"y\":\n if instance_data[\"STORAGE_TYPE\"] == \"MongoDB\":\n raise NotImplementedError(\n \"Support for removing instances with MongoDB as the storage \"\n \"is not implemented at this time due to backup support.\"\n )\n else:\n print(\"Backing up the instance's data...\")\n backup_filename = \"redv3-{}-{}.tar.gz\".format(\n selected, dt.utcnow().strftime(\"%Y-%m-%d %H:%M:%S\")\n )\n pth = Path(instance_data[\"DATA_PATH\"])\n home = pth.home()\n backup_file = home / backup_filename\n os.chdir(str(pth.parent)) # str is used here because 3.5 support\n with tarfile.open(str(backup_file), \"w:gz\") as tar:\n tar.add(pth.stem) # add all files in that directory\n print(\n \"A backup of {} has been made. 
It is at {}\".format(\n selected, backup_file\n )\n )\n print(\"Removing the instance...\")\n try:\n shutil.rmtree(str(pth))\n except FileNotFoundError:\n pass # data dir was removed manually\n save_config(selected, {}, remove=True)\n print(\"The instance has been removed\")\n return\n elif yesno.lower() == \"n\":\n pth = Path(instance_data[\"DATA_PATH\"])\n print(\"Removing the instance...\")\n try:\n shutil.rmtree(str(pth))\n except FileNotFoundError:\n pass # data dir was removed manually\n save_config(selected, {}, remove=True)\n print(\"The instance has been removed\")\n return\n else:\n print(\"That's not a valid option!\")\n return\n\n\ndef main():\n if args.delete:\n try:\n remove_instance()\n except NotImplementedError as e:\n print(str(e))\n else:\n basic_setup()\n\nargs, _ = parse_cli_args()\n\nif __name__ == \"__main__\":\n try:\n main()\n except KeyboardInterrupt:\n print(\"Exiting...\")\n else:\n print(\"Exiting...\")\n", "path": "redbot/setup.py"}]}
| 2,470 | 213 |
gh_patches_debug_11846
|
rasdani/github-patches
|
git_diff
|
DataDog__dd-trace-py-2715
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
mypy stub files
mypy throws an error when ddtrace is in use:
It would be nice if this library could ship with stub files or be put on https://github.com/python/typeshed. I'm not sure how to do that yet but maybe someone here is.
More info from mypy side here https://mypy.readthedocs.io/en/latest/running_mypy.html#missing-imports
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import os
2 import platform
3 import sys
4
5 from setuptools import setup, find_packages, Extension
6 from setuptools.command.test import test as TestCommand
7
8 # ORDER MATTERS
9 # Import this after setuptools or it will fail
10 from Cython.Build import cythonize # noqa: I100
11 import Cython.Distutils
12
13
14 HERE = os.path.dirname(os.path.abspath(__file__))
15
16
17 def load_module_from_project_file(mod_name, fname):
18 """
19 Helper used to load a module from a file in this project
20
21 DEV: Loading this way will by-pass loading all parent modules
22 e.g. importing `ddtrace.vendor.psutil.setup` will load `ddtrace/__init__.py`
23 which has side effects like loading the tracer
24 """
25 fpath = os.path.join(HERE, fname)
26
27 if sys.version_info >= (3, 5):
28 import importlib.util
29
30 spec = importlib.util.spec_from_file_location(mod_name, fpath)
31 mod = importlib.util.module_from_spec(spec)
32 spec.loader.exec_module(mod)
33 return mod
34 elif sys.version_info >= (3, 3):
35 from importlib.machinery import SourceFileLoader
36
37 return SourceFileLoader(mod_name, fpath).load_module()
38 else:
39 import imp
40
41 return imp.load_source(mod_name, fpath)
42
43
44 class Tox(TestCommand):
45
46 user_options = [("tox-args=", "a", "Arguments to pass to tox")]
47
48 def initialize_options(self):
49 TestCommand.initialize_options(self)
50 self.tox_args = None
51
52 def finalize_options(self):
53 TestCommand.finalize_options(self)
54 self.test_args = []
55 self.test_suite = True
56
57 def run_tests(self):
58 # import here, cause outside the eggs aren't loaded
59 import tox
60 import shlex
61
62 args = self.tox_args
63 if args:
64 args = shlex.split(self.tox_args)
65 errno = tox.cmdline(args=args)
66 sys.exit(errno)
67
68
69 long_description = """
70 # dd-trace-py
71
72 `ddtrace` is Datadog's tracing library for Python. It is used to trace requests
73 as they flow across web servers, databases and microservices so that developers
74 have great visibility into bottlenecks and troublesome requests.
75
76 ## Getting Started
77
78 For a basic product overview, installation and quick start, check out our
79 [setup documentation][setup docs].
80
81 For more advanced usage and configuration, check out our [API
82 documentation][api docs].
83
84 For descriptions of terminology used in APM, take a look at the [official
85 documentation][visualization docs].
86
87 [setup docs]: https://docs.datadoghq.com/tracing/setup/python/
88 [api docs]: https://ddtrace.readthedocs.io/
89 [visualization docs]: https://docs.datadoghq.com/tracing/visualization/
90 """
91
92
93 def get_exts_for(name):
94 try:
95 mod = load_module_from_project_file(
96 "ddtrace.vendor.{}.setup".format(name), "ddtrace/vendor/{}/setup.py".format(name)
97 )
98 return mod.get_extensions()
99 except Exception as e:
100 print("WARNING: Failed to load %s extensions, skipping: %s" % (name, e))
101 return []
102
103
104 if sys.byteorder == "big":
105 encoding_macros = [("__BIG_ENDIAN__", "1")]
106 else:
107 encoding_macros = [("__LITTLE_ENDIAN__", "1")]
108
109
110 if platform.system() == "Windows":
111 encoding_libraries = ["ws2_32"]
112 extra_compile_args = []
113 debug_compile_args = []
114 else:
115 encoding_libraries = []
116 extra_compile_args = ["-DPy_BUILD_CORE"]
117 if "DD_COMPILE_DEBUG" in os.environ:
118 if platform.system() == "Linux":
119 debug_compile_args = ["-g", "-O0", "-Werror", "-Wall", "-Wextra", "-Wpedantic", "-fanalyzer"]
120 else:
121 debug_compile_args = [
122 "-g",
123 "-O0",
124 "-Werror",
125 "-Wall",
126 "-Wextra",
127 "-Wpedantic",
128 # Cython is not deprecation-proof
129 "-Wno-deprecated-declarations",
130 ]
131 else:
132 debug_compile_args = []
133
134
135 if sys.version_info[:2] >= (3, 4):
136 ext_modules = [
137 Extension(
138 "ddtrace.profiling.collector._memalloc",
139 sources=[
140 "ddtrace/profiling/collector/_memalloc.c",
141 "ddtrace/profiling/collector/_memalloc_tb.c",
142 "ddtrace/profiling/collector/_memalloc_heap.c",
143 ],
144 extra_compile_args=debug_compile_args,
145 ),
146 ]
147 else:
148 ext_modules = []
149
150 setup(
151 name="ddtrace",
152 description="Datadog tracing code",
153 url="https://github.com/DataDog/dd-trace-py",
154 author="Datadog, Inc.",
155 author_email="[email protected]",
156 long_description=long_description,
157 long_description_content_type="text/markdown",
158 license="BSD",
159 packages=find_packages(exclude=["tests*"]),
160 py_modules=["ddtrace_gevent_check"],
161 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*",
162 # enum34 is an enum backport for earlier versions of python
163 # funcsigs backport required for vendored debtcollector
164 install_requires=[
165 "enum34; python_version<'3.4'",
166 "funcsigs>=1.0.0; python_version=='2.7'",
167 "typing; python_version<'3.5'",
168 "packaging>=17.1",
169 "protobuf>=3",
170 "tenacity>=5",
171 "attrs>=19.2.0",
172 "six>=1.12.0",
173 "pep562; python_version<'3.7'",
174 ],
175 extras_require={
176 # users can include opentracing by having:
177 # install_requires=['ddtrace[opentracing]', ...]
178 "opentracing": ["opentracing>=2.0.0"],
179 },
180 # plugin tox
181 tests_require=["tox", "flake8"],
182 cmdclass={"test": Tox},
183 entry_points={
184 "console_scripts": [
185 "ddtrace-run = ddtrace.commands.ddtrace_run:main",
186 ],
187 "pytest11": ["ddtrace = ddtrace.contrib.pytest.plugin"],
188 "gevent.plugins.monkey.did_patch_all": [
189 "ddtrace_gevent_check = ddtrace_gevent_check:gevent_patch_all",
190 ],
191 },
192 classifiers=[
193 "Programming Language :: Python",
194 "Programming Language :: Python :: 2.7",
195 "Programming Language :: Python :: 3.5",
196 "Programming Language :: Python :: 3.6",
197 "Programming Language :: Python :: 3.7",
198 "Programming Language :: Python :: 3.8",
199 "Programming Language :: Python :: 3.9",
200 ],
201 use_scm_version=True,
202 setup_requires=["setuptools_scm[toml]>=4", "cython"],
203 ext_modules=ext_modules
204 + cythonize(
205 [
206 Cython.Distutils.Extension(
207 "ddtrace.internal._rand",
208 sources=["ddtrace/internal/_rand.pyx"],
209 language="c",
210 ),
211 Extension(
212 "ddtrace.internal._encoding",
213 ["ddtrace/internal/_encoding.pyx"],
214 include_dirs=["."],
215 libraries=encoding_libraries,
216 define_macros=encoding_macros,
217 ),
218 Cython.Distutils.Extension(
219 "ddtrace.profiling.collector.stack",
220 sources=["ddtrace/profiling/collector/stack.pyx"],
221 language="c",
222 extra_compile_args=extra_compile_args,
223 ),
224 Cython.Distutils.Extension(
225 "ddtrace.profiling.collector._traceback",
226 sources=["ddtrace/profiling/collector/_traceback.pyx"],
227 language="c",
228 ),
229 Cython.Distutils.Extension(
230 "ddtrace.profiling.collector._threading",
231 sources=["ddtrace/profiling/collector/_threading.pyx"],
232 language="c",
233 ),
234 Cython.Distutils.Extension(
235 "ddtrace.profiling.exporter.pprof",
236 sources=["ddtrace/profiling/exporter/pprof.pyx"],
237 language="c",
238 ),
239 Cython.Distutils.Extension(
240 "ddtrace.profiling._build",
241 sources=["ddtrace/profiling/_build.pyx"],
242 language="c",
243 ),
244 ],
245 compile_time_env={
246 "PY_MAJOR_VERSION": sys.version_info.major,
247 "PY_MINOR_VERSION": sys.version_info.minor,
248 "PY_MICRO_VERSION": sys.version_info.micro,
249 },
250 force=True,
251 )
252 + get_exts_for("wrapt")
253 + get_exts_for("psutil"),
254 )
255
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -157,8 +157,10 @@
long_description_content_type="text/markdown",
license="BSD",
packages=find_packages(exclude=["tests*"]),
+ package_data={"ddtrace": ["py.typed"]},
py_modules=["ddtrace_gevent_check"],
python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*",
+ zip_safe=False,
# enum34 is an enum backport for earlier versions of python
# funcsigs backport required for vendored debtcollector
install_requires=[
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -157,8 +157,10 @@\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n packages=find_packages(exclude=[\"tests*\"]),\n+ package_data={\"ddtrace\": [\"py.typed\"]},\n py_modules=[\"ddtrace_gevent_check\"],\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*\",\n+ zip_safe=False,\n # enum34 is an enum backport for earlier versions of python\n # funcsigs backport required for vendored debtcollector\n install_requires=[\n", "issue": "mypy stub files\nmypy throws an error when ddtrace is in use:\r\n\r\n\r\n\r\n\r\nIt would be nice if this library could ship with stub files or be put on https://github.com/python/typeshed. I'm not sure how to do that yet but maybe someone here is.\r\n\r\nMore info from mypy side here https://mypy.readthedocs.io/en/latest/running_mypy.html#missing-imports\n", "before_files": [{"content": "import os\nimport platform\nimport sys\n\nfrom setuptools import setup, find_packages, Extension\nfrom setuptools.command.test import test as TestCommand\n\n# ORDER MATTERS\n# Import this after setuptools or it will fail\nfrom Cython.Build import cythonize # noqa: I100\nimport Cython.Distutils\n\n\nHERE = os.path.dirname(os.path.abspath(__file__))\n\n\ndef load_module_from_project_file(mod_name, fname):\n \"\"\"\n Helper used to load a module from a file in this project\n\n DEV: Loading this way will by-pass loading all parent modules\n e.g. importing `ddtrace.vendor.psutil.setup` will load `ddtrace/__init__.py`\n which has side effects like loading the tracer\n \"\"\"\n fpath = os.path.join(HERE, fname)\n\n if sys.version_info >= (3, 5):\n import importlib.util\n\n spec = importlib.util.spec_from_file_location(mod_name, fpath)\n mod = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(mod)\n return mod\n elif sys.version_info >= (3, 3):\n from importlib.machinery import SourceFileLoader\n\n return SourceFileLoader(mod_name, fpath).load_module()\n else:\n import imp\n\n return imp.load_source(mod_name, fpath)\n\n\nclass Tox(TestCommand):\n\n user_options = [(\"tox-args=\", \"a\", \"Arguments to pass to tox\")]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.tox_args = None\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n # import here, cause outside the eggs aren't loaded\n import tox\n import shlex\n\n args = self.tox_args\n if args:\n args = shlex.split(self.tox_args)\n errno = tox.cmdline(args=args)\n sys.exit(errno)\n\n\nlong_description = \"\"\"\n# dd-trace-py\n\n`ddtrace` is Datadog's tracing library for Python. 
It is used to trace requests\nas they flow across web servers, databases and microservices so that developers\nhave great visibility into bottlenecks and troublesome requests.\n\n## Getting Started\n\nFor a basic product overview, installation and quick start, check out our\n[setup documentation][setup docs].\n\nFor more advanced usage and configuration, check out our [API\ndocumentation][api docs].\n\nFor descriptions of terminology used in APM, take a look at the [official\ndocumentation][visualization docs].\n\n[setup docs]: https://docs.datadoghq.com/tracing/setup/python/\n[api docs]: https://ddtrace.readthedocs.io/\n[visualization docs]: https://docs.datadoghq.com/tracing/visualization/\n\"\"\"\n\n\ndef get_exts_for(name):\n try:\n mod = load_module_from_project_file(\n \"ddtrace.vendor.{}.setup\".format(name), \"ddtrace/vendor/{}/setup.py\".format(name)\n )\n return mod.get_extensions()\n except Exception as e:\n print(\"WARNING: Failed to load %s extensions, skipping: %s\" % (name, e))\n return []\n\n\nif sys.byteorder == \"big\":\n encoding_macros = [(\"__BIG_ENDIAN__\", \"1\")]\nelse:\n encoding_macros = [(\"__LITTLE_ENDIAN__\", \"1\")]\n\n\nif platform.system() == \"Windows\":\n encoding_libraries = [\"ws2_32\"]\n extra_compile_args = []\n debug_compile_args = []\nelse:\n encoding_libraries = []\n extra_compile_args = [\"-DPy_BUILD_CORE\"]\n if \"DD_COMPILE_DEBUG\" in os.environ:\n if platform.system() == \"Linux\":\n debug_compile_args = [\"-g\", \"-O0\", \"-Werror\", \"-Wall\", \"-Wextra\", \"-Wpedantic\", \"-fanalyzer\"]\n else:\n debug_compile_args = [\n \"-g\",\n \"-O0\",\n \"-Werror\",\n \"-Wall\",\n \"-Wextra\",\n \"-Wpedantic\",\n # Cython is not deprecation-proof\n \"-Wno-deprecated-declarations\",\n ]\n else:\n debug_compile_args = []\n\n\nif sys.version_info[:2] >= (3, 4):\n ext_modules = [\n Extension(\n \"ddtrace.profiling.collector._memalloc\",\n sources=[\n \"ddtrace/profiling/collector/_memalloc.c\",\n \"ddtrace/profiling/collector/_memalloc_tb.c\",\n \"ddtrace/profiling/collector/_memalloc_heap.c\",\n ],\n extra_compile_args=debug_compile_args,\n ),\n ]\nelse:\n ext_modules = []\n\nsetup(\n name=\"ddtrace\",\n description=\"Datadog tracing code\",\n url=\"https://github.com/DataDog/dd-trace-py\",\n author=\"Datadog, Inc.\",\n author_email=\"[email protected]\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n packages=find_packages(exclude=[\"tests*\"]),\n py_modules=[\"ddtrace_gevent_check\"],\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*\",\n # enum34 is an enum backport for earlier versions of python\n # funcsigs backport required for vendored debtcollector\n install_requires=[\n \"enum34; python_version<'3.4'\",\n \"funcsigs>=1.0.0; python_version=='2.7'\",\n \"typing; python_version<'3.5'\",\n \"packaging>=17.1\",\n \"protobuf>=3\",\n \"tenacity>=5\",\n \"attrs>=19.2.0\",\n \"six>=1.12.0\",\n \"pep562; python_version<'3.7'\",\n ],\n extras_require={\n # users can include opentracing by having:\n # install_requires=['ddtrace[opentracing]', ...]\n \"opentracing\": [\"opentracing>=2.0.0\"],\n },\n # plugin tox\n tests_require=[\"tox\", \"flake8\"],\n cmdclass={\"test\": Tox},\n entry_points={\n \"console_scripts\": [\n \"ddtrace-run = ddtrace.commands.ddtrace_run:main\",\n ],\n \"pytest11\": [\"ddtrace = ddtrace.contrib.pytest.plugin\"],\n \"gevent.plugins.monkey.did_patch_all\": [\n \"ddtrace_gevent_check = ddtrace_gevent_check:gevent_patch_all\",\n ],\n },\n classifiers=[\n 
\"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n use_scm_version=True,\n setup_requires=[\"setuptools_scm[toml]>=4\", \"cython\"],\n ext_modules=ext_modules\n + cythonize(\n [\n Cython.Distutils.Extension(\n \"ddtrace.internal._rand\",\n sources=[\"ddtrace/internal/_rand.pyx\"],\n language=\"c\",\n ),\n Extension(\n \"ddtrace.internal._encoding\",\n [\"ddtrace/internal/_encoding.pyx\"],\n include_dirs=[\".\"],\n libraries=encoding_libraries,\n define_macros=encoding_macros,\n ),\n Cython.Distutils.Extension(\n \"ddtrace.profiling.collector.stack\",\n sources=[\"ddtrace/profiling/collector/stack.pyx\"],\n language=\"c\",\n extra_compile_args=extra_compile_args,\n ),\n Cython.Distutils.Extension(\n \"ddtrace.profiling.collector._traceback\",\n sources=[\"ddtrace/profiling/collector/_traceback.pyx\"],\n language=\"c\",\n ),\n Cython.Distutils.Extension(\n \"ddtrace.profiling.collector._threading\",\n sources=[\"ddtrace/profiling/collector/_threading.pyx\"],\n language=\"c\",\n ),\n Cython.Distutils.Extension(\n \"ddtrace.profiling.exporter.pprof\",\n sources=[\"ddtrace/profiling/exporter/pprof.pyx\"],\n language=\"c\",\n ),\n Cython.Distutils.Extension(\n \"ddtrace.profiling._build\",\n sources=[\"ddtrace/profiling/_build.pyx\"],\n language=\"c\",\n ),\n ],\n compile_time_env={\n \"PY_MAJOR_VERSION\": sys.version_info.major,\n \"PY_MINOR_VERSION\": sys.version_info.minor,\n \"PY_MICRO_VERSION\": sys.version_info.micro,\n },\n force=True,\n )\n + get_exts_for(\"wrapt\")\n + get_exts_for(\"psutil\"),\n)\n", "path": "setup.py"}], "after_files": [{"content": "import os\nimport platform\nimport sys\n\nfrom setuptools import setup, find_packages, Extension\nfrom setuptools.command.test import test as TestCommand\n\n# ORDER MATTERS\n# Import this after setuptools or it will fail\nfrom Cython.Build import cythonize # noqa: I100\nimport Cython.Distutils\n\n\nHERE = os.path.dirname(os.path.abspath(__file__))\n\n\ndef load_module_from_project_file(mod_name, fname):\n \"\"\"\n Helper used to load a module from a file in this project\n\n DEV: Loading this way will by-pass loading all parent modules\n e.g. 
importing `ddtrace.vendor.psutil.setup` will load `ddtrace/__init__.py`\n which has side effects like loading the tracer\n \"\"\"\n fpath = os.path.join(HERE, fname)\n\n if sys.version_info >= (3, 5):\n import importlib.util\n\n spec = importlib.util.spec_from_file_location(mod_name, fpath)\n mod = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(mod)\n return mod\n elif sys.version_info >= (3, 3):\n from importlib.machinery import SourceFileLoader\n\n return SourceFileLoader(mod_name, fpath).load_module()\n else:\n import imp\n\n return imp.load_source(mod_name, fpath)\n\n\nclass Tox(TestCommand):\n\n user_options = [(\"tox-args=\", \"a\", \"Arguments to pass to tox\")]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.tox_args = None\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n # import here, cause outside the eggs aren't loaded\n import tox\n import shlex\n\n args = self.tox_args\n if args:\n args = shlex.split(self.tox_args)\n errno = tox.cmdline(args=args)\n sys.exit(errno)\n\n\nlong_description = \"\"\"\n# dd-trace-py\n\n`ddtrace` is Datadog's tracing library for Python. It is used to trace requests\nas they flow across web servers, databases and microservices so that developers\nhave great visibility into bottlenecks and troublesome requests.\n\n## Getting Started\n\nFor a basic product overview, installation and quick start, check out our\n[setup documentation][setup docs].\n\nFor more advanced usage and configuration, check out our [API\ndocumentation][api docs].\n\nFor descriptions of terminology used in APM, take a look at the [official\ndocumentation][visualization docs].\n\n[setup docs]: https://docs.datadoghq.com/tracing/setup/python/\n[api docs]: https://ddtrace.readthedocs.io/\n[visualization docs]: https://docs.datadoghq.com/tracing/visualization/\n\"\"\"\n\n\ndef get_exts_for(name):\n try:\n mod = load_module_from_project_file(\n \"ddtrace.vendor.{}.setup\".format(name), \"ddtrace/vendor/{}/setup.py\".format(name)\n )\n return mod.get_extensions()\n except Exception as e:\n print(\"WARNING: Failed to load %s extensions, skipping: %s\" % (name, e))\n return []\n\n\nif sys.byteorder == \"big\":\n encoding_macros = [(\"__BIG_ENDIAN__\", \"1\")]\nelse:\n encoding_macros = [(\"__LITTLE_ENDIAN__\", \"1\")]\n\n\nif platform.system() == \"Windows\":\n encoding_libraries = [\"ws2_32\"]\n extra_compile_args = []\n debug_compile_args = []\nelse:\n encoding_libraries = []\n extra_compile_args = [\"-DPy_BUILD_CORE\"]\n if \"DD_COMPILE_DEBUG\" in os.environ:\n if platform.system() == \"Linux\":\n debug_compile_args = [\"-g\", \"-O0\", \"-Werror\", \"-Wall\", \"-Wextra\", \"-Wpedantic\", \"-fanalyzer\"]\n else:\n debug_compile_args = [\n \"-g\",\n \"-O0\",\n \"-Werror\",\n \"-Wall\",\n \"-Wextra\",\n \"-Wpedantic\",\n # Cython is not deprecation-proof\n \"-Wno-deprecated-declarations\",\n ]\n else:\n debug_compile_args = []\n\n\nif sys.version_info[:2] >= (3, 4):\n ext_modules = [\n Extension(\n \"ddtrace.profiling.collector._memalloc\",\n sources=[\n \"ddtrace/profiling/collector/_memalloc.c\",\n \"ddtrace/profiling/collector/_memalloc_tb.c\",\n \"ddtrace/profiling/collector/_memalloc_heap.c\",\n ],\n extra_compile_args=debug_compile_args,\n ),\n ]\nelse:\n ext_modules = []\n\nsetup(\n name=\"ddtrace\",\n description=\"Datadog tracing code\",\n url=\"https://github.com/DataDog/dd-trace-py\",\n author=\"Datadog, Inc.\",\n author_email=\"[email 
protected]\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n packages=find_packages(exclude=[\"tests*\"]),\n package_data={\"ddtrace\": [\"py.typed\"]},\n py_modules=[\"ddtrace_gevent_check\"],\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*\",\n zip_safe=False,\n # enum34 is an enum backport for earlier versions of python\n # funcsigs backport required for vendored debtcollector\n install_requires=[\n \"enum34; python_version<'3.4'\",\n \"funcsigs>=1.0.0; python_version=='2.7'\",\n \"typing; python_version<'3.5'\",\n \"packaging>=17.1\",\n \"protobuf>=3\",\n \"tenacity>=5\",\n \"attrs>=19.2.0\",\n \"six>=1.12.0\",\n \"pep562; python_version<'3.7'\",\n ],\n extras_require={\n # users can include opentracing by having:\n # install_requires=['ddtrace[opentracing]', ...]\n \"opentracing\": [\"opentracing>=2.0.0\"],\n },\n # plugin tox\n tests_require=[\"tox\", \"flake8\"],\n cmdclass={\"test\": Tox},\n entry_points={\n \"console_scripts\": [\n \"ddtrace-run = ddtrace.commands.ddtrace_run:main\",\n ],\n \"pytest11\": [\"ddtrace = ddtrace.contrib.pytest.plugin\"],\n \"gevent.plugins.monkey.did_patch_all\": [\n \"ddtrace_gevent_check = ddtrace_gevent_check:gevent_patch_all\",\n ],\n },\n classifiers=[\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n use_scm_version=True,\n setup_requires=[\"setuptools_scm[toml]>=4\", \"cython\"],\n ext_modules=ext_modules\n + cythonize(\n [\n Cython.Distutils.Extension(\n \"ddtrace.internal._rand\",\n sources=[\"ddtrace/internal/_rand.pyx\"],\n language=\"c\",\n ),\n Extension(\n \"ddtrace.internal._encoding\",\n [\"ddtrace/internal/_encoding.pyx\"],\n include_dirs=[\".\"],\n libraries=encoding_libraries,\n define_macros=encoding_macros,\n ),\n Cython.Distutils.Extension(\n \"ddtrace.profiling.collector.stack\",\n sources=[\"ddtrace/profiling/collector/stack.pyx\"],\n language=\"c\",\n extra_compile_args=extra_compile_args,\n ),\n Cython.Distutils.Extension(\n \"ddtrace.profiling.collector._traceback\",\n sources=[\"ddtrace/profiling/collector/_traceback.pyx\"],\n language=\"c\",\n ),\n Cython.Distutils.Extension(\n \"ddtrace.profiling.collector._threading\",\n sources=[\"ddtrace/profiling/collector/_threading.pyx\"],\n language=\"c\",\n ),\n Cython.Distutils.Extension(\n \"ddtrace.profiling.exporter.pprof\",\n sources=[\"ddtrace/profiling/exporter/pprof.pyx\"],\n language=\"c\",\n ),\n Cython.Distutils.Extension(\n \"ddtrace.profiling._build\",\n sources=[\"ddtrace/profiling/_build.pyx\"],\n language=\"c\",\n ),\n ],\n compile_time_env={\n \"PY_MAJOR_VERSION\": sys.version_info.major,\n \"PY_MINOR_VERSION\": sys.version_info.minor,\n \"PY_MICRO_VERSION\": sys.version_info.micro,\n },\n force=True,\n )\n + get_exts_for(\"wrapt\")\n + get_exts_for(\"psutil\"),\n)\n", "path": "setup.py"}]}
| 2,977 | 158 |
gh_patches_debug_23060
|
rasdani/github-patches
|
git_diff
|
deis__deis-661
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
nodes:scale fails on Vagrant
Looks like a path issue when we shifted around the project in #641. The problem that exists here though is that the `contrib` directory no longer exists in the `deis/controller` image, since it only adds the `controller/` path as per https://github.com/opdemand/deis/blob/master/controller/Dockerfile#L19. Should we move `contrib` to `controller/contrib`, so that the directory is present in the docker image?
```
><> deis nodes:scale dev runtime=1
Scaling nodes... but first, coffee!
400 BAD REQUEST
[Errno 2] No such file or directory: u'/app/deis/contrib/vagrant/util/nodes_vagrantfile_template.rb'
```
Note that the source code now exists at `/app` in the container, so there will have to be another PR to change the path hardcoded at https://github.com/opdemand/deis/blob/master/controller/provider/vagrant.py#L82.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `controller/provider/vagrant.py`
Content:
```
1 """
2 Deis cloud provider implementation for local vagrant setups.
3 """
4
5 from __future__ import unicode_literals
6
7 from api.ssh import exec_ssh, connect_ssh
8
9 import json
10 import logging
11 import string
12 import subprocess
13 import uuid
14
15 from api.models import Layer
16 from api.models import Node
17
18 logger = logging.getLogger(__name__)
19
20 CONTRIB_PATH = '/app/deis/contrib/vagrant'
21
22 # Collect details for connecting to the host machine
23 try:
24 HOST_NODES_DIR = open('{}/.host_nodes_dir'.format(CONTRIB_PATH)).read().strip()
25 PKEY = open('{}/util/ssh_keys/id_rsa_vagrant-deis-controller'.format(CONTRIB_PATH)).read()
26 except IOError as err:
27 logger.warn(err)
28
29
30 def seed_flavors():
31 """Seed the database with default flavors for vagrant.
32
33 :rtype: list of dicts containing flavor data
34 """
35 flavors = []
36 for m in ['512', '1024', '2048']:
37 flavors.append({
38 'id': "vagrant-{}".format(m),
39 'provider': 'vagrant',
40 'params': json.dumps({
41 'memory': m
42 })
43 })
44 return flavors
45
46
47 def build_layer(layer):
48 """
49 Build a layer.
50
51 :param layer: a dict containing formation, id, params, and creds info
52 """
53
54 # This can also be done with `deis layers:update` now.
55 layer_ = Layer.objects.get(id=layer['id'], formation__id=layer['formation'])
56 layer_.ssh_username = 'vagrant'
57 layer_.save()
58
59
60 def destroy_layer(layer):
61 """
62 Destroy a layer.
63
64 :param layer: a dict containing formation, id, params, and creds info
65 """
66 pass
67
68
69 def build_node(node):
70 """
71 Build a node.
72
73 :param node: a dict containing formation, layer, params, and creds info.
74 :rtype: a tuple of (provider_id, fully_qualified_domain_name, metadata)
75 """
76
77 # Can't use the vagrant UUID because it's not booted yet
78 uid = str(uuid.uuid1())
79
80 # Create a new Vagrantfile from a template
81 node['params'].setdefault('memory', '512')
82 template = open('/app/deis/contrib/vagrant/util/nodes_vagrantfile_template.rb')
83 raw = string.Template(template.read())
84 ip_addr = '192.168.61.' + str(Node.objects.all().count() + 100)
85 result = raw.substitute({
86 'id': uid,
87 'ipaddress': ip_addr,
88 'memory': node['params']['memory']
89 })
90
91 # Make a folder for the VM with its own Vagrantfile. Vagrant will then create a .vagrant folder
92 # there too when it first gets booted.
93 node_dir = HOST_NODES_DIR + '/' + uid
94 mkdir = 'mkdir -p "{}"'.format(node_dir)
95 cp_tpl = 'echo "' + result.replace('"', '\\"') + '" > "{}/Vagrantfile"'.format(node_dir)
96 _host_ssh(commands=[mkdir, cp_tpl], creds=node['creds'])
97
98 # Boot the VM
99 _run_vagrant_command(uid, args=['up'], creds=node['creds'])
100
101 # Copy the layer's public SSH key to the VM so that the Controller can access it.
102 _run_vagrant_command(
103 uid,
104 args=[
105 'ssh',
106 '-c',
107 '"echo \\"' + node['ssh_public_key'] + '\\" >> /home/vagrant/.ssh/authorized_keys"'
108 ],
109 creds=node['creds'],
110 )
111
112 provider_id = uid
113 metadata = {
114 'id': uid,
115 'fqdn': ip_addr,
116 'flavor': node['params']['memory']
117 }
118 return provider_id, ip_addr, metadata
119
120
121 def destroy_node(node):
122 """
123 Destroy a node.
124
125 :param node: a dict containing a node's provider_id, params, and creds
126 """
127
128 # This is useful if node creation failed. So that there's a record in the DB, but it has no
129 # ID associated with it.
130 if node['provider_id'] is None:
131 return
132
133 # Shut the VM down and destroy it
134 try:
135 _run_vagrant_command(node['provider_id'], args=['destroy', '--force'], creds=node['creds'])
136 node_dir = HOST_NODES_DIR + '/' + node['provider_id']
137
138 # Sanity check before `rm -rf`
139 if 'contrib/vagrant' not in node_dir:
140 raise RuntimeError(
141 "Aborted node destruction: attempting to 'rm -rf' unexpected directory")
142
143 # Completely remove the folder that contained the VM
144 rm_vagrantfile = 'rm "{}/Vagrantfile"'.format(node_dir)
145 rm_node_dir = 'rm -rf "{}"'.format(node_dir)
146 _host_ssh(commands=[rm_vagrantfile, rm_node_dir], creds=node['creds'])
147 except RuntimeError as err:
148 # If we couldn't cd to the node dir, just log that as a warning
149 if 'no such file or directory' in str(err).lower():
150 logger.warn(err)
151 else:
152 raise
153
154
155 def _run_vagrant_command(node_id, args=[], creds={}):
156 """
157 args: A tuple of arguments to a vagrant command line.
158 e.g. ['up', 'my_vm_name', '--no-provision']
159 """
160
161 cd = 'cd "{}/{}"'.format(HOST_NODES_DIR, node_id)
162 command = ['vagrant'] + [arg for arg in args if arg is not None]
163 return _host_ssh(commands=[cd, ' '.join(command)], creds=creds)
164
165
166 def _host_ssh(creds={}, commands=[]):
167 """
168 Connect to the host machine. Namely the user's local machine.
169 """
170 if creds == {}:
171 raise RuntimeError("No credentials provided to _host_ssh()")
172 command = ' && '.join(commands)
173
174 # First check if we can access the host machine. It's likely that their
175 # IP address changes every time they request a DHCP lease.
176 # TODO: Find a way of passing this error onto the CLI client.
177 try:
178 subprocess.check_call([
179 'nc', '-z', '-w2', creds['host'], '22'
180 ], stderr=subprocess.PIPE)
181 except subprocess.CalledProcessError:
182 raise RuntimeError("Couldn't ping port 22 at host with IP " + creds['host'])
183
184 ssh = connect_ssh(creds['user'], creds['host'], 22, PKEY, timeout=120)
185 result, status = exec_ssh(ssh, command)
186 if status > 0:
187 raise RuntimeError(
188 'SSH to Vagrant host error: ' + result.decode('utf-8') +
189 'Command: ' + command.decode('utf-8'))
190 return result
191
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/controller/provider/vagrant.py b/controller/provider/vagrant.py
--- a/controller/provider/vagrant.py
+++ b/controller/provider/vagrant.py
@@ -17,12 +17,12 @@
logger = logging.getLogger(__name__)
-CONTRIB_PATH = '/app/deis/contrib/vagrant'
+VAGRANT_UTIL_PATH = '/app/provider/vagrant-util'
# Collect details for connecting to the host machine
try:
- HOST_NODES_DIR = open('{}/.host_nodes_dir'.format(CONTRIB_PATH)).read().strip()
- PKEY = open('{}/util/ssh_keys/id_rsa_vagrant-deis-controller'.format(CONTRIB_PATH)).read()
+ HOST_NODES_DIR = open('{}/.host_nodes_dir'.format(VAGRANT_UTIL_PATH)).read().strip()
+ PKEY = open('{}/ssh_keys/id_rsa_vagrant-deis-controller'.format(VAGRANT_UTIL_PATH)).read()
except IOError as err:
logger.warn(err)
@@ -79,7 +79,7 @@
# Create a new Vagrantfile from a template
node['params'].setdefault('memory', '512')
- template = open('/app/deis/contrib/vagrant/util/nodes_vagrantfile_template.rb')
+ template = open('{}/nodes_vagrantfile_template.rb'.format(VAGRANT_UTIL_PATH))
raw = string.Template(template.read())
ip_addr = '192.168.61.' + str(Node.objects.all().count() + 100)
result = raw.substitute({
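For reference, a minimal sketch (assumptions: this is not code from the deis repository, and the `util_path` helper name is purely illustrative) of the path-resolution pattern the patch above applies: keep a single base constant for the bundled vagrant-util assets and derive every file path from it, so nothing points outside the controller image.
```python
# Illustrative sketch only, not deis code. VAGRANT_UTIL_PATH mirrors the
# constant introduced by the patch; util_path() is an assumed helper name.
import os

VAGRANT_UTIL_PATH = '/app/provider/vagrant-util'  # directory baked into the image


def util_path(*parts):
    """Join a path under the bundled vagrant-util directory."""
    return os.path.join(VAGRANT_UTIL_PATH, *parts)


if __name__ == '__main__':
    print(util_path('nodes_vagrantfile_template.rb'))
    print(util_path('ssh_keys', 'id_rsa_vagrant-deis-controller'))
```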
|
{"golden_diff": "diff --git a/controller/provider/vagrant.py b/controller/provider/vagrant.py\n--- a/controller/provider/vagrant.py\n+++ b/controller/provider/vagrant.py\n@@ -17,12 +17,12 @@\n \n logger = logging.getLogger(__name__)\n \n-CONTRIB_PATH = '/app/deis/contrib/vagrant'\n+VAGRANT_UTIL_PATH = '/app/provider/vagrant-util'\n \n # Collect details for connecting to the host machine\n try:\n- HOST_NODES_DIR = open('{}/.host_nodes_dir'.format(CONTRIB_PATH)).read().strip()\n- PKEY = open('{}/util/ssh_keys/id_rsa_vagrant-deis-controller'.format(CONTRIB_PATH)).read()\n+ HOST_NODES_DIR = open('{}/.host_nodes_dir'.format(VAGRANT_UTIL_PATH)).read().strip()\n+ PKEY = open('{}/ssh_keys/id_rsa_vagrant-deis-controller'.format(VAGRANT_UTIL_PATH)).read()\n except IOError as err:\n logger.warn(err)\n \n@@ -79,7 +79,7 @@\n \n # Create a new Vagrantfile from a template\n node['params'].setdefault('memory', '512')\n- template = open('/app/deis/contrib/vagrant/util/nodes_vagrantfile_template.rb')\n+ template = open('{}/nodes_vagrantfile_template.rb'.format(VAGRANT_UTIL_PATH))\n raw = string.Template(template.read())\n ip_addr = '192.168.61.' + str(Node.objects.all().count() + 100)\n result = raw.substitute({\n", "issue": "nodes:scale fails on Vagrant\nLooks like a path issue when we shifted around the project in #641. The problem that exists here though is that the `contrib` directory no longer exists in the `deis/controller` image, since it only adds the `controller/` path as per https://github.com/opdemand/deis/blob/master/controller/Dockerfile#L19. Should we move `contrib` to `controller/contrib`, so that the directory is present in the docker image?\n\n```\n><> deis nodes:scale dev runtime=1\nScaling nodes... but first, coffee!\n400 BAD REQUEST\n[Errno 2] No such file or directory: u'/app/deis/contrib/vagrant/util/nodes_vagrantfile_template.rb'\n```\n\nNote that the source code now exists at `/app` in the container, so there will have to be another PR to change the path hardcoded at https://github.com/opdemand/deis/blob/master/controller/provider/vagrant.py#L82.\n\n", "before_files": [{"content": "\"\"\"\nDeis cloud provider implementation for local vagrant setups.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom api.ssh import exec_ssh, connect_ssh\n\nimport json\nimport logging\nimport string\nimport subprocess\nimport uuid\n\nfrom api.models import Layer\nfrom api.models import Node\n\nlogger = logging.getLogger(__name__)\n\nCONTRIB_PATH = '/app/deis/contrib/vagrant'\n\n# Collect details for connecting to the host machine\ntry:\n HOST_NODES_DIR = open('{}/.host_nodes_dir'.format(CONTRIB_PATH)).read().strip()\n PKEY = open('{}/util/ssh_keys/id_rsa_vagrant-deis-controller'.format(CONTRIB_PATH)).read()\nexcept IOError as err:\n logger.warn(err)\n\n\ndef seed_flavors():\n \"\"\"Seed the database with default flavors for vagrant.\n\n :rtype: list of dicts containing flavor data\n \"\"\"\n flavors = []\n for m in ['512', '1024', '2048']:\n flavors.append({\n 'id': \"vagrant-{}\".format(m),\n 'provider': 'vagrant',\n 'params': json.dumps({\n 'memory': m\n })\n })\n return flavors\n\n\ndef build_layer(layer):\n \"\"\"\n Build a layer.\n\n :param layer: a dict containing formation, id, params, and creds info\n \"\"\"\n\n # This can also be done with `deis layers:update` now.\n layer_ = Layer.objects.get(id=layer['id'], formation__id=layer['formation'])\n layer_.ssh_username = 'vagrant'\n layer_.save()\n\n\ndef destroy_layer(layer):\n \"\"\"\n Destroy a layer.\n\n :param layer: a dict containing 
formation, id, params, and creds info\n \"\"\"\n pass\n\n\ndef build_node(node):\n \"\"\"\n Build a node.\n\n :param node: a dict containing formation, layer, params, and creds info.\n :rtype: a tuple of (provider_id, fully_qualified_domain_name, metadata)\n \"\"\"\n\n # Can't use the vagrant UUID because it's not booted yet\n uid = str(uuid.uuid1())\n\n # Create a new Vagrantfile from a template\n node['params'].setdefault('memory', '512')\n template = open('/app/deis/contrib/vagrant/util/nodes_vagrantfile_template.rb')\n raw = string.Template(template.read())\n ip_addr = '192.168.61.' + str(Node.objects.all().count() + 100)\n result = raw.substitute({\n 'id': uid,\n 'ipaddress': ip_addr,\n 'memory': node['params']['memory']\n })\n\n # Make a folder for the VM with its own Vagrantfile. Vagrant will then create a .vagrant folder\n # there too when it first gets booted.\n node_dir = HOST_NODES_DIR + '/' + uid\n mkdir = 'mkdir -p \"{}\"'.format(node_dir)\n cp_tpl = 'echo \"' + result.replace('\"', '\\\\\"') + '\" > \"{}/Vagrantfile\"'.format(node_dir)\n _host_ssh(commands=[mkdir, cp_tpl], creds=node['creds'])\n\n # Boot the VM\n _run_vagrant_command(uid, args=['up'], creds=node['creds'])\n\n # Copy the layer's public SSH key to the VM so that the Controller can access it.\n _run_vagrant_command(\n uid,\n args=[\n 'ssh',\n '-c',\n '\"echo \\\\\"' + node['ssh_public_key'] + '\\\\\" >> /home/vagrant/.ssh/authorized_keys\"'\n ],\n creds=node['creds'],\n )\n\n provider_id = uid\n metadata = {\n 'id': uid,\n 'fqdn': ip_addr,\n 'flavor': node['params']['memory']\n }\n return provider_id, ip_addr, metadata\n\n\ndef destroy_node(node):\n \"\"\"\n Destroy a node.\n\n :param node: a dict containing a node's provider_id, params, and creds\n \"\"\"\n\n # This is useful if node creation failed. So that there's a record in the DB, but it has no\n # ID associated with it.\n if node['provider_id'] is None:\n return\n\n # Shut the VM down and destroy it\n try:\n _run_vagrant_command(node['provider_id'], args=['destroy', '--force'], creds=node['creds'])\n node_dir = HOST_NODES_DIR + '/' + node['provider_id']\n\n # Sanity check before `rm -rf`\n if 'contrib/vagrant' not in node_dir:\n raise RuntimeError(\n \"Aborted node destruction: attempting to 'rm -rf' unexpected directory\")\n\n # Completely remove the folder that contained the VM\n rm_vagrantfile = 'rm \"{}/Vagrantfile\"'.format(node_dir)\n rm_node_dir = 'rm -rf \"{}\"'.format(node_dir)\n _host_ssh(commands=[rm_vagrantfile, rm_node_dir], creds=node['creds'])\n except RuntimeError as err:\n # If we couldn't cd to the node dir, just log that as a warning\n if 'no such file or directory' in str(err).lower():\n logger.warn(err)\n else:\n raise\n\n\ndef _run_vagrant_command(node_id, args=[], creds={}):\n \"\"\"\n args: A tuple of arguments to a vagrant command line.\n e.g. ['up', 'my_vm_name', '--no-provision']\n \"\"\"\n\n cd = 'cd \"{}/{}\"'.format(HOST_NODES_DIR, node_id)\n command = ['vagrant'] + [arg for arg in args if arg is not None]\n return _host_ssh(commands=[cd, ' '.join(command)], creds=creds)\n\n\ndef _host_ssh(creds={}, commands=[]):\n \"\"\"\n Connect to the host machine. Namely the user's local machine.\n \"\"\"\n if creds == {}:\n raise RuntimeError(\"No credentials provided to _host_ssh()\")\n command = ' && '.join(commands)\n\n # First check if we can access the host machine. 
It's likely that their\n # IP address changes every time they request a DHCP lease.\n # TODO: Find a way of passing this error onto the CLI client.\n try:\n subprocess.check_call([\n 'nc', '-z', '-w2', creds['host'], '22'\n ], stderr=subprocess.PIPE)\n except subprocess.CalledProcessError:\n raise RuntimeError(\"Couldn't ping port 22 at host with IP \" + creds['host'])\n\n ssh = connect_ssh(creds['user'], creds['host'], 22, PKEY, timeout=120)\n result, status = exec_ssh(ssh, command)\n if status > 0:\n raise RuntimeError(\n 'SSH to Vagrant host error: ' + result.decode('utf-8') +\n 'Command: ' + command.decode('utf-8'))\n return result\n", "path": "controller/provider/vagrant.py"}], "after_files": [{"content": "\"\"\"\nDeis cloud provider implementation for local vagrant setups.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom api.ssh import exec_ssh, connect_ssh\n\nimport json\nimport logging\nimport string\nimport subprocess\nimport uuid\n\nfrom api.models import Layer\nfrom api.models import Node\n\nlogger = logging.getLogger(__name__)\n\nVAGRANT_UTIL_PATH = '/app/provider/vagrant-util'\n\n# Collect details for connecting to the host machine\ntry:\n HOST_NODES_DIR = open('{}/.host_nodes_dir'.format(VAGRANT_UTIL_PATH)).read().strip()\n PKEY = open('{}/ssh_keys/id_rsa_vagrant-deis-controller'.format(VAGRANT_UTIL_PATH)).read()\nexcept IOError as err:\n logger.warn(err)\n\n\ndef seed_flavors():\n \"\"\"Seed the database with default flavors for vagrant.\n\n :rtype: list of dicts containing flavor data\n \"\"\"\n flavors = []\n for m in ['512', '1024', '2048']:\n flavors.append({\n 'id': \"vagrant-{}\".format(m),\n 'provider': 'vagrant',\n 'params': json.dumps({\n 'memory': m\n })\n })\n return flavors\n\n\ndef build_layer(layer):\n \"\"\"\n Build a layer.\n\n :param layer: a dict containing formation, id, params, and creds info\n \"\"\"\n\n # This can also be done with `deis layers:update` now.\n layer_ = Layer.objects.get(id=layer['id'], formation__id=layer['formation'])\n layer_.ssh_username = 'vagrant'\n layer_.save()\n\n\ndef destroy_layer(layer):\n \"\"\"\n Destroy a layer.\n\n :param layer: a dict containing formation, id, params, and creds info\n \"\"\"\n pass\n\n\ndef build_node(node):\n \"\"\"\n Build a node.\n\n :param node: a dict containing formation, layer, params, and creds info.\n :rtype: a tuple of (provider_id, fully_qualified_domain_name, metadata)\n \"\"\"\n\n # Can't use the vagrant UUID because it's not booted yet\n uid = str(uuid.uuid1())\n\n # Create a new Vagrantfile from a template\n node['params'].setdefault('memory', '512')\n template = open('{}/nodes_vagrantfile_template.rb'.format(VAGRANT_UTIL_PATH))\n raw = string.Template(template.read())\n ip_addr = '192.168.61.' + str(Node.objects.all().count() + 100)\n result = raw.substitute({\n 'id': uid,\n 'ipaddress': ip_addr,\n 'memory': node['params']['memory']\n })\n\n # Make a folder for the VM with its own Vagrantfile. 
Vagrant will then create a .vagrant folder\n # there too when it first gets booted.\n node_dir = HOST_NODES_DIR + '/' + uid\n mkdir = 'mkdir -p \"{}\"'.format(node_dir)\n cp_tpl = 'echo \"' + result.replace('\"', '\\\\\"') + '\" > \"{}/Vagrantfile\"'.format(node_dir)\n _host_ssh(commands=[mkdir, cp_tpl], creds=node['creds'])\n\n # Boot the VM\n _run_vagrant_command(uid, args=['up'], creds=node['creds'])\n\n # Copy the layer's public SSH key to the VM so that the Controller can access it.\n _run_vagrant_command(\n uid,\n args=[\n 'ssh',\n '-c',\n '\"echo \\\\\"' + node['ssh_public_key'] + '\\\\\" >> /home/vagrant/.ssh/authorized_keys\"'\n ],\n creds=node['creds'],\n )\n\n provider_id = uid\n metadata = {\n 'id': uid,\n 'fqdn': ip_addr,\n 'flavor': node['params']['memory']\n }\n return provider_id, ip_addr, metadata\n\n\ndef destroy_node(node):\n \"\"\"\n Destroy a node.\n\n :param node: a dict containing a node's provider_id, params, and creds\n \"\"\"\n\n # This is useful if node creation failed. So that there's a record in the DB, but it has no\n # ID associated with it.\n if node['provider_id'] is None:\n return\n\n # Shut the VM down and destroy it\n try:\n _run_vagrant_command(node['provider_id'], args=['destroy', '--force'], creds=node['creds'])\n node_dir = HOST_NODES_DIR + '/' + node['provider_id']\n\n # Sanity check before `rm -rf`\n if 'contrib/vagrant' not in node_dir:\n raise RuntimeError(\n \"Aborted node destruction: attempting to 'rm -rf' unexpected directory\")\n\n # Completely remove the folder that contained the VM\n rm_vagrantfile = 'rm \"{}/Vagrantfile\"'.format(node_dir)\n rm_node_dir = 'rm -rf \"{}\"'.format(node_dir)\n _host_ssh(commands=[rm_vagrantfile, rm_node_dir], creds=node['creds'])\n except RuntimeError as err:\n # If we couldn't cd to the node dir, just log that as a warning\n if 'no such file or directory' in str(err).lower():\n logger.warn(err)\n else:\n raise\n\n\ndef _run_vagrant_command(node_id, args=[], creds={}):\n \"\"\"\n args: A tuple of arguments to a vagrant command line.\n e.g. ['up', 'my_vm_name', '--no-provision']\n \"\"\"\n\n cd = 'cd \"{}/{}\"'.format(HOST_NODES_DIR, node_id)\n command = ['vagrant'] + [arg for arg in args if arg is not None]\n return _host_ssh(commands=[cd, ' '.join(command)], creds=creds)\n\n\ndef _host_ssh(creds={}, commands=[]):\n \"\"\"\n Connect to the host machine. Namely the user's local machine.\n \"\"\"\n if creds == {}:\n raise RuntimeError(\"No credentials provided to _host_ssh()\")\n command = ' && '.join(commands)\n\n # First check if we can access the host machine. It's likely that their\n # IP address changes every time they request a DHCP lease.\n # TODO: Find a way of passing this error onto the CLI client.\n try:\n subprocess.check_call([\n 'nc', '-z', '-w2', creds['host'], '22'\n ], stderr=subprocess.PIPE)\n except subprocess.CalledProcessError:\n raise RuntimeError(\"Couldn't ping port 22 at host with IP \" + creds['host'])\n\n ssh = connect_ssh(creds['user'], creds['host'], 22, PKEY, timeout=120)\n result, status = exec_ssh(ssh, command)\n if status > 0:\n raise RuntimeError(\n 'SSH to Vagrant host error: ' + result.decode('utf-8') +\n 'Command: ' + command.decode('utf-8'))\n return result\n", "path": "controller/provider/vagrant.py"}]}
| 2,444 | 340 |
gh_patches_debug_22720
|
rasdani/github-patches
|
git_diff
|
privacyidea__privacyidea-3675
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error 904 is still shown with policy Action login_mode: privacyIDEA
privacyidea 3.8
ubuntu 20
Since 3.7 the error message :´ERR904: The user can not be found in any resolver in this realm!¨ is deactivated when you enter a wrong username.
But this is still true if you have a policy :
Scope webui
Action login_mode: privacyIDEA
if you have a policy:
Scope webui
login_mode: userstore
Then the error (ERR904: The user can not be found in any resolver in this realm!) Is deactivated.
Kind regards
Sebastien
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `privacyidea/lib/auth.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # 2015-11-03 Cornelius Kölbel <[email protected]>
4 # Add check if an admin user exists
5 # 2014-12-15 Cornelius Kölbel, [email protected]
6 # Initial creation
7 #
8 # (c) Cornelius Kölbel
9 # Info: http://www.privacyidea.org
10 #
11 # This code is free software; you can redistribute it and/or
12 # modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
13 # License as published by the Free Software Foundation; either
14 # version 3 of the License, or any later version.
15 #
16 # This code is distributed in the hope that it will be useful,
17 # but WITHOUT ANY WARRANTY; without even the implied warranty of
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 # GNU AFFERO GENERAL PUBLIC LICENSE for more details.
20 #
21 # You should have received a copy of the GNU Affero General Public
22 # License along with this program. If not, see <http://www.gnu.org/licenses/>.
23 #
24 from privacyidea.models import Admin
25 from privacyidea.lib.token import check_user_pass
26 from privacyidea.lib.policydecorators import libpolicy, login_mode
27 from privacyidea.lib.crypto import hash_with_pepper, verify_with_pepper
28 from privacyidea.lib.utils import fetch_one_resource
29
30
31 class ROLE(object):
32 ADMIN = "admin"
33 USER = "user"
34 VALIDATE = "validate"
35
36
37 def verify_db_admin(username, password):
38 """
39 This function is used to verify the username and the password against the
40 database table "Admin".
41 :param username: The administrator username
42 :param password: The password
43 :return: True if password is correct for the admin
44 :rtype: bool
45 """
46 success = False
47 qa = Admin.query.filter(Admin.username == username).first()
48 if qa:
49 success = verify_with_pepper(qa.password, password)
50
51 return success
52
53
54 def db_admin_exist(username):
55 """
56 Checks if a local admin in the database exists
57
58 :param username: The username of the admin
59 :return: True, if exist
60 """
61 return bool(get_db_admin(username))
62
63
64 def create_db_admin(app, username, email=None, password=None):
65 pw_dig = None
66 if password:
67 pw_dig = hash_with_pepper(password)
68 user = Admin(email=email, username=username, password=pw_dig)
69 user.save()
70
71
72 def list_db_admin():
73 admins = Admin.query.all()
74 print("Name \t email")
75 print(30*"=")
76 for admin in admins:
77 print("{0!s} \t {1!s}".format(admin.username, admin.email))
78
79
80 def get_db_admins():
81 admins = Admin.query.all()
82 return admins
83
84
85 def get_db_admin(username):
86 return Admin.query.filter(Admin.username == username).first()
87
88
89 def delete_db_admin(username):
90 print("Deleting admin {0!s}".format(username))
91 fetch_one_resource(Admin, username=username).delete()
92
93
94 @libpolicy(login_mode)
95 def check_webui_user(user_obj,
96 password,
97 options=None,
98 superuser_realms=None,
99 check_otp=False):
100 """
101 This function is used to authenticate the user at the web ui.
102 It checks against the userstore or against OTP/privacyidea (check_otp).
103 It returns a tuple of
104
105 * true/false if the user authenticated successfully
106 * the role of the user
107 * the "detail" dictionary of the response
108
109 :param user_obj: The user who tries to authenticate
110 :type user_obj: User Object
111 :param password: Password, static and or OTP
112 :param options: additional options like g and clientip
113 :type options: dict
114 :param superuser_realms: list of realms, that contain admins
115 :type superuser_realms: list
116 :param check_otp: If set, the user is not authenticated against the
117 userstore but against privacyidea
118 :return: tuple of bool, string and dict/None
119 """
120 options = options or {}
121 superuser_realms = superuser_realms or []
122 user_auth = False
123 role = ROLE.USER
124 details = None
125
126 if check_otp:
127 # check if the given password matches an OTP token
128 check, details = check_user_pass(user_obj, password, options=options)
129 details["loginmode"] = "privacyIDEA"
130 if check:
131 user_auth = True
132 else:
133 # check the password of the user against the userstore
134 if user_obj.check_password(password):
135 user_auth = True
136
137 # If the realm is in the SUPERUSER_REALM then the authorization role
138 # is risen to "admin".
139 if user_obj.realm in superuser_realms:
140 role = ROLE.ADMIN
141
142 return user_auth, role, details
143
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/privacyidea/lib/auth.py b/privacyidea/lib/auth.py
--- a/privacyidea/lib/auth.py
+++ b/privacyidea/lib/auth.py
@@ -26,6 +26,10 @@
from privacyidea.lib.policydecorators import libpolicy, login_mode
from privacyidea.lib.crypto import hash_with_pepper, verify_with_pepper
from privacyidea.lib.utils import fetch_one_resource
+import logging
+
+log = logging.getLogger(__name__)
+
class ROLE(object):
@@ -125,10 +129,13 @@
if check_otp:
# check if the given password matches an OTP token
- check, details = check_user_pass(user_obj, password, options=options)
- details["loginmode"] = "privacyIDEA"
- if check:
- user_auth = True
+ try:
+ check, details = check_user_pass(user_obj, password, options=options)
+ details["loginmode"] = "privacyIDEA"
+ if check:
+ user_auth = True
+ except Exception as e:
+ log.debug("Error authenticating user against privacyIDEA: {0!r}".format(e))
else:
# check the password of the user against the userstore
if user_obj.check_password(password):
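For reference, a minimal sketch (assumption: generic code, not taken from privacyIDEA) of the pattern the patch above introduces: wrap the OTP backend check in try/except, log the failure at debug level, and let the login simply fail so the client never sees a message revealing whether the user exists.
```python
# Illustrative sketch only: check_fn stands in for any backend check such as
# check_user_pass(); the exception is logged server-side, never shown to the client.
import logging

log = logging.getLogger(__name__)


def check_with_backend(check_fn, user, password):
    """Return (authenticated, details); degrade any backend error to a failed login."""
    try:
        ok, details = check_fn(user, password)
        return ok, details
    except Exception as exc:
        log.debug("Error authenticating user against backend: %r", exc)
        return False, None
```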
|
{"golden_diff": "diff --git a/privacyidea/lib/auth.py b/privacyidea/lib/auth.py\n--- a/privacyidea/lib/auth.py\n+++ b/privacyidea/lib/auth.py\n@@ -26,6 +26,10 @@\n from privacyidea.lib.policydecorators import libpolicy, login_mode\n from privacyidea.lib.crypto import hash_with_pepper, verify_with_pepper\n from privacyidea.lib.utils import fetch_one_resource\n+import logging\n+\n+log = logging.getLogger(__name__)\n+\n \n \n class ROLE(object):\n@@ -125,10 +129,13 @@\n \n if check_otp:\n # check if the given password matches an OTP token\n- check, details = check_user_pass(user_obj, password, options=options)\n- details[\"loginmode\"] = \"privacyIDEA\"\n- if check:\n- user_auth = True\n+ try:\n+ check, details = check_user_pass(user_obj, password, options=options)\n+ details[\"loginmode\"] = \"privacyIDEA\"\n+ if check:\n+ user_auth = True\n+ except Exception as e:\n+ log.debug(\"Error authenticating user against privacyIDEA: {0!r}\".format(e))\n else:\n # check the password of the user against the userstore\n if user_obj.check_password(password):\n", "issue": "Error 904 is still shown with policy Action login_mode: privacyIDEA \n\r\nprivacyidea 3.8\r\nubuntu 20\r\n\r\nSince 3.7 the error message :\u00b4ERR904: The user can not be found in any resolver in this realm!\u00a8 is deactivated when you enter a wrong username.\r\n\r\nBut this is still true if you have a policy :\r\nScope webui\r\nAction login_mode: privacyIDEA \r\n\r\nif you have a policy:\r\nScope webui\r\nlogin_mode: userstore \r\nThen the error (ERR904: The user can not be found in any resolver in this realm!) Is deactivated.\r\n\r\nKind regards\r\nSebastien\r\n\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# 2015-11-03 Cornelius K\u00f6lbel <[email protected]>\n# Add check if an admin user exists\n# 2014-12-15 Cornelius K\u00f6lbel, [email protected]\n# Initial creation\n#\n# (c) Cornelius K\u00f6lbel\n# Info: http://www.privacyidea.org\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\nfrom privacyidea.models import Admin\nfrom privacyidea.lib.token import check_user_pass\nfrom privacyidea.lib.policydecorators import libpolicy, login_mode\nfrom privacyidea.lib.crypto import hash_with_pepper, verify_with_pepper\nfrom privacyidea.lib.utils import fetch_one_resource\n\n\nclass ROLE(object):\n ADMIN = \"admin\"\n USER = \"user\"\n VALIDATE = \"validate\"\n\n\ndef verify_db_admin(username, password):\n \"\"\"\n This function is used to verify the username and the password against the\n database table \"Admin\".\n :param username: The administrator username\n :param password: The password\n :return: True if password is correct for the admin\n :rtype: bool\n \"\"\"\n success = False\n qa = Admin.query.filter(Admin.username == username).first()\n if qa:\n success = verify_with_pepper(qa.password, password)\n\n return success\n\n\ndef db_admin_exist(username):\n \"\"\"\n Checks if a local admin in the database exists\n\n :param username: The username of the admin\n :return: True, if exist\n \"\"\"\n return bool(get_db_admin(username))\n\n\ndef create_db_admin(app, username, email=None, password=None):\n pw_dig = None\n if password:\n pw_dig = hash_with_pepper(password)\n user = Admin(email=email, username=username, password=pw_dig)\n user.save()\n\n\ndef list_db_admin():\n admins = Admin.query.all()\n print(\"Name \\t email\")\n print(30*\"=\")\n for admin in admins:\n print(\"{0!s} \\t {1!s}\".format(admin.username, admin.email))\n\n\ndef get_db_admins():\n admins = Admin.query.all()\n return admins\n\n\ndef get_db_admin(username):\n return Admin.query.filter(Admin.username == username).first()\n\n\ndef delete_db_admin(username):\n print(\"Deleting admin {0!s}\".format(username))\n fetch_one_resource(Admin, username=username).delete()\n\n\n@libpolicy(login_mode)\ndef check_webui_user(user_obj,\n password,\n options=None,\n superuser_realms=None,\n check_otp=False):\n \"\"\"\n This function is used to authenticate the user at the web ui.\n It checks against the userstore or against OTP/privacyidea (check_otp).\n It returns a tuple of\n\n * true/false if the user authenticated successfully\n * the role of the user\n * the \"detail\" dictionary of the response\n\n :param user_obj: The user who tries to authenticate\n :type user_obj: User Object\n :param password: Password, static and or OTP\n :param options: additional options like g and clientip\n :type options: dict\n :param superuser_realms: list of realms, that contain admins\n :type superuser_realms: list\n :param check_otp: If set, the user is not authenticated against the\n userstore but against privacyidea\n :return: tuple of bool, string and dict/None\n \"\"\"\n options = options or {}\n superuser_realms = superuser_realms or []\n user_auth = False\n role = ROLE.USER\n details = None\n\n if check_otp:\n # check if the given password matches an OTP token\n check, details = check_user_pass(user_obj, password, options=options)\n details[\"loginmode\"] = \"privacyIDEA\"\n if check:\n user_auth = True\n else:\n # check the password of the user against the userstore\n if user_obj.check_password(password):\n user_auth = True\n\n # If the realm is in the SUPERUSER_REALM then the authorization role\n # is risen to \"admin\".\n if user_obj.realm in superuser_realms:\n role = ROLE.ADMIN\n\n return user_auth, role, details\n", "path": "privacyidea/lib/auth.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# 2015-11-03 Cornelius K\u00f6lbel <[email protected]>\n# Add check 
if an admin user exists\n# 2014-12-15 Cornelius K\u00f6lbel, [email protected]\n# Initial creation\n#\n# (c) Cornelius K\u00f6lbel\n# Info: http://www.privacyidea.org\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\nfrom privacyidea.models import Admin\nfrom privacyidea.lib.token import check_user_pass\nfrom privacyidea.lib.policydecorators import libpolicy, login_mode\nfrom privacyidea.lib.crypto import hash_with_pepper, verify_with_pepper\nfrom privacyidea.lib.utils import fetch_one_resource\nimport logging\n\nlog = logging.getLogger(__name__)\n\n\n\nclass ROLE(object):\n ADMIN = \"admin\"\n USER = \"user\"\n VALIDATE = \"validate\"\n\n\ndef verify_db_admin(username, password):\n \"\"\"\n This function is used to verify the username and the password against the\n database table \"Admin\".\n :param username: The administrator username\n :param password: The password\n :return: True if password is correct for the admin\n :rtype: bool\n \"\"\"\n success = False\n qa = Admin.query.filter(Admin.username == username).first()\n if qa:\n success = verify_with_pepper(qa.password, password)\n\n return success\n\n\ndef db_admin_exist(username):\n \"\"\"\n Checks if a local admin in the database exists\n\n :param username: The username of the admin\n :return: True, if exist\n \"\"\"\n return bool(get_db_admin(username))\n\n\ndef create_db_admin(app, username, email=None, password=None):\n pw_dig = None\n if password:\n pw_dig = hash_with_pepper(password)\n user = Admin(email=email, username=username, password=pw_dig)\n user.save()\n\n\ndef list_db_admin():\n admins = Admin.query.all()\n print(\"Name \\t email\")\n print(30*\"=\")\n for admin in admins:\n print(\"{0!s} \\t {1!s}\".format(admin.username, admin.email))\n\n\ndef get_db_admins():\n admins = Admin.query.all()\n return admins\n\n\ndef get_db_admin(username):\n return Admin.query.filter(Admin.username == username).first()\n\n\ndef delete_db_admin(username):\n print(\"Deleting admin {0!s}\".format(username))\n fetch_one_resource(Admin, username=username).delete()\n\n\n@libpolicy(login_mode)\ndef check_webui_user(user_obj,\n password,\n options=None,\n superuser_realms=None,\n check_otp=False):\n \"\"\"\n This function is used to authenticate the user at the web ui.\n It checks against the userstore or against OTP/privacyidea (check_otp).\n It returns a tuple of\n\n * true/false if the user authenticated successfully\n * the role of the user\n * the \"detail\" dictionary of the response\n\n :param user_obj: The user who tries to authenticate\n :type user_obj: User Object\n :param password: Password, static and or OTP\n :param options: additional options like g and clientip\n :type options: dict\n :param superuser_realms: list of realms, that contain admins\n :type superuser_realms: list\n :param check_otp: If set, the user is not authenticated against the\n userstore but against privacyidea\n :return: tuple of bool, string and dict/None\n 
\"\"\"\n options = options or {}\n superuser_realms = superuser_realms or []\n user_auth = False\n role = ROLE.USER\n details = None\n\n if check_otp:\n # check if the given password matches an OTP token\n try:\n check, details = check_user_pass(user_obj, password, options=options)\n details[\"loginmode\"] = \"privacyIDEA\"\n if check:\n user_auth = True\n except Exception as e:\n log.debug(\"Error authenticating user against privacyIDEA: {0!r}\".format(e))\n else:\n # check the password of the user against the userstore\n if user_obj.check_password(password):\n user_auth = True\n\n # If the realm is in the SUPERUSER_REALM then the authorization role\n # is risen to \"admin\".\n if user_obj.realm in superuser_realms:\n role = ROLE.ADMIN\n\n return user_auth, role, details\n", "path": "privacyidea/lib/auth.py"}]}
| 1,801 | 283 |
gh_patches_debug_14761
|
rasdani/github-patches
|
git_diff
|
iterative__dvc-7965
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add TOML support for metrics
Right now, there is only TOML file support for params files. We need to add TOML support for metrics as well.
Here's a [link to the Discord question](https://discord.com/channels/485586884165107732/485596304961962003/865974923079319563) that brought this up.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/repo/metrics/show.py`
Content:
```
1 import logging
2 import os
3 from typing import List
4
5 from scmrepo.exceptions import SCMError
6
7 from dvc.fs.dvc import DvcFileSystem
8 from dvc.output import Output
9 from dvc.repo import locked
10 from dvc.repo.collect import StrPaths, collect
11 from dvc.repo.live import summary_fs_path
12 from dvc.scm import NoSCMError
13 from dvc.utils import error_handler, errored_revisions, onerror_collect
14 from dvc.utils.collections import ensure_list
15 from dvc.utils.serialize import load_yaml
16
17 logger = logging.getLogger(__name__)
18
19
20 def _is_metric(out: Output) -> bool:
21 return bool(out.metric) or bool(out.live)
22
23
24 def _to_fs_paths(metrics: List[Output]) -> StrPaths:
25 result = []
26 for out in metrics:
27 if out.metric:
28 result.append(out.repo.dvcfs.from_os_path(out.fs_path))
29 elif out.live:
30 fs_path = summary_fs_path(out)
31 if fs_path:
32 result.append(out.repo.dvcfs.from_os_path(fs_path))
33 return result
34
35
36 def _collect_metrics(repo, targets, revision, recursive):
37 metrics, fs_paths = collect(
38 repo,
39 targets=targets,
40 output_filter=_is_metric,
41 recursive=recursive,
42 rev=revision,
43 )
44 return _to_fs_paths(metrics) + list(fs_paths)
45
46
47 def _extract_metrics(metrics, path, rev):
48 if isinstance(metrics, (int, float)):
49 return metrics
50
51 if not isinstance(metrics, dict):
52 return None
53
54 ret = {}
55 for key, val in metrics.items():
56 m = _extract_metrics(val, path, rev)
57 if m not in (None, {}):
58 ret[key] = m
59 else:
60 logger.debug(
61 "Could not parse '%s' metric from '%s' at '%s' "
62 "due to its unsupported type: '%s'",
63 key,
64 path,
65 rev,
66 type(val).__name__,
67 )
68
69 return ret
70
71
72 @error_handler
73 def _read_metric(path, fs, rev, **kwargs):
74 val = load_yaml(path, fs=fs)
75 val = _extract_metrics(val, path, rev)
76 return val or {}
77
78
79 def _read_metrics(repo, metrics, rev, onerror=None):
80 fs = DvcFileSystem(repo=repo)
81
82 relpath = ""
83 if repo.root_dir != repo.fs.path.getcwd():
84 relpath = repo.fs.path.relpath(repo.root_dir, repo.fs.path.getcwd())
85
86 res = {}
87 for metric in metrics:
88 if not fs.isfile(metric):
89 continue
90
91 res[os.path.join(relpath, *fs.path.parts(metric))] = _read_metric(
92 metric, fs, rev, onerror=onerror
93 )
94
95 return res
96
97
98 def _gather_metrics(repo, targets, rev, recursive, onerror=None):
99 metrics = _collect_metrics(repo, targets, rev, recursive)
100 return _read_metrics(repo, metrics, rev, onerror=onerror)
101
102
103 @locked
104 def show(
105 repo,
106 targets=None,
107 all_branches=False,
108 all_tags=False,
109 recursive=False,
110 revs=None,
111 all_commits=False,
112 onerror=None,
113 ):
114 if onerror is None:
115 onerror = onerror_collect
116
117 targets = ensure_list(targets)
118 targets = [repo.dvcfs.from_os_path(target) for target in targets]
119
120 res = {}
121 for rev in repo.brancher(
122 revs=revs,
123 all_branches=all_branches,
124 all_tags=all_tags,
125 all_commits=all_commits,
126 ):
127 res[rev] = error_handler(_gather_metrics)(
128 repo, targets, rev, recursive, onerror=onerror
129 )
130
131 # Hide workspace metrics if they are the same as in the active branch
132 try:
133 active_branch = repo.scm.active_branch()
134 except (SCMError, NoSCMError):
135 # SCMError - detached head
136 # NoSCMError - no repo case
137 pass
138 else:
139 if res.get("workspace") == res.get(active_branch):
140 res.pop("workspace", None)
141
142 errored = errored_revisions(res)
143 if errored:
144 from dvc.ui import ui
145
146 ui.error_write(
147 "DVC failed to load some metrics for following revisions:"
148 f" '{', '.join(errored)}'."
149 )
150
151 return res
152
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dvc/repo/metrics/show.py b/dvc/repo/metrics/show.py
--- a/dvc/repo/metrics/show.py
+++ b/dvc/repo/metrics/show.py
@@ -12,7 +12,7 @@
from dvc.scm import NoSCMError
from dvc.utils import error_handler, errored_revisions, onerror_collect
from dvc.utils.collections import ensure_list
-from dvc.utils.serialize import load_yaml
+from dvc.utils.serialize import LOADERS
logger = logging.getLogger(__name__)
@@ -71,7 +71,9 @@
@error_handler
def _read_metric(path, fs, rev, **kwargs):
- val = load_yaml(path, fs=fs)
+ suffix = fs.path.suffix(path).lower()
+ loader = LOADERS[suffix]
+ val = loader(path, fs=fs)
val = _extract_metrics(val, path, rev)
return val or {}
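For reference, a minimal sketch (assumptions: the LOADERS table below is illustrative rather than DVC's actual dvc.utils.serialize registry; it needs PyYAML and Python 3.11+ for tomllib) of the suffix-dispatch idea the patch above uses to make the metrics reader format-aware.
```python
# Illustrative sketch only: pick a parser by file suffix instead of assuming YAML.
import json
import tomllib  # standard library in Python 3.11+
from pathlib import Path

import yaml  # assumes PyYAML is installed

LOADERS = {
    ".json": json.loads,
    ".yaml": yaml.safe_load,
    ".yml": yaml.safe_load,
    ".toml": tomllib.loads,
}


def read_metric(path):
    """Load a metrics file with the parser matching its suffix."""
    text = Path(path).read_text()
    return LOADERS[Path(path).suffix.lower()](text)
```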
|
{"golden_diff": "diff --git a/dvc/repo/metrics/show.py b/dvc/repo/metrics/show.py\n--- a/dvc/repo/metrics/show.py\n+++ b/dvc/repo/metrics/show.py\n@@ -12,7 +12,7 @@\n from dvc.scm import NoSCMError\n from dvc.utils import error_handler, errored_revisions, onerror_collect\n from dvc.utils.collections import ensure_list\n-from dvc.utils.serialize import load_yaml\n+from dvc.utils.serialize import LOADERS\n \n logger = logging.getLogger(__name__)\n \n@@ -71,7 +71,9 @@\n \n @error_handler\n def _read_metric(path, fs, rev, **kwargs):\n- val = load_yaml(path, fs=fs)\n+ suffix = fs.path.suffix(path).lower()\n+ loader = LOADERS[suffix]\n+ val = loader(path, fs=fs)\n val = _extract_metrics(val, path, rev)\n return val or {}\n", "issue": "Add TOML support for metrics\nRight now, there is only TOML file support for params files. We need to add TOML support for metrics as well.\r\n\r\nHere's a [link to the Discord question](https://discord.com/channels/485586884165107732/485596304961962003/865974923079319563) that brought this up.\n", "before_files": [{"content": "import logging\nimport os\nfrom typing import List\n\nfrom scmrepo.exceptions import SCMError\n\nfrom dvc.fs.dvc import DvcFileSystem\nfrom dvc.output import Output\nfrom dvc.repo import locked\nfrom dvc.repo.collect import StrPaths, collect\nfrom dvc.repo.live import summary_fs_path\nfrom dvc.scm import NoSCMError\nfrom dvc.utils import error_handler, errored_revisions, onerror_collect\nfrom dvc.utils.collections import ensure_list\nfrom dvc.utils.serialize import load_yaml\n\nlogger = logging.getLogger(__name__)\n\n\ndef _is_metric(out: Output) -> bool:\n return bool(out.metric) or bool(out.live)\n\n\ndef _to_fs_paths(metrics: List[Output]) -> StrPaths:\n result = []\n for out in metrics:\n if out.metric:\n result.append(out.repo.dvcfs.from_os_path(out.fs_path))\n elif out.live:\n fs_path = summary_fs_path(out)\n if fs_path:\n result.append(out.repo.dvcfs.from_os_path(fs_path))\n return result\n\n\ndef _collect_metrics(repo, targets, revision, recursive):\n metrics, fs_paths = collect(\n repo,\n targets=targets,\n output_filter=_is_metric,\n recursive=recursive,\n rev=revision,\n )\n return _to_fs_paths(metrics) + list(fs_paths)\n\n\ndef _extract_metrics(metrics, path, rev):\n if isinstance(metrics, (int, float)):\n return metrics\n\n if not isinstance(metrics, dict):\n return None\n\n ret = {}\n for key, val in metrics.items():\n m = _extract_metrics(val, path, rev)\n if m not in (None, {}):\n ret[key] = m\n else:\n logger.debug(\n \"Could not parse '%s' metric from '%s' at '%s' \"\n \"due to its unsupported type: '%s'\",\n key,\n path,\n rev,\n type(val).__name__,\n )\n\n return ret\n\n\n@error_handler\ndef _read_metric(path, fs, rev, **kwargs):\n val = load_yaml(path, fs=fs)\n val = _extract_metrics(val, path, rev)\n return val or {}\n\n\ndef _read_metrics(repo, metrics, rev, onerror=None):\n fs = DvcFileSystem(repo=repo)\n\n relpath = \"\"\n if repo.root_dir != repo.fs.path.getcwd():\n relpath = repo.fs.path.relpath(repo.root_dir, repo.fs.path.getcwd())\n\n res = {}\n for metric in metrics:\n if not fs.isfile(metric):\n continue\n\n res[os.path.join(relpath, *fs.path.parts(metric))] = _read_metric(\n metric, fs, rev, onerror=onerror\n )\n\n return res\n\n\ndef _gather_metrics(repo, targets, rev, recursive, onerror=None):\n metrics = _collect_metrics(repo, targets, rev, recursive)\n return _read_metrics(repo, metrics, rev, onerror=onerror)\n\n\n@locked\ndef show(\n repo,\n targets=None,\n all_branches=False,\n all_tags=False,\n 
recursive=False,\n revs=None,\n all_commits=False,\n onerror=None,\n):\n if onerror is None:\n onerror = onerror_collect\n\n targets = ensure_list(targets)\n targets = [repo.dvcfs.from_os_path(target) for target in targets]\n\n res = {}\n for rev in repo.brancher(\n revs=revs,\n all_branches=all_branches,\n all_tags=all_tags,\n all_commits=all_commits,\n ):\n res[rev] = error_handler(_gather_metrics)(\n repo, targets, rev, recursive, onerror=onerror\n )\n\n # Hide workspace metrics if they are the same as in the active branch\n try:\n active_branch = repo.scm.active_branch()\n except (SCMError, NoSCMError):\n # SCMError - detached head\n # NoSCMError - no repo case\n pass\n else:\n if res.get(\"workspace\") == res.get(active_branch):\n res.pop(\"workspace\", None)\n\n errored = errored_revisions(res)\n if errored:\n from dvc.ui import ui\n\n ui.error_write(\n \"DVC failed to load some metrics for following revisions:\"\n f\" '{', '.join(errored)}'.\"\n )\n\n return res\n", "path": "dvc/repo/metrics/show.py"}], "after_files": [{"content": "import logging\nimport os\nfrom typing import List\n\nfrom scmrepo.exceptions import SCMError\n\nfrom dvc.fs.dvc import DvcFileSystem\nfrom dvc.output import Output\nfrom dvc.repo import locked\nfrom dvc.repo.collect import StrPaths, collect\nfrom dvc.repo.live import summary_fs_path\nfrom dvc.scm import NoSCMError\nfrom dvc.utils import error_handler, errored_revisions, onerror_collect\nfrom dvc.utils.collections import ensure_list\nfrom dvc.utils.serialize import LOADERS\n\nlogger = logging.getLogger(__name__)\n\n\ndef _is_metric(out: Output) -> bool:\n return bool(out.metric) or bool(out.live)\n\n\ndef _to_fs_paths(metrics: List[Output]) -> StrPaths:\n result = []\n for out in metrics:\n if out.metric:\n result.append(out.repo.dvcfs.from_os_path(out.fs_path))\n elif out.live:\n fs_path = summary_fs_path(out)\n if fs_path:\n result.append(out.repo.dvcfs.from_os_path(fs_path))\n return result\n\n\ndef _collect_metrics(repo, targets, revision, recursive):\n metrics, fs_paths = collect(\n repo,\n targets=targets,\n output_filter=_is_metric,\n recursive=recursive,\n rev=revision,\n )\n return _to_fs_paths(metrics) + list(fs_paths)\n\n\ndef _extract_metrics(metrics, path, rev):\n if isinstance(metrics, (int, float)):\n return metrics\n\n if not isinstance(metrics, dict):\n return None\n\n ret = {}\n for key, val in metrics.items():\n m = _extract_metrics(val, path, rev)\n if m not in (None, {}):\n ret[key] = m\n else:\n logger.debug(\n \"Could not parse '%s' metric from '%s' at '%s' \"\n \"due to its unsupported type: '%s'\",\n key,\n path,\n rev,\n type(val).__name__,\n )\n\n return ret\n\n\n@error_handler\ndef _read_metric(path, fs, rev, **kwargs):\n suffix = fs.path.suffix(path).lower()\n loader = LOADERS[suffix]\n val = loader(path, fs=fs)\n val = _extract_metrics(val, path, rev)\n return val or {}\n\n\ndef _read_metrics(repo, metrics, rev, onerror=None):\n fs = DvcFileSystem(repo=repo)\n\n relpath = \"\"\n if repo.root_dir != repo.fs.path.getcwd():\n relpath = repo.fs.path.relpath(repo.root_dir, repo.fs.path.getcwd())\n\n res = {}\n for metric in metrics:\n if not fs.isfile(metric):\n continue\n\n res[os.path.join(relpath, *fs.path.parts(metric))] = _read_metric(\n metric, fs, rev, onerror=onerror\n )\n\n return res\n\n\ndef _gather_metrics(repo, targets, rev, recursive, onerror=None):\n metrics = _collect_metrics(repo, targets, rev, recursive)\n return _read_metrics(repo, metrics, rev, onerror=onerror)\n\n\n@locked\ndef show(\n repo,\n targets=None,\n 
all_branches=False,\n all_tags=False,\n recursive=False,\n revs=None,\n all_commits=False,\n onerror=None,\n):\n if onerror is None:\n onerror = onerror_collect\n\n targets = ensure_list(targets)\n targets = [repo.dvcfs.from_os_path(target) for target in targets]\n\n res = {}\n for rev in repo.brancher(\n revs=revs,\n all_branches=all_branches,\n all_tags=all_tags,\n all_commits=all_commits,\n ):\n res[rev] = error_handler(_gather_metrics)(\n repo, targets, rev, recursive, onerror=onerror\n )\n\n # Hide workspace metrics if they are the same as in the active branch\n try:\n active_branch = repo.scm.active_branch()\n except (SCMError, NoSCMError):\n # SCMError - detached head\n # NoSCMError - no repo case\n pass\n else:\n if res.get(\"workspace\") == res.get(active_branch):\n res.pop(\"workspace\", None)\n\n errored = errored_revisions(res)\n if errored:\n from dvc.ui import ui\n\n ui.error_write(\n \"DVC failed to load some metrics for following revisions:\"\n f\" '{', '.join(errored)}'.\"\n )\n\n return res\n", "path": "dvc/repo/metrics/show.py"}]}
| 1,684 | 211 |
gh_patches_debug_31073
|
rasdani/github-patches
|
git_diff
|
fossasia__open-event-server-4162
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ticket-tag: remove GET for /ticket-tags
Parent issue #4101.
Related issue: #4119.
Make `/ticket-tags` POST only.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/ticket_tags.py`
Content:
```
1 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
2 from marshmallow_jsonapi.flask import Schema, Relationship
3 from marshmallow_jsonapi import fields
4 from sqlalchemy.orm.exc import NoResultFound
5 from flask_rest_jsonapi.exceptions import ObjectNotFound
6
7 from app.api.helpers.utilities import dasherize
8 from app.api.helpers.permissions import jwt_required
9 from app.models import db
10 from app.models.ticket import Ticket, TicketTag, ticket_tags_table
11 from app.models.event import Event
12 from app.api.helpers.db import safe_query
13 from app.api.helpers.utilities import require_relationship
14 from app.api.helpers.exceptions import ForbiddenException
15 from app.api.helpers.permission_manager import has_access
16
17
18 class TicketTagSchema(Schema):
19 """
20 Api schema for TicketTag Model
21 """
22
23 class Meta:
24 """
25 Meta class for TicketTag Api Schema
26 """
27 type_ = 'ticket-tag'
28 self_view = 'v1.ticket_tag_detail'
29 self_view_kwargs = {'id': '<id>'}
30 inflect = dasherize
31
32 id = fields.Str(dump_only=True)
33 name = fields.Str(allow_none=True)
34 tickets = Relationship(attribute='tickets',
35 self_view='v1.ticket_tag_ticket',
36 self_view_kwargs={'id': '<id>'},
37 related_view='v1.ticket_list',
38 related_view_kwargs={'ticket_tag_id': '<id>'},
39 schema='TicketSchema',
40 many=True,
41 type_='ticket')
42 event = Relationship(attribute='event',
43 self_view='v1.ticket_tag_event',
44 self_view_kwargs={'id': '<id>'},
45 related_view='v1.event_detail',
46 related_view_kwargs={'ticket_tag_id': '<id>'},
47 schema='EventSchema',
48 type_='event')
49
50
51 class TicketTagListPost(ResourceList):
52 """
53 List and create TicketTag
54 """
55 def before_post(self, args, kwargs, data):
56 """
57 before post method for checking required relationship
58 :param args:
59 :param kwargs:
60 :param data:
61 :return:
62 """
63 require_relationship(['event'], data)
64
65 if not has_access('is_coorganizer', event_id=data['event']):
66 raise ForbiddenException({'source': ''}, 'Co-organizer access is required.')
67
68 def after_create_object(self, obj, data, view_kwargs):
69 """
70 method to add ticket tags and ticket in association table
71 :param obj:
72 :param data:
73 :param view_kwargs:
74 :return:
75 """
76 if 'tickets' in data:
77 ticket_ids = data['tickets']
78 for ticket_id in ticket_ids:
79 try:
80 ticket = Ticket.query.filter_by(id=ticket_id).one()
81 except NoResultFound:
82 raise ObjectNotFound({'parameter': 'ticket_id'},
83 "Ticket: {} not found".format(ticket_id))
84 else:
85 ticket.tags.append(obj)
86 self.session.commit()
87
88 schema = TicketTagSchema
89 data_layer = {'session': db.session,
90 'model': TicketTag,
91 'methods': {
92 'after_create_object': after_create_object
93 }}
94
95
96 class TicketTagList(ResourceList):
97 """
98 List TicketTags based on event_id or ticket_id
99 """
100 def query(self, view_kwargs):
101 """
102 method to query Ticket tags based on different params
103 :param view_kwargs:
104 :return:
105 """
106 query_ = self.session.query(TicketTag)
107 if view_kwargs.get('ticket_id'):
108 ticket = safe_query(self, Ticket, 'id', view_kwargs['ticket_id'], 'ticket_id')
109 query_ = query_.join(ticket_tags_table).filter_by(ticket_id=ticket.id)
110 if view_kwargs.get('event_id'):
111 event = safe_query(self, Event, 'id', view_kwargs['event_id'], 'event_id')
112 query_ = query_.join(Event).filter(Event.id == event.id)
113 elif view_kwargs.get('event_identifier'):
114 event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'event_identifier')
115 query_ = query_.join(Event).filter(Event.id == event.id)
116 return query_
117
118 view_kwargs = True
119 schema = TicketTagSchema
120 methods = ['GET', ]
121 data_layer = {'session': db.session,
122 'model': TicketTag,
123 'methods': {
124 'query': query
125 }}
126
127
128 class TicketTagDetail(ResourceDetail):
129 """
130 TicketTag detail by id
131 """
132 decorators = (jwt_required,)
133 schema = TicketTagSchema
134 data_layer = {'session': db.session,
135 'model': TicketTag}
136
137
138 class TicketTagRelationshipRequired(ResourceRelationship):
139 """
140 TicketTag Relationship
141 """
142 decorators = (jwt_required,)
143 methods = ['GET', 'PATCH']
144 schema = TicketTagSchema
145 data_layer = {'session': db.session,
146 'model': TicketTag}
147
148
149 class TicketTagRelationshipOptional(ResourceRelationship):
150 """
151 TicketTag Relationship
152 """
153 decorators = (jwt_required,)
154 schema = TicketTagSchema
155 data_layer = {'session': db.session,
156 'model': TicketTag}
157
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/app/api/ticket_tags.py b/app/api/ticket_tags.py
--- a/app/api/ticket_tags.py
+++ b/app/api/ticket_tags.py
@@ -1,8 +1,6 @@
from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
from marshmallow_jsonapi.flask import Schema, Relationship
from marshmallow_jsonapi import fields
-from sqlalchemy.orm.exc import NoResultFound
-from flask_rest_jsonapi.exceptions import ObjectNotFound
from app.api.helpers.utilities import dasherize
from app.api.helpers.permissions import jwt_required
@@ -65,32 +63,10 @@
if not has_access('is_coorganizer', event_id=data['event']):
raise ForbiddenException({'source': ''}, 'Co-organizer access is required.')
- def after_create_object(self, obj, data, view_kwargs):
- """
- method to add ticket tags and ticket in association table
- :param obj:
- :param data:
- :param view_kwargs:
- :return:
- """
- if 'tickets' in data:
- ticket_ids = data['tickets']
- for ticket_id in ticket_ids:
- try:
- ticket = Ticket.query.filter_by(id=ticket_id).one()
- except NoResultFound:
- raise ObjectNotFound({'parameter': 'ticket_id'},
- "Ticket: {} not found".format(ticket_id))
- else:
- ticket.tags.append(obj)
- self.session.commit()
-
schema = TicketTagSchema
+ methods = ['POST', ]
data_layer = {'session': db.session,
- 'model': TicketTag,
- 'methods': {
- 'after_create_object': after_create_object
- }}
+ 'model': TicketTag}
class TicketTagList(ResourceList):
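A short note on why the patch above works: flask-rest-jsonapi resources accept a class-level `methods` list (the same mechanism `TicketTagList` already uses for GET), and listing only `'POST'` makes Flask reject every other verb on that route. A minimal sketch of the pattern, not taken from the Open Event codebase — the class name is illustrative, and a real resource would also set `schema` and `data_layer` as `TicketTagListPost` does:

```python
# Sketch only: the `methods` attribute is the piece the patch relies on.
from flask_rest_jsonapi import ResourceList


class ExampleListPost(ResourceList):
    # With an explicit list, any verb not named here (including GET) is answered
    # with 405 Method Not Allowed, which is what "make /ticket-tags POST only" needs.
    methods = ['POST', ]
```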
|
{"golden_diff": "diff --git a/app/api/ticket_tags.py b/app/api/ticket_tags.py\n--- a/app/api/ticket_tags.py\n+++ b/app/api/ticket_tags.py\n@@ -1,8 +1,6 @@\n from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\n from marshmallow_jsonapi.flask import Schema, Relationship\n from marshmallow_jsonapi import fields\n-from sqlalchemy.orm.exc import NoResultFound\n-from flask_rest_jsonapi.exceptions import ObjectNotFound\n \n from app.api.helpers.utilities import dasherize\n from app.api.helpers.permissions import jwt_required\n@@ -65,32 +63,10 @@\n if not has_access('is_coorganizer', event_id=data['event']):\n raise ForbiddenException({'source': ''}, 'Co-organizer access is required.')\n \n- def after_create_object(self, obj, data, view_kwargs):\n- \"\"\"\n- method to add ticket tags and ticket in association table\n- :param obj:\n- :param data:\n- :param view_kwargs:\n- :return:\n- \"\"\"\n- if 'tickets' in data:\n- ticket_ids = data['tickets']\n- for ticket_id in ticket_ids:\n- try:\n- ticket = Ticket.query.filter_by(id=ticket_id).one()\n- except NoResultFound:\n- raise ObjectNotFound({'parameter': 'ticket_id'},\n- \"Ticket: {} not found\".format(ticket_id))\n- else:\n- ticket.tags.append(obj)\n- self.session.commit()\n-\n schema = TicketTagSchema\n+ methods = ['POST', ]\n data_layer = {'session': db.session,\n- 'model': TicketTag,\n- 'methods': {\n- 'after_create_object': after_create_object\n- }}\n+ 'model': TicketTag}\n \n \n class TicketTagList(ResourceList):\n", "issue": "Ticket-tag: remove GET for /ticket-tags \nParent issue #4101.\r\nRelated issue: #4119.\r\n\r\nMake `/ticket-tags` POST only.\n", "before_files": [{"content": "from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom marshmallow_jsonapi.flask import Schema, Relationship\nfrom marshmallow_jsonapi import fields\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom flask_rest_jsonapi.exceptions import ObjectNotFound\n\nfrom app.api.helpers.utilities import dasherize\nfrom app.api.helpers.permissions import jwt_required\nfrom app.models import db\nfrom app.models.ticket import Ticket, TicketTag, ticket_tags_table\nfrom app.models.event import Event\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.helpers.exceptions import ForbiddenException\nfrom app.api.helpers.permission_manager import has_access\n\n\nclass TicketTagSchema(Schema):\n \"\"\"\n Api schema for TicketTag Model\n \"\"\"\n\n class Meta:\n \"\"\"\n Meta class for TicketTag Api Schema\n \"\"\"\n type_ = 'ticket-tag'\n self_view = 'v1.ticket_tag_detail'\n self_view_kwargs = {'id': '<id>'}\n inflect = dasherize\n\n id = fields.Str(dump_only=True)\n name = fields.Str(allow_none=True)\n tickets = Relationship(attribute='tickets',\n self_view='v1.ticket_tag_ticket',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.ticket_list',\n related_view_kwargs={'ticket_tag_id': '<id>'},\n schema='TicketSchema',\n many=True,\n type_='ticket')\n event = Relationship(attribute='event',\n self_view='v1.ticket_tag_event',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.event_detail',\n related_view_kwargs={'ticket_tag_id': '<id>'},\n schema='EventSchema',\n type_='event')\n\n\nclass TicketTagListPost(ResourceList):\n \"\"\"\n List and create TicketTag\n \"\"\"\n def before_post(self, args, kwargs, data):\n \"\"\"\n before post method for checking required relationship\n :param args:\n :param kwargs:\n :param data:\n :return:\n \"\"\"\n 
require_relationship(['event'], data)\n\n if not has_access('is_coorganizer', event_id=data['event']):\n raise ForbiddenException({'source': ''}, 'Co-organizer access is required.')\n\n def after_create_object(self, obj, data, view_kwargs):\n \"\"\"\n method to add ticket tags and ticket in association table\n :param obj:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if 'tickets' in data:\n ticket_ids = data['tickets']\n for ticket_id in ticket_ids:\n try:\n ticket = Ticket.query.filter_by(id=ticket_id).one()\n except NoResultFound:\n raise ObjectNotFound({'parameter': 'ticket_id'},\n \"Ticket: {} not found\".format(ticket_id))\n else:\n ticket.tags.append(obj)\n self.session.commit()\n\n schema = TicketTagSchema\n data_layer = {'session': db.session,\n 'model': TicketTag,\n 'methods': {\n 'after_create_object': after_create_object\n }}\n\n\nclass TicketTagList(ResourceList):\n \"\"\"\n List TicketTags based on event_id or ticket_id\n \"\"\"\n def query(self, view_kwargs):\n \"\"\"\n method to query Ticket tags based on different params\n :param view_kwargs:\n :return:\n \"\"\"\n query_ = self.session.query(TicketTag)\n if view_kwargs.get('ticket_id'):\n ticket = safe_query(self, Ticket, 'id', view_kwargs['ticket_id'], 'ticket_id')\n query_ = query_.join(ticket_tags_table).filter_by(ticket_id=ticket.id)\n if view_kwargs.get('event_id'):\n event = safe_query(self, Event, 'id', view_kwargs['event_id'], 'event_id')\n query_ = query_.join(Event).filter(Event.id == event.id)\n elif view_kwargs.get('event_identifier'):\n event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'event_identifier')\n query_ = query_.join(Event).filter(Event.id == event.id)\n return query_\n\n view_kwargs = True\n schema = TicketTagSchema\n methods = ['GET', ]\n data_layer = {'session': db.session,\n 'model': TicketTag,\n 'methods': {\n 'query': query\n }}\n\n\nclass TicketTagDetail(ResourceDetail):\n \"\"\"\n TicketTag detail by id\n \"\"\"\n decorators = (jwt_required,)\n schema = TicketTagSchema\n data_layer = {'session': db.session,\n 'model': TicketTag}\n\n\nclass TicketTagRelationshipRequired(ResourceRelationship):\n \"\"\"\n TicketTag Relationship\n \"\"\"\n decorators = (jwt_required,)\n methods = ['GET', 'PATCH']\n schema = TicketTagSchema\n data_layer = {'session': db.session,\n 'model': TicketTag}\n\n\nclass TicketTagRelationshipOptional(ResourceRelationship):\n \"\"\"\n TicketTag Relationship\n \"\"\"\n decorators = (jwt_required,)\n schema = TicketTagSchema\n data_layer = {'session': db.session,\n 'model': TicketTag}\n", "path": "app/api/ticket_tags.py"}], "after_files": [{"content": "from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom marshmallow_jsonapi.flask import Schema, Relationship\nfrom marshmallow_jsonapi import fields\n\nfrom app.api.helpers.utilities import dasherize\nfrom app.api.helpers.permissions import jwt_required\nfrom app.models import db\nfrom app.models.ticket import Ticket, TicketTag, ticket_tags_table\nfrom app.models.event import Event\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.helpers.exceptions import ForbiddenException\nfrom app.api.helpers.permission_manager import has_access\n\n\nclass TicketTagSchema(Schema):\n \"\"\"\n Api schema for TicketTag Model\n \"\"\"\n\n class Meta:\n \"\"\"\n Meta class for TicketTag Api Schema\n \"\"\"\n type_ = 'ticket-tag'\n self_view = 'v1.ticket_tag_detail'\n self_view_kwargs = {'id': '<id>'}\n 
inflect = dasherize\n\n id = fields.Str(dump_only=True)\n name = fields.Str(allow_none=True)\n tickets = Relationship(attribute='tickets',\n self_view='v1.ticket_tag_ticket',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.ticket_list',\n related_view_kwargs={'ticket_tag_id': '<id>'},\n schema='TicketSchema',\n many=True,\n type_='ticket')\n event = Relationship(attribute='event',\n self_view='v1.ticket_tag_event',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.event_detail',\n related_view_kwargs={'ticket_tag_id': '<id>'},\n schema='EventSchema',\n type_='event')\n\n\nclass TicketTagListPost(ResourceList):\n \"\"\"\n List and create TicketTag\n \"\"\"\n def before_post(self, args, kwargs, data):\n \"\"\"\n before post method for checking required relationship\n :param args:\n :param kwargs:\n :param data:\n :return:\n \"\"\"\n require_relationship(['event'], data)\n\n if not has_access('is_coorganizer', event_id=data['event']):\n raise ForbiddenException({'source': ''}, 'Co-organizer access is required.')\n\n schema = TicketTagSchema\n methods = ['POST', ]\n data_layer = {'session': db.session,\n 'model': TicketTag}\n\n\nclass TicketTagList(ResourceList):\n \"\"\"\n List TicketTags based on event_id or ticket_id\n \"\"\"\n def query(self, view_kwargs):\n \"\"\"\n method to query Ticket tags based on different params\n :param view_kwargs:\n :return:\n \"\"\"\n query_ = self.session.query(TicketTag)\n if view_kwargs.get('ticket_id'):\n ticket = safe_query(self, Ticket, 'id', view_kwargs['ticket_id'], 'ticket_id')\n query_ = query_.join(ticket_tags_table).filter_by(ticket_id=ticket.id)\n if view_kwargs.get('event_id'):\n event = safe_query(self, Event, 'id', view_kwargs['event_id'], 'event_id')\n query_ = query_.join(Event).filter(Event.id == event.id)\n elif view_kwargs.get('event_identifier'):\n event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'event_identifier')\n query_ = query_.join(Event).filter(Event.id == event.id)\n return query_\n\n view_kwargs = True\n schema = TicketTagSchema\n methods = ['GET', ]\n data_layer = {'session': db.session,\n 'model': TicketTag,\n 'methods': {\n 'query': query\n }}\n\n\nclass TicketTagDetail(ResourceDetail):\n \"\"\"\n TicketTag detail by id\n \"\"\"\n decorators = (jwt_required,)\n schema = TicketTagSchema\n data_layer = {'session': db.session,\n 'model': TicketTag}\n\n\nclass TicketTagRelationshipRequired(ResourceRelationship):\n \"\"\"\n TicketTag Relationship\n \"\"\"\n decorators = (jwt_required,)\n methods = ['GET', 'PATCH']\n schema = TicketTagSchema\n data_layer = {'session': db.session,\n 'model': TicketTag}\n\n\nclass TicketTagRelationshipOptional(ResourceRelationship):\n \"\"\"\n TicketTag Relationship\n \"\"\"\n decorators = (jwt_required,)\n schema = TicketTagSchema\n data_layer = {'session': db.session,\n 'model': TicketTag}\n", "path": "app/api/ticket_tags.py"}]}
| 1,737 | 392 |
gh_patches_debug_27001
|
rasdani/github-patches
|
git_diff
|
Kinto__kinto-1015
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
updated default kinto config breaks docker (kinto server not reachable from host)
In e4e70fb, the default listen ip has been changed to 127.0.0.1.
I couldn't manage to export the port using docker, using this command from the [install documentation](http://kinto.readthedocs.io/en/latest/tutorials/install.html#environment-variables):
```
docker run --env-file kinto.env --link kinto_db:db -p 127.0.0.1:8888:8888 --name kinto_web croco/kinto:latest
```
For instance, when I do `curl http://127.0.0.1:8888` from the host I get `Recv failure: Connection reset by peer`.
When I reverted kinto.ini to listen on 0.0.0.0, curl from the host succeeded.
Maybe there is an environment variable to override the `host` entry in the `server:main` section? Then it would be fine, provided the documentation is updated accordingly...
This is not visible with the published kinto/kinto-server image, since it's still 5.1. I built my own from master because I needed an armhf image.
--- END ISSUE ---
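Context for the report above: Docker publishes a port by forwarding traffic to the container's own network interface, so a server bound to 127.0.0.1 inside the container never sees those forwarded connections — which matches the `Connection reset by peer` the reporter gets. A tiny illustration with plain sockets (not Kinto code; the port number is just the one from the report):

```python
# Illustration only: the bind address decides which clients can reach the listener.
import socket

srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind(("127.0.0.1", 8888))   # loopback only: unreachable through `docker run -p 8888:8888`
# srv.bind(("0.0.0.0", 8888))   # all interfaces: reachable from the host via the published port
srv.listen()
srv.close()
```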
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/config/__init__.py`
Content:
```
1 import os
2 import codecs
3 from time import strftime
4
5 from kinto.core import utils as core_utils
6
7 from kinto import logger
8 from kinto import __version__
9
10 HERE = os.path.abspath(os.path.dirname(__file__))
11
12
13 def render_template(template, destination, **kwargs):
14 template = os.path.join(HERE, template)
15 folder = os.path.dirname(destination)
16
17 if folder and not os.path.exists(folder):
18 os.makedirs(folder)
19
20 logger.info("Created config {}".format(os.path.abspath(destination)))
21
22 with codecs.open(template, 'r', encoding='utf-8') as f:
23 raw_template = f.read()
24 rendered = raw_template.format(**kwargs)
25 with codecs.open(destination, 'w+', encoding='utf-8') as output:
26 output.write(rendered)
27
28
29 def init(config_file, backend):
30 values = {}
31
32 values['secret'] = core_utils.random_bytes_hex(32)
33
34 values['kinto_version'] = __version__
35 values['config_file_timestamp'] = core_utils._encoded(
36 strftime('%a, %d %b %Y %H:%M:%S %z'))
37
38 values['storage_backend'] = "kinto.core.storage.%s" % backend
39 values['cache_backend'] = "kinto.core.cache.%s" % backend
40 values['permission_backend'] = "kinto.core.permission.%s" % backend
41
42 if backend == 'postgresql':
43 postgresql_url = "postgres://postgres:postgres@localhost/postgres"
44 values['storage_url'] = postgresql_url
45 values['cache_url'] = postgresql_url
46 values['permission_url'] = postgresql_url
47
48 elif backend == 'redis':
49 redis_url = "redis://localhost:6379"
50 values['storage_backend'] = "kinto_redis.storage"
51 values['cache_backend'] = "kinto_redis.cache"
52 values['permission_backend'] = "kinto_redis.permission"
53
54 values['storage_url'] = redis_url + "/1"
55 values['cache_url'] = redis_url + "/2"
56 values['permission_url'] = redis_url + "/3"
57
58 else:
59 values['storage_url'] = ''
60 values['cache_url'] = ''
61 values['permission_url'] = ''
62
63 render_template("kinto.tpl", config_file, **values)
64
```
Path: `kinto/__main__.py`
Content:
```
1 from __future__ import print_function
2 import argparse
3 import os
4 import sys
5 import logging
6 import logging.config
7 from six.moves import input
8
9 from kinto.core import scripts
10 from pyramid.scripts import pserve
11 from pyramid.paster import bootstrap
12 from kinto import __version__
13 from kinto.config import init
14
15 DEFAULT_CONFIG_FILE = 'config/kinto.ini'
16 DEFAULT_PORT = 8888
17 DEFAULT_LOG_LEVEL = logging.INFO
18 DEFAULT_LOG_FORMAT = "%(levelname)-5.5s %(message)s"
19
20
21 def main(args=None):
22 """The main routine."""
23 if args is None:
24 args = sys.argv[1:]
25
26 parser = argparse.ArgumentParser(description="Kinto Command-Line "
27 "Interface")
28 # XXX: deprecate this option, unnatural as first argument.
29 parser.add_argument('--ini',
30 help='Application configuration file',
31 dest='ini_file',
32 required=False,
33 default=DEFAULT_CONFIG_FILE)
34
35 parser.add_argument('-q', '--quiet', action='store_const',
36 const=logging.CRITICAL, dest='verbosity',
37 help='Show only critical errors.')
38
39 parser.add_argument('--debug', action='store_const',
40 const=logging.DEBUG, dest='verbosity',
41 help='Show all messages, including debug messages.')
42
43 commands = ('init', 'start', 'migrate', 'delete-collection', 'version')
44 subparsers = parser.add_subparsers(title='subcommands',
45 description='Main Kinto CLI commands',
46 dest='subcommand',
47 help="Choose and run with --help")
48 subparsers.required = True
49
50 for command in commands:
51 subparser = subparsers.add_parser(command)
52 subparser.set_defaults(which=command)
53
54 if command == 'init':
55 subparser.add_argument('--backend',
56 help='{memory,redis,postgresql}',
57 dest='backend',
58 required=False,
59 default=None)
60 elif command == 'migrate':
61 subparser.add_argument('--dry-run',
62 action='store_true',
63 help='Simulate the migration operations '
64 'and show information',
65 dest='dry_run',
66 required=False,
67 default=False)
68 elif command == 'delete-collection':
69 subparser.add_argument('--bucket',
70 help='The bucket where the collection '
71 'belongs to.',
72 required=True)
73 subparser.add_argument('--collection',
74 help='The collection to remove.',
75 required=True)
76
77 elif command == 'start':
78 subparser.add_argument('--reload',
79 action='store_true',
80 help='Restart when code or config changes',
81 required=False,
82 default=False)
83 subparser.add_argument('--port',
84 type=int,
85 help='Listening port number',
86 required=False,
87 default=DEFAULT_PORT)
88
89 # Parse command-line arguments
90 parsed_args = vars(parser.parse_args(args))
91
92 config_file = parsed_args['ini_file']
93 which_command = parsed_args['which']
94
95 # Initialize logging from
96 level = parsed_args.get('verbosity') or DEFAULT_LOG_LEVEL
97 logging.basicConfig(level=level, format=DEFAULT_LOG_FORMAT)
98
99 if which_command == 'init':
100 if os.path.exists(config_file):
101 print("%s already exists." % config_file, file=sys.stderr)
102 return 1
103
104 backend = parsed_args['backend']
105 if not backend:
106 while True:
107 prompt = ("Select the backend you would like to use: "
108 "(1 - postgresql, 2 - redis, default - memory) ")
109 answer = input(prompt).strip()
110 try:
111 backends = {"1": "postgresql", "2": "redis", "": "memory"}
112 backend = backends[answer]
113 break
114 except KeyError:
115 pass
116
117 init(config_file, backend)
118
119 # Install postgresql libraries if necessary
120 if backend == "postgresql":
121 try:
122 import psycopg2 # NOQA
123 except ImportError:
124 import pip
125 pip.main(['install', "kinto[postgresql]"])
126 elif backend == "redis":
127 try:
128 import kinto_redis # NOQA
129 except ImportError:
130 import pip
131 pip.main(['install', "kinto[redis]"])
132
133 elif which_command == 'migrate':
134 dry_run = parsed_args['dry_run']
135 env = bootstrap(config_file)
136 scripts.migrate(env, dry_run=dry_run)
137
138 elif which_command == 'delete-collection':
139 env = bootstrap(config_file)
140 return scripts.delete_collection(env,
141 parsed_args['bucket'],
142 parsed_args['collection'])
143
144 elif which_command == 'start':
145 pserve_argv = ['pserve', config_file]
146 if parsed_args['reload']:
147 pserve_argv.append('--reload')
148 pserve_argv.append('http_port=%s' % parsed_args['port'])
149 pserve.main(pserve_argv)
150
151 elif which_command == 'version':
152 print(__version__)
153
154 return 0
155
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kinto/__main__.py b/kinto/__main__.py
--- a/kinto/__main__.py
+++ b/kinto/__main__.py
@@ -57,6 +57,11 @@
dest='backend',
required=False,
default=None)
+ subparser.add_argument('--host',
+ help='Host to listen() on.',
+ dest='host',
+ required=False,
+ default='127.0.0.1')
elif command == 'migrate':
subparser.add_argument('--dry-run',
action='store_true',
@@ -114,7 +119,7 @@
except KeyError:
pass
- init(config_file, backend)
+ init(config_file, backend, parsed_args['host'])
# Install postgresql libraries if necessary
if backend == "postgresql":
diff --git a/kinto/config/__init__.py b/kinto/config/__init__.py
--- a/kinto/config/__init__.py
+++ b/kinto/config/__init__.py
@@ -26,9 +26,10 @@
output.write(rendered)
-def init(config_file, backend):
+def init(config_file, backend, host='127.0.0.1'):
values = {}
+ values['host'] = host
values['secret'] = core_utils.random_bytes_hex(32)
values['kinto_version'] = __version__
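Assuming the patched signature above (and a matching `{host}` placeholder in `kinto.tpl`, which is not shown in this excerpt), the generated config can opt back into listening on all interfaces; the CLI equivalent added by the same patch is `kinto init --host 0.0.0.0`. A small usage sketch — file paths and backend choice are illustrative, and each call writes a config file:

```python
# Usage sketch for the patched helper; not part of the Kinto test suite.
from kinto.config import init

init("config/kinto.ini", "memory")                         # default host stays 127.0.0.1
init("config/docker-kinto.ini", "memory", host="0.0.0.0")  # reachable through `-p 8888:8888`
```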
|
{"golden_diff": "diff --git a/kinto/__main__.py b/kinto/__main__.py\n--- a/kinto/__main__.py\n+++ b/kinto/__main__.py\n@@ -57,6 +57,11 @@\n dest='backend',\n required=False,\n default=None)\n+ subparser.add_argument('--host',\n+ help='Host to listen() on.',\n+ dest='host',\n+ required=False,\n+ default='127.0.0.1')\n elif command == 'migrate':\n subparser.add_argument('--dry-run',\n action='store_true',\n@@ -114,7 +119,7 @@\n except KeyError:\n pass\n \n- init(config_file, backend)\n+ init(config_file, backend, parsed_args['host'])\n \n # Install postgresql libraries if necessary\n if backend == \"postgresql\":\ndiff --git a/kinto/config/__init__.py b/kinto/config/__init__.py\n--- a/kinto/config/__init__.py\n+++ b/kinto/config/__init__.py\n@@ -26,9 +26,10 @@\n output.write(rendered)\n \n \n-def init(config_file, backend):\n+def init(config_file, backend, host='127.0.0.1'):\n values = {}\n \n+ values['host'] = host\n values['secret'] = core_utils.random_bytes_hex(32)\n \n values['kinto_version'] = __version__\n", "issue": "updated default kinto config breaks docker (kinto server not reacheable from host)\nIn e4e70fb, the default listen ip has been changed to 127.0.0.1.\r\nI couldn't manage to export the port using docker, using this command from the [install documentation](http://kinto.readthedocs.io/en/latest/tutorials/install.html#environment-variables):\r\n```\r\ndocker run --env-file kinto.env --link kinto_db:db -p 127.0.0.1:8888:8888 --name kinto_web croco/kinto:latest\r\n```\r\nFor instance, when I do `curl http://127.0.0.1:8888` from the host I get `Recv failure: Connection reset by peer`.\r\n\r\nWhen I reverted kinto.ini to listen on 0.0.0.0, curl from the host succeeds.\r\n\r\nMaybe there is an environment variable to override the `host` entry in the `server:main` section? Then it would be fine, provided an updated documentation...\r\n\r\nThis is not visible with the published kinto/kinto-server image, since it's still 5.1. 
I built my own from master because I needed an armhf image.\n", "before_files": [{"content": "import os\nimport codecs\nfrom time import strftime\n\nfrom kinto.core import utils as core_utils\n\nfrom kinto import logger\nfrom kinto import __version__\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n\n\ndef render_template(template, destination, **kwargs):\n template = os.path.join(HERE, template)\n folder = os.path.dirname(destination)\n\n if folder and not os.path.exists(folder):\n os.makedirs(folder)\n\n logger.info(\"Created config {}\".format(os.path.abspath(destination)))\n\n with codecs.open(template, 'r', encoding='utf-8') as f:\n raw_template = f.read()\n rendered = raw_template.format(**kwargs)\n with codecs.open(destination, 'w+', encoding='utf-8') as output:\n output.write(rendered)\n\n\ndef init(config_file, backend):\n values = {}\n\n values['secret'] = core_utils.random_bytes_hex(32)\n\n values['kinto_version'] = __version__\n values['config_file_timestamp'] = core_utils._encoded(\n strftime('%a, %d %b %Y %H:%M:%S %z'))\n\n values['storage_backend'] = \"kinto.core.storage.%s\" % backend\n values['cache_backend'] = \"kinto.core.cache.%s\" % backend\n values['permission_backend'] = \"kinto.core.permission.%s\" % backend\n\n if backend == 'postgresql':\n postgresql_url = \"postgres://postgres:postgres@localhost/postgres\"\n values['storage_url'] = postgresql_url\n values['cache_url'] = postgresql_url\n values['permission_url'] = postgresql_url\n\n elif backend == 'redis':\n redis_url = \"redis://localhost:6379\"\n values['storage_backend'] = \"kinto_redis.storage\"\n values['cache_backend'] = \"kinto_redis.cache\"\n values['permission_backend'] = \"kinto_redis.permission\"\n\n values['storage_url'] = redis_url + \"/1\"\n values['cache_url'] = redis_url + \"/2\"\n values['permission_url'] = redis_url + \"/3\"\n\n else:\n values['storage_url'] = ''\n values['cache_url'] = ''\n values['permission_url'] = ''\n\n render_template(\"kinto.tpl\", config_file, **values)\n", "path": "kinto/config/__init__.py"}, {"content": "from __future__ import print_function\nimport argparse\nimport os\nimport sys\nimport logging\nimport logging.config\nfrom six.moves import input\n\nfrom kinto.core import scripts\nfrom pyramid.scripts import pserve\nfrom pyramid.paster import bootstrap\nfrom kinto import __version__\nfrom kinto.config import init\n\nDEFAULT_CONFIG_FILE = 'config/kinto.ini'\nDEFAULT_PORT = 8888\nDEFAULT_LOG_LEVEL = logging.INFO\nDEFAULT_LOG_FORMAT = \"%(levelname)-5.5s %(message)s\"\n\n\ndef main(args=None):\n \"\"\"The main routine.\"\"\"\n if args is None:\n args = sys.argv[1:]\n\n parser = argparse.ArgumentParser(description=\"Kinto Command-Line \"\n \"Interface\")\n # XXX: deprecate this option, unnatural as first argument.\n parser.add_argument('--ini',\n help='Application configuration file',\n dest='ini_file',\n required=False,\n default=DEFAULT_CONFIG_FILE)\n\n parser.add_argument('-q', '--quiet', action='store_const',\n const=logging.CRITICAL, dest='verbosity',\n help='Show only critical errors.')\n\n parser.add_argument('--debug', action='store_const',\n const=logging.DEBUG, dest='verbosity',\n help='Show all messages, including debug messages.')\n\n commands = ('init', 'start', 'migrate', 'delete-collection', 'version')\n subparsers = parser.add_subparsers(title='subcommands',\n description='Main Kinto CLI commands',\n dest='subcommand',\n help=\"Choose and run with --help\")\n subparsers.required = True\n\n for command in commands:\n subparser = 
subparsers.add_parser(command)\n subparser.set_defaults(which=command)\n\n if command == 'init':\n subparser.add_argument('--backend',\n help='{memory,redis,postgresql}',\n dest='backend',\n required=False,\n default=None)\n elif command == 'migrate':\n subparser.add_argument('--dry-run',\n action='store_true',\n help='Simulate the migration operations '\n 'and show information',\n dest='dry_run',\n required=False,\n default=False)\n elif command == 'delete-collection':\n subparser.add_argument('--bucket',\n help='The bucket where the collection '\n 'belongs to.',\n required=True)\n subparser.add_argument('--collection',\n help='The collection to remove.',\n required=True)\n\n elif command == 'start':\n subparser.add_argument('--reload',\n action='store_true',\n help='Restart when code or config changes',\n required=False,\n default=False)\n subparser.add_argument('--port',\n type=int,\n help='Listening port number',\n required=False,\n default=DEFAULT_PORT)\n\n # Parse command-line arguments\n parsed_args = vars(parser.parse_args(args))\n\n config_file = parsed_args['ini_file']\n which_command = parsed_args['which']\n\n # Initialize logging from\n level = parsed_args.get('verbosity') or DEFAULT_LOG_LEVEL\n logging.basicConfig(level=level, format=DEFAULT_LOG_FORMAT)\n\n if which_command == 'init':\n if os.path.exists(config_file):\n print(\"%s already exists.\" % config_file, file=sys.stderr)\n return 1\n\n backend = parsed_args['backend']\n if not backend:\n while True:\n prompt = (\"Select the backend you would like to use: \"\n \"(1 - postgresql, 2 - redis, default - memory) \")\n answer = input(prompt).strip()\n try:\n backends = {\"1\": \"postgresql\", \"2\": \"redis\", \"\": \"memory\"}\n backend = backends[answer]\n break\n except KeyError:\n pass\n\n init(config_file, backend)\n\n # Install postgresql libraries if necessary\n if backend == \"postgresql\":\n try:\n import psycopg2 # NOQA\n except ImportError:\n import pip\n pip.main(['install', \"kinto[postgresql]\"])\n elif backend == \"redis\":\n try:\n import kinto_redis # NOQA\n except ImportError:\n import pip\n pip.main(['install', \"kinto[redis]\"])\n\n elif which_command == 'migrate':\n dry_run = parsed_args['dry_run']\n env = bootstrap(config_file)\n scripts.migrate(env, dry_run=dry_run)\n\n elif which_command == 'delete-collection':\n env = bootstrap(config_file)\n return scripts.delete_collection(env,\n parsed_args['bucket'],\n parsed_args['collection'])\n\n elif which_command == 'start':\n pserve_argv = ['pserve', config_file]\n if parsed_args['reload']:\n pserve_argv.append('--reload')\n pserve_argv.append('http_port=%s' % parsed_args['port'])\n pserve.main(pserve_argv)\n\n elif which_command == 'version':\n print(__version__)\n\n return 0\n", "path": "kinto/__main__.py"}], "after_files": [{"content": "import os\nimport codecs\nfrom time import strftime\n\nfrom kinto.core import utils as core_utils\n\nfrom kinto import logger\nfrom kinto import __version__\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n\n\ndef render_template(template, destination, **kwargs):\n template = os.path.join(HERE, template)\n folder = os.path.dirname(destination)\n\n if folder and not os.path.exists(folder):\n os.makedirs(folder)\n\n logger.info(\"Created config {}\".format(os.path.abspath(destination)))\n\n with codecs.open(template, 'r', encoding='utf-8') as f:\n raw_template = f.read()\n rendered = raw_template.format(**kwargs)\n with codecs.open(destination, 'w+', encoding='utf-8') as output:\n output.write(rendered)\n\n\ndef 
init(config_file, backend, host='127.0.0.1'):\n values = {}\n\n values['host'] = host\n values['secret'] = core_utils.random_bytes_hex(32)\n\n values['kinto_version'] = __version__\n values['config_file_timestamp'] = core_utils._encoded(\n strftime('%a, %d %b %Y %H:%M:%S %z'))\n\n values['storage_backend'] = \"kinto.core.storage.%s\" % backend\n values['cache_backend'] = \"kinto.core.cache.%s\" % backend\n values['permission_backend'] = \"kinto.core.permission.%s\" % backend\n\n if backend == 'postgresql':\n postgresql_url = \"postgres://postgres:postgres@localhost/postgres\"\n values['storage_url'] = postgresql_url\n values['cache_url'] = postgresql_url\n values['permission_url'] = postgresql_url\n\n elif backend == 'redis':\n redis_url = \"redis://localhost:6379\"\n values['storage_backend'] = \"kinto_redis.storage\"\n values['cache_backend'] = \"kinto_redis.cache\"\n values['permission_backend'] = \"kinto_redis.permission\"\n\n values['storage_url'] = redis_url + \"/1\"\n values['cache_url'] = redis_url + \"/2\"\n values['permission_url'] = redis_url + \"/3\"\n\n else:\n values['storage_url'] = ''\n values['cache_url'] = ''\n values['permission_url'] = ''\n\n render_template(\"kinto.tpl\", config_file, **values)\n", "path": "kinto/config/__init__.py"}, {"content": "from __future__ import print_function\nimport argparse\nimport os\nimport sys\nimport logging\nimport logging.config\nfrom six.moves import input\n\nfrom kinto.core import scripts\nfrom pyramid.scripts import pserve\nfrom pyramid.paster import bootstrap\nfrom kinto import __version__\nfrom kinto.config import init\n\nDEFAULT_CONFIG_FILE = 'config/kinto.ini'\nDEFAULT_PORT = 8888\nDEFAULT_LOG_LEVEL = logging.INFO\nDEFAULT_LOG_FORMAT = \"%(levelname)-5.5s %(message)s\"\n\n\ndef main(args=None):\n \"\"\"The main routine.\"\"\"\n if args is None:\n args = sys.argv[1:]\n\n parser = argparse.ArgumentParser(description=\"Kinto Command-Line \"\n \"Interface\")\n # XXX: deprecate this option, unnatural as first argument.\n parser.add_argument('--ini',\n help='Application configuration file',\n dest='ini_file',\n required=False,\n default=DEFAULT_CONFIG_FILE)\n\n parser.add_argument('-q', '--quiet', action='store_const',\n const=logging.CRITICAL, dest='verbosity',\n help='Show only critical errors.')\n\n parser.add_argument('--debug', action='store_const',\n const=logging.DEBUG, dest='verbosity',\n help='Show all messages, including debug messages.')\n\n commands = ('init', 'start', 'migrate', 'delete-collection', 'version')\n subparsers = parser.add_subparsers(title='subcommands',\n description='Main Kinto CLI commands',\n dest='subcommand',\n help=\"Choose and run with --help\")\n subparsers.required = True\n\n for command in commands:\n subparser = subparsers.add_parser(command)\n subparser.set_defaults(which=command)\n\n if command == 'init':\n subparser.add_argument('--backend',\n help='{memory,redis,postgresql}',\n dest='backend',\n required=False,\n default=None)\n subparser.add_argument('--host',\n help='Host to listen() on.',\n dest='host',\n required=False,\n default='127.0.0.1')\n elif command == 'migrate':\n subparser.add_argument('--dry-run',\n action='store_true',\n help='Simulate the migration operations '\n 'and show information',\n dest='dry_run',\n required=False,\n default=False)\n elif command == 'delete-collection':\n subparser.add_argument('--bucket',\n help='The bucket where the collection '\n 'belongs to.',\n required=True)\n subparser.add_argument('--collection',\n help='The collection to remove.',\n 
required=True)\n\n elif command == 'start':\n subparser.add_argument('--reload',\n action='store_true',\n help='Restart when code or config changes',\n required=False,\n default=False)\n subparser.add_argument('--port',\n type=int,\n help='Listening port number',\n required=False,\n default=DEFAULT_PORT)\n\n # Parse command-line arguments\n parsed_args = vars(parser.parse_args(args))\n\n config_file = parsed_args['ini_file']\n which_command = parsed_args['which']\n\n # Initialize logging from\n level = parsed_args.get('verbosity') or DEFAULT_LOG_LEVEL\n logging.basicConfig(level=level, format=DEFAULT_LOG_FORMAT)\n\n if which_command == 'init':\n if os.path.exists(config_file):\n print(\"%s already exists.\" % config_file, file=sys.stderr)\n return 1\n\n backend = parsed_args['backend']\n if not backend:\n while True:\n prompt = (\"Select the backend you would like to use: \"\n \"(1 - postgresql, 2 - redis, default - memory) \")\n answer = input(prompt).strip()\n try:\n backends = {\"1\": \"postgresql\", \"2\": \"redis\", \"\": \"memory\"}\n backend = backends[answer]\n break\n except KeyError:\n pass\n\n init(config_file, backend, parsed_args['host'])\n\n # Install postgresql libraries if necessary\n if backend == \"postgresql\":\n try:\n import psycopg2 # NOQA\n except ImportError:\n import pip\n pip.main(['install', \"kinto[postgresql]\"])\n elif backend == \"redis\":\n try:\n import kinto_redis # NOQA\n except ImportError:\n import pip\n pip.main(['install', \"kinto[redis]\"])\n\n elif which_command == 'migrate':\n dry_run = parsed_args['dry_run']\n env = bootstrap(config_file)\n scripts.migrate(env, dry_run=dry_run)\n\n elif which_command == 'delete-collection':\n env = bootstrap(config_file)\n return scripts.delete_collection(env,\n parsed_args['bucket'],\n parsed_args['collection'])\n\n elif which_command == 'start':\n pserve_argv = ['pserve', config_file]\n if parsed_args['reload']:\n pserve_argv.append('--reload')\n pserve_argv.append('http_port=%s' % parsed_args['port'])\n pserve.main(pserve_argv)\n\n elif which_command == 'version':\n print(__version__)\n\n return 0\n", "path": "kinto/__main__.py"}]}
| 2,560 | 316 |
gh_patches_debug_11325
|
rasdani/github-patches
|
git_diff
|
getredash__redash-4359
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Password Auth enabling itself when using LDAP
### Issue Summary
When using LDAP for auth, the checkbox for "Password Login Enabled" in settings becomes greyed out. However, when changing any other setting on that page and clicking save, the "Password Login Enabled" setting gets re-enabled. I can't find any way to then disable it other than doing so manually in the Postgres 'organizations' table.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/handlers/authentication.py`
Content:
```
1 import logging
2
3 from flask import abort, flash, redirect, render_template, request, url_for
4
5 from flask_login import current_user, login_required, login_user, logout_user
6 from redash import __version__, limiter, models, settings
7 from redash.authentication import current_org, get_login_url, get_next_path
8 from redash.authentication.account import (
9 BadSignature,
10 SignatureExpired,
11 send_password_reset_email,
12 send_user_disabled_email,
13 send_verify_email,
14 validate_token,
15 )
16 from redash.handlers import routes
17 from redash.handlers.base import json_response, org_scoped_rule
18 from redash.version_check import get_latest_version
19 from sqlalchemy.orm.exc import NoResultFound
20
21 logger = logging.getLogger(__name__)
22
23
24 def get_google_auth_url(next_path):
25 if settings.MULTI_ORG:
26 google_auth_url = url_for(
27 "google_oauth.authorize_org", next=next_path, org_slug=current_org.slug
28 )
29 else:
30 google_auth_url = url_for("google_oauth.authorize", next=next_path)
31 return google_auth_url
32
33
34 def render_token_login_page(template, org_slug, token, invite):
35 try:
36 user_id = validate_token(token)
37 org = current_org._get_current_object()
38 user = models.User.get_by_id_and_org(user_id, org)
39 except NoResultFound:
40 logger.exception(
41 "Bad user id in token. Token= , User id= %s, Org=%s",
42 user_id,
43 token,
44 org_slug,
45 )
46 return (
47 render_template(
48 "error.html",
49 error_message="Invalid invite link. Please ask for a new one.",
50 ),
51 400,
52 )
53 except (SignatureExpired, BadSignature):
54 logger.exception("Failed to verify invite token: %s, org=%s", token, org_slug)
55 return (
56 render_template(
57 "error.html",
58 error_message="Your invite link has expired. Please ask for a new one.",
59 ),
60 400,
61 )
62
63 if invite and user.details.get("is_invitation_pending") is False:
64 return (
65 render_template(
66 "error.html",
67 error_message=(
68 "This invitation has already been accepted. "
69 "Please try resetting your password instead."
70 ),
71 ),
72 400,
73 )
74
75 status_code = 200
76 if request.method == "POST":
77 if "password" not in request.form:
78 flash("Bad Request")
79 status_code = 400
80 elif not request.form["password"]:
81 flash("Cannot use empty password.")
82 status_code = 400
83 elif len(request.form["password"]) < 6:
84 flash("Password length is too short (<6).")
85 status_code = 400
86 else:
87 if invite:
88 user.is_invitation_pending = False
89 user.hash_password(request.form["password"])
90 models.db.session.add(user)
91 login_user(user)
92 models.db.session.commit()
93 return redirect(url_for("redash.index", org_slug=org_slug))
94
95 google_auth_url = get_google_auth_url(url_for("redash.index", org_slug=org_slug))
96
97 return (
98 render_template(
99 template,
100 show_google_openid=settings.GOOGLE_OAUTH_ENABLED,
101 google_auth_url=google_auth_url,
102 show_saml_login=current_org.get_setting("auth_saml_enabled"),
103 show_remote_user_login=settings.REMOTE_USER_LOGIN_ENABLED,
104 show_ldap_login=settings.LDAP_LOGIN_ENABLED,
105 org_slug=org_slug,
106 user=user,
107 ),
108 status_code,
109 )
110
111
112 @routes.route(org_scoped_rule("/invite/<token>"), methods=["GET", "POST"])
113 def invite(token, org_slug=None):
114 return render_token_login_page("invite.html", org_slug, token, True)
115
116
117 @routes.route(org_scoped_rule("/reset/<token>"), methods=["GET", "POST"])
118 def reset(token, org_slug=None):
119 return render_token_login_page("reset.html", org_slug, token, False)
120
121
122 @routes.route(org_scoped_rule("/verify/<token>"), methods=["GET"])
123 def verify(token, org_slug=None):
124 try:
125 user_id = validate_token(token)
126 org = current_org._get_current_object()
127 user = models.User.get_by_id_and_org(user_id, org)
128 except (BadSignature, NoResultFound):
129 logger.exception(
130 "Failed to verify email verification token: %s, org=%s", token, org_slug
131 )
132 return (
133 render_template(
134 "error.html",
135 error_message="Your verification link is invalid. Please ask for a new one.",
136 ),
137 400,
138 )
139
140 user.is_email_verified = True
141 models.db.session.add(user)
142 models.db.session.commit()
143
144 template_context = {"org_slug": org_slug} if settings.MULTI_ORG else {}
145 next_url = url_for("redash.index", **template_context)
146
147 return render_template("verify.html", next_url=next_url)
148
149
150 @routes.route(org_scoped_rule("/forgot"), methods=["GET", "POST"])
151 def forgot_password(org_slug=None):
152 if not current_org.get_setting("auth_password_login_enabled"):
153 abort(404)
154
155 submitted = False
156 if request.method == "POST" and request.form["email"]:
157 submitted = True
158 email = request.form["email"]
159 try:
160 org = current_org._get_current_object()
161 user = models.User.get_by_email_and_org(email, org)
162 if user.is_disabled:
163 send_user_disabled_email(user)
164 else:
165 send_password_reset_email(user)
166 except NoResultFound:
167 logging.error("No user found for forgot password: %s", email)
168
169 return render_template("forgot.html", submitted=submitted)
170
171
172 @routes.route(org_scoped_rule("/verification_email/"), methods=["POST"])
173 def verification_email(org_slug=None):
174 if not current_user.is_email_verified:
175 send_verify_email(current_user, current_org)
176
177 return json_response(
178 {
179 "message": "Please check your email inbox in order to verify your email address."
180 }
181 )
182
183
184 @routes.route(org_scoped_rule("/login"), methods=["GET", "POST"])
185 @limiter.limit(settings.THROTTLE_LOGIN_PATTERN)
186 def login(org_slug=None):
187 # We intentionally use == as otherwise it won't actually use the proxy. So weird :O
188 # noinspection PyComparisonWithNone
189 if current_org == None and not settings.MULTI_ORG:
190 return redirect("/setup")
191 elif current_org == None:
192 return redirect("/")
193
194 index_url = url_for("redash.index", org_slug=org_slug)
195 unsafe_next_path = request.args.get("next", index_url)
196 next_path = get_next_path(unsafe_next_path)
197 if current_user.is_authenticated:
198 return redirect(next_path)
199
200 if request.method == "POST":
201 try:
202 org = current_org._get_current_object()
203 user = models.User.get_by_email_and_org(request.form["email"], org)
204 if (
205 user
206 and not user.is_disabled
207 and user.verify_password(request.form["password"])
208 ):
209 remember = "remember" in request.form
210 login_user(user, remember=remember)
211 return redirect(next_path)
212 else:
213 flash("Wrong email or password.")
214 except NoResultFound:
215 flash("Wrong email or password.")
216
217 google_auth_url = get_google_auth_url(next_path)
218
219 return render_template(
220 "login.html",
221 org_slug=org_slug,
222 next=next_path,
223 email=request.form.get("email", ""),
224 show_google_openid=settings.GOOGLE_OAUTH_ENABLED,
225 google_auth_url=google_auth_url,
226 show_password_login=current_org.get_setting("auth_password_login_enabled"),
227 show_saml_login=current_org.get_setting("auth_saml_enabled"),
228 show_remote_user_login=settings.REMOTE_USER_LOGIN_ENABLED,
229 show_ldap_login=settings.LDAP_LOGIN_ENABLED,
230 )
231
232
233 @routes.route(org_scoped_rule("/logout"))
234 def logout(org_slug=None):
235 logout_user()
236 return redirect(get_login_url(next=None))
237
238
239 def base_href():
240 if settings.MULTI_ORG:
241 base_href = url_for("redash.index", _external=True, org_slug=current_org.slug)
242 else:
243 base_href = url_for("redash.index", _external=True)
244
245 return base_href
246
247
248 def date_time_format_config():
249 date_format = current_org.get_setting("date_format")
250 date_format_list = set(["DD/MM/YY", "MM/DD/YY", "YYYY-MM-DD", settings.DATE_FORMAT])
251 time_format = current_org.get_setting("time_format")
252 time_format_list = set(["HH:mm", "HH:mm:ss", "HH:mm:ss.SSS", settings.TIME_FORMAT])
253 return {
254 "dateFormat": date_format,
255 "dateFormatList": list(date_format_list),
256 "timeFormatList": list(time_format_list),
257 "dateTimeFormat": "{0} {1}".format(date_format, time_format),
258 }
259
260
261 def number_format_config():
262 return {
263 "integerFormat": current_org.get_setting("integer_format"),
264 "floatFormat": current_org.get_setting("float_format"),
265 }
266
267
268 def client_config():
269 if not current_user.is_api_user() and current_user.is_authenticated:
270 client_config = {
271 "newVersionAvailable": bool(get_latest_version()),
272 "version": __version__,
273 }
274 else:
275 client_config = {}
276
277 if (
278 current_user.has_permission("admin")
279 and current_org.get_setting("beacon_consent") is None
280 ):
281 client_config["showBeaconConsentMessage"] = True
282
283 defaults = {
284 "allowScriptsInUserInput": settings.ALLOW_SCRIPTS_IN_USER_INPUT,
285 "showPermissionsControl": current_org.get_setting(
286 "feature_show_permissions_control"
287 ),
288 "allowCustomJSVisualizations": settings.FEATURE_ALLOW_CUSTOM_JS_VISUALIZATIONS,
289 "autoPublishNamedQueries": settings.FEATURE_AUTO_PUBLISH_NAMED_QUERIES,
290 "extendedAlertOptions": settings.FEATURE_EXTENDED_ALERT_OPTIONS,
291 "mailSettingsMissing": not settings.email_server_is_configured(),
292 "dashboardRefreshIntervals": settings.DASHBOARD_REFRESH_INTERVALS,
293 "queryRefreshIntervals": settings.QUERY_REFRESH_INTERVALS,
294 "googleLoginEnabled": settings.GOOGLE_OAUTH_ENABLED,
295 "pageSize": settings.PAGE_SIZE,
296 "pageSizeOptions": settings.PAGE_SIZE_OPTIONS,
297 "tableCellMaxJSONSize": settings.TABLE_CELL_MAX_JSON_SIZE,
298 }
299
300 client_config.update(defaults)
301 client_config.update({"basePath": base_href()})
302 client_config.update(date_time_format_config())
303 client_config.update(number_format_config())
304
305 return client_config
306
307
308 def messages():
309 messages = []
310
311 if not current_user.is_email_verified:
312 messages.append("email-not-verified")
313
314 if settings.ALLOW_PARAMETERS_IN_EMBEDS:
315 messages.append("using-deprecated-embed-feature")
316
317 return messages
318
319
320 @routes.route("/api/config", methods=["GET"])
321 def config(org_slug=None):
322 return json_response(
323 {"org_slug": current_org.slug, "client_config": client_config()}
324 )
325
326
327 @routes.route(org_scoped_rule("/api/session"), methods=["GET"])
328 @login_required
329 def session(org_slug=None):
330 if current_user.is_api_user():
331 user = {"permissions": [], "apiKey": current_user.id}
332 else:
333 user = {
334 "profile_image_url": current_user.profile_image_url,
335 "id": current_user.id,
336 "name": current_user.name,
337 "email": current_user.email,
338 "groups": current_user.group_ids,
339 "permissions": current_user.permissions,
340 }
341
342 return json_response(
343 {
344 "user": user,
345 "messages": messages(),
346 "org_slug": current_org.slug,
347 "client_config": client_config(),
348 }
349 )
350
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/redash/handlers/authentication.py b/redash/handlers/authentication.py
--- a/redash/handlers/authentication.py
+++ b/redash/handlers/authentication.py
@@ -292,6 +292,7 @@
"dashboardRefreshIntervals": settings.DASHBOARD_REFRESH_INTERVALS,
"queryRefreshIntervals": settings.QUERY_REFRESH_INTERVALS,
"googleLoginEnabled": settings.GOOGLE_OAUTH_ENABLED,
+ "ldapLoginEnabled": settings.LDAP_LOGIN_ENABLED,
"pageSize": settings.PAGE_SIZE,
"pageSizeOptions": settings.PAGE_SIZE_OPTIONS,
"tableCellMaxJSONSize": settings.TABLE_CELL_MAX_JSON_SIZE,
|
{"golden_diff": "diff --git a/redash/handlers/authentication.py b/redash/handlers/authentication.py\n--- a/redash/handlers/authentication.py\n+++ b/redash/handlers/authentication.py\n@@ -292,6 +292,7 @@\n \"dashboardRefreshIntervals\": settings.DASHBOARD_REFRESH_INTERVALS,\n \"queryRefreshIntervals\": settings.QUERY_REFRESH_INTERVALS,\n \"googleLoginEnabled\": settings.GOOGLE_OAUTH_ENABLED,\n+ \"ldapLoginEnabled\": settings.LDAP_LOGIN_ENABLED,\n \"pageSize\": settings.PAGE_SIZE,\n \"pageSizeOptions\": settings.PAGE_SIZE_OPTIONS,\n \"tableCellMaxJSONSize\": settings.TABLE_CELL_MAX_JSON_SIZE,\n", "issue": "Password Auth enabling itself when using LDAP\n### Issue Summary\r\n\r\nWhen using LDAP for auth, the checkbox for \"Password Login Enabled\" in settings becomes greyed out. However, when changing any other setting on that page and clicking save, the \"Password Login Enabled\" gets enabled. I can't find any way to them disable it other than doing so manually in the Postgres 'organizations' table.\r\n\r\n\n", "before_files": [{"content": "import logging\n\nfrom flask import abort, flash, redirect, render_template, request, url_for\n\nfrom flask_login import current_user, login_required, login_user, logout_user\nfrom redash import __version__, limiter, models, settings\nfrom redash.authentication import current_org, get_login_url, get_next_path\nfrom redash.authentication.account import (\n BadSignature,\n SignatureExpired,\n send_password_reset_email,\n send_user_disabled_email,\n send_verify_email,\n validate_token,\n)\nfrom redash.handlers import routes\nfrom redash.handlers.base import json_response, org_scoped_rule\nfrom redash.version_check import get_latest_version\nfrom sqlalchemy.orm.exc import NoResultFound\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_google_auth_url(next_path):\n if settings.MULTI_ORG:\n google_auth_url = url_for(\n \"google_oauth.authorize_org\", next=next_path, org_slug=current_org.slug\n )\n else:\n google_auth_url = url_for(\"google_oauth.authorize\", next=next_path)\n return google_auth_url\n\n\ndef render_token_login_page(template, org_slug, token, invite):\n try:\n user_id = validate_token(token)\n org = current_org._get_current_object()\n user = models.User.get_by_id_and_org(user_id, org)\n except NoResultFound:\n logger.exception(\n \"Bad user id in token. Token= , User id= %s, Org=%s\",\n user_id,\n token,\n org_slug,\n )\n return (\n render_template(\n \"error.html\",\n error_message=\"Invalid invite link. Please ask for a new one.\",\n ),\n 400,\n )\n except (SignatureExpired, BadSignature):\n logger.exception(\"Failed to verify invite token: %s, org=%s\", token, org_slug)\n return (\n render_template(\n \"error.html\",\n error_message=\"Your invite link has expired. Please ask for a new one.\",\n ),\n 400,\n )\n\n if invite and user.details.get(\"is_invitation_pending\") is False:\n return (\n render_template(\n \"error.html\",\n error_message=(\n \"This invitation has already been accepted. 
\"\n \"Please try resetting your password instead.\"\n ),\n ),\n 400,\n )\n\n status_code = 200\n if request.method == \"POST\":\n if \"password\" not in request.form:\n flash(\"Bad Request\")\n status_code = 400\n elif not request.form[\"password\"]:\n flash(\"Cannot use empty password.\")\n status_code = 400\n elif len(request.form[\"password\"]) < 6:\n flash(\"Password length is too short (<6).\")\n status_code = 400\n else:\n if invite:\n user.is_invitation_pending = False\n user.hash_password(request.form[\"password\"])\n models.db.session.add(user)\n login_user(user)\n models.db.session.commit()\n return redirect(url_for(\"redash.index\", org_slug=org_slug))\n\n google_auth_url = get_google_auth_url(url_for(\"redash.index\", org_slug=org_slug))\n\n return (\n render_template(\n template,\n show_google_openid=settings.GOOGLE_OAUTH_ENABLED,\n google_auth_url=google_auth_url,\n show_saml_login=current_org.get_setting(\"auth_saml_enabled\"),\n show_remote_user_login=settings.REMOTE_USER_LOGIN_ENABLED,\n show_ldap_login=settings.LDAP_LOGIN_ENABLED,\n org_slug=org_slug,\n user=user,\n ),\n status_code,\n )\n\n\[email protected](org_scoped_rule(\"/invite/<token>\"), methods=[\"GET\", \"POST\"])\ndef invite(token, org_slug=None):\n return render_token_login_page(\"invite.html\", org_slug, token, True)\n\n\[email protected](org_scoped_rule(\"/reset/<token>\"), methods=[\"GET\", \"POST\"])\ndef reset(token, org_slug=None):\n return render_token_login_page(\"reset.html\", org_slug, token, False)\n\n\[email protected](org_scoped_rule(\"/verify/<token>\"), methods=[\"GET\"])\ndef verify(token, org_slug=None):\n try:\n user_id = validate_token(token)\n org = current_org._get_current_object()\n user = models.User.get_by_id_and_org(user_id, org)\n except (BadSignature, NoResultFound):\n logger.exception(\n \"Failed to verify email verification token: %s, org=%s\", token, org_slug\n )\n return (\n render_template(\n \"error.html\",\n error_message=\"Your verification link is invalid. 
Please ask for a new one.\",\n ),\n 400,\n )\n\n user.is_email_verified = True\n models.db.session.add(user)\n models.db.session.commit()\n\n template_context = {\"org_slug\": org_slug} if settings.MULTI_ORG else {}\n next_url = url_for(\"redash.index\", **template_context)\n\n return render_template(\"verify.html\", next_url=next_url)\n\n\[email protected](org_scoped_rule(\"/forgot\"), methods=[\"GET\", \"POST\"])\ndef forgot_password(org_slug=None):\n if not current_org.get_setting(\"auth_password_login_enabled\"):\n abort(404)\n\n submitted = False\n if request.method == \"POST\" and request.form[\"email\"]:\n submitted = True\n email = request.form[\"email\"]\n try:\n org = current_org._get_current_object()\n user = models.User.get_by_email_and_org(email, org)\n if user.is_disabled:\n send_user_disabled_email(user)\n else:\n send_password_reset_email(user)\n except NoResultFound:\n logging.error(\"No user found for forgot password: %s\", email)\n\n return render_template(\"forgot.html\", submitted=submitted)\n\n\[email protected](org_scoped_rule(\"/verification_email/\"), methods=[\"POST\"])\ndef verification_email(org_slug=None):\n if not current_user.is_email_verified:\n send_verify_email(current_user, current_org)\n\n return json_response(\n {\n \"message\": \"Please check your email inbox in order to verify your email address.\"\n }\n )\n\n\[email protected](org_scoped_rule(\"/login\"), methods=[\"GET\", \"POST\"])\[email protected](settings.THROTTLE_LOGIN_PATTERN)\ndef login(org_slug=None):\n # We intentionally use == as otherwise it won't actually use the proxy. So weird :O\n # noinspection PyComparisonWithNone\n if current_org == None and not settings.MULTI_ORG:\n return redirect(\"/setup\")\n elif current_org == None:\n return redirect(\"/\")\n\n index_url = url_for(\"redash.index\", org_slug=org_slug)\n unsafe_next_path = request.args.get(\"next\", index_url)\n next_path = get_next_path(unsafe_next_path)\n if current_user.is_authenticated:\n return redirect(next_path)\n\n if request.method == \"POST\":\n try:\n org = current_org._get_current_object()\n user = models.User.get_by_email_and_org(request.form[\"email\"], org)\n if (\n user\n and not user.is_disabled\n and user.verify_password(request.form[\"password\"])\n ):\n remember = \"remember\" in request.form\n login_user(user, remember=remember)\n return redirect(next_path)\n else:\n flash(\"Wrong email or password.\")\n except NoResultFound:\n flash(\"Wrong email or password.\")\n\n google_auth_url = get_google_auth_url(next_path)\n\n return render_template(\n \"login.html\",\n org_slug=org_slug,\n next=next_path,\n email=request.form.get(\"email\", \"\"),\n show_google_openid=settings.GOOGLE_OAUTH_ENABLED,\n google_auth_url=google_auth_url,\n show_password_login=current_org.get_setting(\"auth_password_login_enabled\"),\n show_saml_login=current_org.get_setting(\"auth_saml_enabled\"),\n show_remote_user_login=settings.REMOTE_USER_LOGIN_ENABLED,\n show_ldap_login=settings.LDAP_LOGIN_ENABLED,\n )\n\n\[email protected](org_scoped_rule(\"/logout\"))\ndef logout(org_slug=None):\n logout_user()\n return redirect(get_login_url(next=None))\n\n\ndef base_href():\n if settings.MULTI_ORG:\n base_href = url_for(\"redash.index\", _external=True, org_slug=current_org.slug)\n else:\n base_href = url_for(\"redash.index\", _external=True)\n\n return base_href\n\n\ndef date_time_format_config():\n date_format = current_org.get_setting(\"date_format\")\n date_format_list = set([\"DD/MM/YY\", \"MM/DD/YY\", \"YYYY-MM-DD\", 
settings.DATE_FORMAT])\n time_format = current_org.get_setting(\"time_format\")\n time_format_list = set([\"HH:mm\", \"HH:mm:ss\", \"HH:mm:ss.SSS\", settings.TIME_FORMAT])\n return {\n \"dateFormat\": date_format,\n \"dateFormatList\": list(date_format_list),\n \"timeFormatList\": list(time_format_list),\n \"dateTimeFormat\": \"{0} {1}\".format(date_format, time_format),\n }\n\n\ndef number_format_config():\n return {\n \"integerFormat\": current_org.get_setting(\"integer_format\"),\n \"floatFormat\": current_org.get_setting(\"float_format\"),\n }\n\n\ndef client_config():\n if not current_user.is_api_user() and current_user.is_authenticated:\n client_config = {\n \"newVersionAvailable\": bool(get_latest_version()),\n \"version\": __version__,\n }\n else:\n client_config = {}\n\n if (\n current_user.has_permission(\"admin\")\n and current_org.get_setting(\"beacon_consent\") is None\n ):\n client_config[\"showBeaconConsentMessage\"] = True\n\n defaults = {\n \"allowScriptsInUserInput\": settings.ALLOW_SCRIPTS_IN_USER_INPUT,\n \"showPermissionsControl\": current_org.get_setting(\n \"feature_show_permissions_control\"\n ),\n \"allowCustomJSVisualizations\": settings.FEATURE_ALLOW_CUSTOM_JS_VISUALIZATIONS,\n \"autoPublishNamedQueries\": settings.FEATURE_AUTO_PUBLISH_NAMED_QUERIES,\n \"extendedAlertOptions\": settings.FEATURE_EXTENDED_ALERT_OPTIONS,\n \"mailSettingsMissing\": not settings.email_server_is_configured(),\n \"dashboardRefreshIntervals\": settings.DASHBOARD_REFRESH_INTERVALS,\n \"queryRefreshIntervals\": settings.QUERY_REFRESH_INTERVALS,\n \"googleLoginEnabled\": settings.GOOGLE_OAUTH_ENABLED,\n \"pageSize\": settings.PAGE_SIZE,\n \"pageSizeOptions\": settings.PAGE_SIZE_OPTIONS,\n \"tableCellMaxJSONSize\": settings.TABLE_CELL_MAX_JSON_SIZE,\n }\n\n client_config.update(defaults)\n client_config.update({\"basePath\": base_href()})\n client_config.update(date_time_format_config())\n client_config.update(number_format_config())\n\n return client_config\n\n\ndef messages():\n messages = []\n\n if not current_user.is_email_verified:\n messages.append(\"email-not-verified\")\n\n if settings.ALLOW_PARAMETERS_IN_EMBEDS:\n messages.append(\"using-deprecated-embed-feature\")\n\n return messages\n\n\[email protected](\"/api/config\", methods=[\"GET\"])\ndef config(org_slug=None):\n return json_response(\n {\"org_slug\": current_org.slug, \"client_config\": client_config()}\n )\n\n\[email protected](org_scoped_rule(\"/api/session\"), methods=[\"GET\"])\n@login_required\ndef session(org_slug=None):\n if current_user.is_api_user():\n user = {\"permissions\": [], \"apiKey\": current_user.id}\n else:\n user = {\n \"profile_image_url\": current_user.profile_image_url,\n \"id\": current_user.id,\n \"name\": current_user.name,\n \"email\": current_user.email,\n \"groups\": current_user.group_ids,\n \"permissions\": current_user.permissions,\n }\n\n return json_response(\n {\n \"user\": user,\n \"messages\": messages(),\n \"org_slug\": current_org.slug,\n \"client_config\": client_config(),\n }\n )\n", "path": "redash/handlers/authentication.py"}], "after_files": [{"content": "import logging\n\nfrom flask import abort, flash, redirect, render_template, request, url_for\n\nfrom flask_login import current_user, login_required, login_user, logout_user\nfrom redash import __version__, limiter, models, settings\nfrom redash.authentication import current_org, get_login_url, get_next_path\nfrom redash.authentication.account import (\n BadSignature,\n SignatureExpired,\n send_password_reset_email,\n 
send_user_disabled_email,\n send_verify_email,\n validate_token,\n)\nfrom redash.handlers import routes\nfrom redash.handlers.base import json_response, org_scoped_rule\nfrom redash.version_check import get_latest_version\nfrom sqlalchemy.orm.exc import NoResultFound\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_google_auth_url(next_path):\n if settings.MULTI_ORG:\n google_auth_url = url_for(\n \"google_oauth.authorize_org\", next=next_path, org_slug=current_org.slug\n )\n else:\n google_auth_url = url_for(\"google_oauth.authorize\", next=next_path)\n return google_auth_url\n\n\ndef render_token_login_page(template, org_slug, token, invite):\n try:\n user_id = validate_token(token)\n org = current_org._get_current_object()\n user = models.User.get_by_id_and_org(user_id, org)\n except NoResultFound:\n logger.exception(\n \"Bad user id in token. Token= , User id= %s, Org=%s\",\n user_id,\n token,\n org_slug,\n )\n return (\n render_template(\n \"error.html\",\n error_message=\"Invalid invite link. Please ask for a new one.\",\n ),\n 400,\n )\n except (SignatureExpired, BadSignature):\n logger.exception(\"Failed to verify invite token: %s, org=%s\", token, org_slug)\n return (\n render_template(\n \"error.html\",\n error_message=\"Your invite link has expired. Please ask for a new one.\",\n ),\n 400,\n )\n\n if invite and user.details.get(\"is_invitation_pending\") is False:\n return (\n render_template(\n \"error.html\",\n error_message=(\n \"This invitation has already been accepted. \"\n \"Please try resetting your password instead.\"\n ),\n ),\n 400,\n )\n\n status_code = 200\n if request.method == \"POST\":\n if \"password\" not in request.form:\n flash(\"Bad Request\")\n status_code = 400\n elif not request.form[\"password\"]:\n flash(\"Cannot use empty password.\")\n status_code = 400\n elif len(request.form[\"password\"]) < 6:\n flash(\"Password length is too short (<6).\")\n status_code = 400\n else:\n if invite:\n user.is_invitation_pending = False\n user.hash_password(request.form[\"password\"])\n models.db.session.add(user)\n login_user(user)\n models.db.session.commit()\n return redirect(url_for(\"redash.index\", org_slug=org_slug))\n\n google_auth_url = get_google_auth_url(url_for(\"redash.index\", org_slug=org_slug))\n\n return (\n render_template(\n template,\n show_google_openid=settings.GOOGLE_OAUTH_ENABLED,\n google_auth_url=google_auth_url,\n show_saml_login=current_org.get_setting(\"auth_saml_enabled\"),\n show_remote_user_login=settings.REMOTE_USER_LOGIN_ENABLED,\n show_ldap_login=settings.LDAP_LOGIN_ENABLED,\n org_slug=org_slug,\n user=user,\n ),\n status_code,\n )\n\n\[email protected](org_scoped_rule(\"/invite/<token>\"), methods=[\"GET\", \"POST\"])\ndef invite(token, org_slug=None):\n return render_token_login_page(\"invite.html\", org_slug, token, True)\n\n\[email protected](org_scoped_rule(\"/reset/<token>\"), methods=[\"GET\", \"POST\"])\ndef reset(token, org_slug=None):\n return render_token_login_page(\"reset.html\", org_slug, token, False)\n\n\[email protected](org_scoped_rule(\"/verify/<token>\"), methods=[\"GET\"])\ndef verify(token, org_slug=None):\n try:\n user_id = validate_token(token)\n org = current_org._get_current_object()\n user = models.User.get_by_id_and_org(user_id, org)\n except (BadSignature, NoResultFound):\n logger.exception(\n \"Failed to verify email verification token: %s, org=%s\", token, org_slug\n )\n return (\n render_template(\n \"error.html\",\n error_message=\"Your verification link is invalid. 
Please ask for a new one.\",\n ),\n 400,\n )\n\n user.is_email_verified = True\n models.db.session.add(user)\n models.db.session.commit()\n\n template_context = {\"org_slug\": org_slug} if settings.MULTI_ORG else {}\n next_url = url_for(\"redash.index\", **template_context)\n\n return render_template(\"verify.html\", next_url=next_url)\n\n\[email protected](org_scoped_rule(\"/forgot\"), methods=[\"GET\", \"POST\"])\ndef forgot_password(org_slug=None):\n if not current_org.get_setting(\"auth_password_login_enabled\"):\n abort(404)\n\n submitted = False\n if request.method == \"POST\" and request.form[\"email\"]:\n submitted = True\n email = request.form[\"email\"]\n try:\n org = current_org._get_current_object()\n user = models.User.get_by_email_and_org(email, org)\n if user.is_disabled:\n send_user_disabled_email(user)\n else:\n send_password_reset_email(user)\n except NoResultFound:\n logging.error(\"No user found for forgot password: %s\", email)\n\n return render_template(\"forgot.html\", submitted=submitted)\n\n\[email protected](org_scoped_rule(\"/verification_email/\"), methods=[\"POST\"])\ndef verification_email(org_slug=None):\n if not current_user.is_email_verified:\n send_verify_email(current_user, current_org)\n\n return json_response(\n {\n \"message\": \"Please check your email inbox in order to verify your email address.\"\n }\n )\n\n\[email protected](org_scoped_rule(\"/login\"), methods=[\"GET\", \"POST\"])\[email protected](settings.THROTTLE_LOGIN_PATTERN)\ndef login(org_slug=None):\n # We intentionally use == as otherwise it won't actually use the proxy. So weird :O\n # noinspection PyComparisonWithNone\n if current_org == None and not settings.MULTI_ORG:\n return redirect(\"/setup\")\n elif current_org == None:\n return redirect(\"/\")\n\n index_url = url_for(\"redash.index\", org_slug=org_slug)\n unsafe_next_path = request.args.get(\"next\", index_url)\n next_path = get_next_path(unsafe_next_path)\n if current_user.is_authenticated:\n return redirect(next_path)\n\n if request.method == \"POST\":\n try:\n org = current_org._get_current_object()\n user = models.User.get_by_email_and_org(request.form[\"email\"], org)\n if (\n user\n and not user.is_disabled\n and user.verify_password(request.form[\"password\"])\n ):\n remember = \"remember\" in request.form\n login_user(user, remember=remember)\n return redirect(next_path)\n else:\n flash(\"Wrong email or password.\")\n except NoResultFound:\n flash(\"Wrong email or password.\")\n\n google_auth_url = get_google_auth_url(next_path)\n\n return render_template(\n \"login.html\",\n org_slug=org_slug,\n next=next_path,\n email=request.form.get(\"email\", \"\"),\n show_google_openid=settings.GOOGLE_OAUTH_ENABLED,\n google_auth_url=google_auth_url,\n show_password_login=current_org.get_setting(\"auth_password_login_enabled\"),\n show_saml_login=current_org.get_setting(\"auth_saml_enabled\"),\n show_remote_user_login=settings.REMOTE_USER_LOGIN_ENABLED,\n show_ldap_login=settings.LDAP_LOGIN_ENABLED,\n )\n\n\[email protected](org_scoped_rule(\"/logout\"))\ndef logout(org_slug=None):\n logout_user()\n return redirect(get_login_url(next=None))\n\n\ndef base_href():\n if settings.MULTI_ORG:\n base_href = url_for(\"redash.index\", _external=True, org_slug=current_org.slug)\n else:\n base_href = url_for(\"redash.index\", _external=True)\n\n return base_href\n\n\ndef date_time_format_config():\n date_format = current_org.get_setting(\"date_format\")\n date_format_list = set([\"DD/MM/YY\", \"MM/DD/YY\", \"YYYY-MM-DD\", 
settings.DATE_FORMAT])\n time_format = current_org.get_setting(\"time_format\")\n time_format_list = set([\"HH:mm\", \"HH:mm:ss\", \"HH:mm:ss.SSS\", settings.TIME_FORMAT])\n return {\n \"dateFormat\": date_format,\n \"dateFormatList\": list(date_format_list),\n \"timeFormatList\": list(time_format_list),\n \"dateTimeFormat\": \"{0} {1}\".format(date_format, time_format),\n }\n\n\ndef number_format_config():\n return {\n \"integerFormat\": current_org.get_setting(\"integer_format\"),\n \"floatFormat\": current_org.get_setting(\"float_format\"),\n }\n\n\ndef client_config():\n if not current_user.is_api_user() and current_user.is_authenticated:\n client_config = {\n \"newVersionAvailable\": bool(get_latest_version()),\n \"version\": __version__,\n }\n else:\n client_config = {}\n\n if (\n current_user.has_permission(\"admin\")\n and current_org.get_setting(\"beacon_consent\") is None\n ):\n client_config[\"showBeaconConsentMessage\"] = True\n\n defaults = {\n \"allowScriptsInUserInput\": settings.ALLOW_SCRIPTS_IN_USER_INPUT,\n \"showPermissionsControl\": current_org.get_setting(\n \"feature_show_permissions_control\"\n ),\n \"allowCustomJSVisualizations\": settings.FEATURE_ALLOW_CUSTOM_JS_VISUALIZATIONS,\n \"autoPublishNamedQueries\": settings.FEATURE_AUTO_PUBLISH_NAMED_QUERIES,\n \"extendedAlertOptions\": settings.FEATURE_EXTENDED_ALERT_OPTIONS,\n \"mailSettingsMissing\": not settings.email_server_is_configured(),\n \"dashboardRefreshIntervals\": settings.DASHBOARD_REFRESH_INTERVALS,\n \"queryRefreshIntervals\": settings.QUERY_REFRESH_INTERVALS,\n \"googleLoginEnabled\": settings.GOOGLE_OAUTH_ENABLED,\n \"ldapLoginEnabled\": settings.LDAP_LOGIN_ENABLED,\n \"pageSize\": settings.PAGE_SIZE,\n \"pageSizeOptions\": settings.PAGE_SIZE_OPTIONS,\n \"tableCellMaxJSONSize\": settings.TABLE_CELL_MAX_JSON_SIZE,\n }\n\n client_config.update(defaults)\n client_config.update({\"basePath\": base_href()})\n client_config.update(date_time_format_config())\n client_config.update(number_format_config())\n\n return client_config\n\n\ndef messages():\n messages = []\n\n if not current_user.is_email_verified:\n messages.append(\"email-not-verified\")\n\n if settings.ALLOW_PARAMETERS_IN_EMBEDS:\n messages.append(\"using-deprecated-embed-feature\")\n\n return messages\n\n\[email protected](\"/api/config\", methods=[\"GET\"])\ndef config(org_slug=None):\n return json_response(\n {\"org_slug\": current_org.slug, \"client_config\": client_config()}\n )\n\n\[email protected](org_scoped_rule(\"/api/session\"), methods=[\"GET\"])\n@login_required\ndef session(org_slug=None):\n if current_user.is_api_user():\n user = {\"permissions\": [], \"apiKey\": current_user.id}\n else:\n user = {\n \"profile_image_url\": current_user.profile_image_url,\n \"id\": current_user.id,\n \"name\": current_user.name,\n \"email\": current_user.email,\n \"groups\": current_user.group_ids,\n \"permissions\": current_user.permissions,\n }\n\n return json_response(\n {\n \"user\": user,\n \"messages\": messages(),\n \"org_slug\": current_org.slug,\n \"client_config\": client_config(),\n }\n )\n", "path": "redash/handlers/authentication.py"}]}
| 3,823 | 144 |
gh_patches_debug_3046
|
rasdani/github-patches
|
git_diff
|
certbot__certbot-6134
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Reuse ACMEv1 accounts for ACMEv2 production
The lines removed following the discussion at https://github.com/certbot/certbot/pull/5902#discussion_r192532446 need to be added back.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `certbot/constants.py`
Content:
```
1 """Certbot constants."""
2 import logging
3 import os
4 import pkg_resources
5
6 from acme import challenges
7
8
9 SETUPTOOLS_PLUGINS_ENTRY_POINT = "certbot.plugins"
10 """Setuptools entry point group name for plugins."""
11
12 OLD_SETUPTOOLS_PLUGINS_ENTRY_POINT = "letsencrypt.plugins"
13 """Plugins Setuptools entry point before rename."""
14
15 CLI_DEFAULTS = dict(
16 config_files=[
17 "/etc/letsencrypt/cli.ini",
18 # http://freedesktop.org/wiki/Software/xdg-user-dirs/
19 os.path.join(os.environ.get("XDG_CONFIG_HOME", "~/.config"),
20 "letsencrypt", "cli.ini"),
21 ],
22
23 # Main parser
24 verbose_count=-int(logging.INFO / 10),
25 text_mode=False,
26 max_log_backups=1000,
27 noninteractive_mode=False,
28 force_interactive=False,
29 domains=[],
30 certname=None,
31 dry_run=False,
32 register_unsafely_without_email=False,
33 update_registration=False,
34 email=None,
35 eff_email=None,
36 reinstall=False,
37 expand=False,
38 renew_by_default=False,
39 renew_with_new_domains=False,
40 autorenew=True,
41 allow_subset_of_names=False,
42 tos=False,
43 account=None,
44 duplicate=False,
45 os_packages_only=False,
46 no_self_upgrade=False,
47 no_bootstrap=False,
48 quiet=False,
49 staging=False,
50 debug=False,
51 debug_challenges=False,
52 no_verify_ssl=False,
53 tls_sni_01_port=challenges.TLSSNI01Response.PORT,
54 tls_sni_01_address="",
55 http01_port=challenges.HTTP01Response.PORT,
56 http01_address="",
57 break_my_certs=False,
58 rsa_key_size=2048,
59 must_staple=False,
60 redirect=None,
61 hsts=None,
62 uir=None,
63 staple=None,
64 strict_permissions=False,
65 pref_challs=[],
66 validate_hooks=True,
67 directory_hooks=True,
68 reuse_key=False,
69 disable_renew_updates=False,
70
71 # Subparsers
72 num=None,
73 user_agent=None,
74 user_agent_comment=None,
75 csr=None,
76 reason=0,
77 delete_after_revoke=None,
78 rollback_checkpoints=1,
79 init=False,
80 prepare=False,
81 ifaces=None,
82
83 # Path parsers
84 auth_cert_path="./cert.pem",
85 auth_chain_path="./chain.pem",
86 key_path=None,
87 config_dir="/etc/letsencrypt",
88 work_dir="/var/lib/letsencrypt",
89 logs_dir="/var/log/letsencrypt",
90 server="https://acme-v01.api.letsencrypt.org/directory",
91
92 # Plugins parsers
93 configurator=None,
94 authenticator=None,
95 installer=None,
96 apache=False,
97 nginx=False,
98 standalone=False,
99 manual=False,
100 webroot=False,
101 dns_cloudflare=False,
102 dns_cloudxns=False,
103 dns_digitalocean=False,
104 dns_dnsimple=False,
105 dns_dnsmadeeasy=False,
106 dns_google=False,
107 dns_luadns=False,
108 dns_nsone=False,
109 dns_rfc2136=False,
110 dns_route53=False
111
112 )
113 STAGING_URI = "https://acme-staging-v02.api.letsencrypt.org/directory"
114
115 # The set of reasons for revoking a certificate is defined in RFC 5280 in
116 # section 5.3.1. The reasons that users are allowed to submit are restricted to
117 # those accepted by the ACME server implementation. They are listed in
118 # `letsencrypt.boulder.revocation.reasons.go`.
119 REVOCATION_REASONS = {
120 "unspecified": 0,
121 "keycompromise": 1,
122 "affiliationchanged": 3,
123 "superseded": 4,
124 "cessationofoperation": 5}
125
126 """Defaults for CLI flags and `.IConfig` attributes."""
127
128 QUIET_LOGGING_LEVEL = logging.WARNING
129 """Logging level to use in quiet mode."""
130
131 RENEWER_DEFAULTS = dict(
132 renewer_enabled="yes",
133 renew_before_expiry="30 days",
134 # This value should ensure that there is never a deployment delay by
135 # default.
136 deploy_before_expiry="99 years",
137 )
138 """Defaults for renewer script."""
139
140
141 ENHANCEMENTS = ["redirect", "ensure-http-header", "ocsp-stapling", "spdy"]
142 """List of possible :class:`certbot.interfaces.IInstaller`
143 enhancements.
144
145 List of expected options parameters:
146 - redirect: None
147 - ensure-http-header: name of header (i.e. Strict-Transport-Security)
148 - ocsp-stapling: certificate chain file path
149 - spdy: TODO
150
151 """
152
153 ARCHIVE_DIR = "archive"
154 """Archive directory, relative to `IConfig.config_dir`."""
155
156 CONFIG_DIRS_MODE = 0o755
157 """Directory mode for ``.IConfig.config_dir`` et al."""
158
159 ACCOUNTS_DIR = "accounts"
160 """Directory where all accounts are saved."""
161
162 LE_REUSE_SERVERS = {
163 'acme-staging-v02.api.letsencrypt.org/directory':
164 'acme-staging.api.letsencrypt.org/directory'
165 }
166 """Servers that can reuse accounts from other servers."""
167
168 BACKUP_DIR = "backups"
169 """Directory (relative to `IConfig.work_dir`) where backups are kept."""
170
171 CSR_DIR = "csr"
172 """See `.IConfig.csr_dir`."""
173
174 IN_PROGRESS_DIR = "IN_PROGRESS"
175 """Directory used before a permanent checkpoint is finalized (relative to
176 `IConfig.work_dir`)."""
177
178 KEY_DIR = "keys"
179 """Directory (relative to `IConfig.config_dir`) where keys are saved."""
180
181 LIVE_DIR = "live"
182 """Live directory, relative to `IConfig.config_dir`."""
183
184 TEMP_CHECKPOINT_DIR = "temp_checkpoint"
185 """Temporary checkpoint directory (relative to `IConfig.work_dir`)."""
186
187 RENEWAL_CONFIGS_DIR = "renewal"
188 """Renewal configs directory, relative to `IConfig.config_dir`."""
189
190 RENEWAL_HOOKS_DIR = "renewal-hooks"
191 """Basename of directory containing hooks to run with the renew command."""
192
193 RENEWAL_PRE_HOOKS_DIR = "pre"
194 """Basename of directory containing pre-hooks to run with the renew command."""
195
196 RENEWAL_DEPLOY_HOOKS_DIR = "deploy"
197 """Basename of directory containing deploy-hooks to run with the renew command."""
198
199 RENEWAL_POST_HOOKS_DIR = "post"
200 """Basename of directory containing post-hooks to run with the renew command."""
201
202 FORCE_INTERACTIVE_FLAG = "--force-interactive"
203 """Flag to disable TTY checking in IDisplay."""
204
205 EFF_SUBSCRIBE_URI = "https://supporters.eff.org/subscribe/certbot"
206 """EFF URI used to submit the e-mail address of users who opt-in."""
207
208 SSL_DHPARAMS_DEST = "ssl-dhparams.pem"
209 """Name of the ssl_dhparams file as saved in `IConfig.config_dir`."""
210
211 SSL_DHPARAMS_SRC = pkg_resources.resource_filename(
212 "certbot", "ssl-dhparams.pem")
213 """Path to the nginx ssl_dhparams file found in the Certbot distribution."""
214
215 UPDATED_SSL_DHPARAMS_DIGEST = ".updated-ssl-dhparams-pem-digest.txt"
216 """Name of the hash of the updated or informed ssl_dhparams as saved in `IConfig.config_dir`."""
217
218 ALL_SSL_DHPARAMS_HASHES = [
219 '9ba6429597aeed2d8617a7705b56e96d044f64b07971659382e426675105654b',
220 ]
221 """SHA256 hashes of the contents of all versions of SSL_DHPARAMS_SRC"""
222
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/certbot/constants.py b/certbot/constants.py
--- a/certbot/constants.py
+++ b/certbot/constants.py
@@ -160,6 +160,7 @@
"""Directory where all accounts are saved."""
LE_REUSE_SERVERS = {
+ 'acme-v02.api.letsencrypt.org/directory': 'acme-v01.api.letsencrypt.org/directory',
'acme-staging-v02.api.letsencrypt.org/directory':
'acme-staging.api.letsencrypt.org/directory'
}
|
{"golden_diff": "diff --git a/certbot/constants.py b/certbot/constants.py\n--- a/certbot/constants.py\n+++ b/certbot/constants.py\n@@ -160,6 +160,7 @@\n \"\"\"Directory where all accounts are saved.\"\"\"\n \n LE_REUSE_SERVERS = {\n+ 'acme-v02.api.letsencrypt.org/directory': 'acme-v01.api.letsencrypt.org/directory',\n 'acme-staging-v02.api.letsencrypt.org/directory':\n 'acme-staging.api.letsencrypt.org/directory'\n }\n", "issue": "Reuse ACMEv1 accounts for ACMEv2 production\nThe lines removed following the discussion at https://github.com/certbot/certbot/pull/5902#discussion_r192532446 need to be added back.\n", "before_files": [{"content": "\"\"\"Certbot constants.\"\"\"\nimport logging\nimport os\nimport pkg_resources\n\nfrom acme import challenges\n\n\nSETUPTOOLS_PLUGINS_ENTRY_POINT = \"certbot.plugins\"\n\"\"\"Setuptools entry point group name for plugins.\"\"\"\n\nOLD_SETUPTOOLS_PLUGINS_ENTRY_POINT = \"letsencrypt.plugins\"\n\"\"\"Plugins Setuptools entry point before rename.\"\"\"\n\nCLI_DEFAULTS = dict(\n config_files=[\n \"/etc/letsencrypt/cli.ini\",\n # http://freedesktop.org/wiki/Software/xdg-user-dirs/\n os.path.join(os.environ.get(\"XDG_CONFIG_HOME\", \"~/.config\"),\n \"letsencrypt\", \"cli.ini\"),\n ],\n\n # Main parser\n verbose_count=-int(logging.INFO / 10),\n text_mode=False,\n max_log_backups=1000,\n noninteractive_mode=False,\n force_interactive=False,\n domains=[],\n certname=None,\n dry_run=False,\n register_unsafely_without_email=False,\n update_registration=False,\n email=None,\n eff_email=None,\n reinstall=False,\n expand=False,\n renew_by_default=False,\n renew_with_new_domains=False,\n autorenew=True,\n allow_subset_of_names=False,\n tos=False,\n account=None,\n duplicate=False,\n os_packages_only=False,\n no_self_upgrade=False,\n no_bootstrap=False,\n quiet=False,\n staging=False,\n debug=False,\n debug_challenges=False,\n no_verify_ssl=False,\n tls_sni_01_port=challenges.TLSSNI01Response.PORT,\n tls_sni_01_address=\"\",\n http01_port=challenges.HTTP01Response.PORT,\n http01_address=\"\",\n break_my_certs=False,\n rsa_key_size=2048,\n must_staple=False,\n redirect=None,\n hsts=None,\n uir=None,\n staple=None,\n strict_permissions=False,\n pref_challs=[],\n validate_hooks=True,\n directory_hooks=True,\n reuse_key=False,\n disable_renew_updates=False,\n\n # Subparsers\n num=None,\n user_agent=None,\n user_agent_comment=None,\n csr=None,\n reason=0,\n delete_after_revoke=None,\n rollback_checkpoints=1,\n init=False,\n prepare=False,\n ifaces=None,\n\n # Path parsers\n auth_cert_path=\"./cert.pem\",\n auth_chain_path=\"./chain.pem\",\n key_path=None,\n config_dir=\"/etc/letsencrypt\",\n work_dir=\"/var/lib/letsencrypt\",\n logs_dir=\"/var/log/letsencrypt\",\n server=\"https://acme-v01.api.letsencrypt.org/directory\",\n\n # Plugins parsers\n configurator=None,\n authenticator=None,\n installer=None,\n apache=False,\n nginx=False,\n standalone=False,\n manual=False,\n webroot=False,\n dns_cloudflare=False,\n dns_cloudxns=False,\n dns_digitalocean=False,\n dns_dnsimple=False,\n dns_dnsmadeeasy=False,\n dns_google=False,\n dns_luadns=False,\n dns_nsone=False,\n dns_rfc2136=False,\n dns_route53=False\n\n)\nSTAGING_URI = \"https://acme-staging-v02.api.letsencrypt.org/directory\"\n\n# The set of reasons for revoking a certificate is defined in RFC 5280 in\n# section 5.3.1. The reasons that users are allowed to submit are restricted to\n# those accepted by the ACME server implementation. 
They are listed in\n# `letsencrypt.boulder.revocation.reasons.go`.\nREVOCATION_REASONS = {\n \"unspecified\": 0,\n \"keycompromise\": 1,\n \"affiliationchanged\": 3,\n \"superseded\": 4,\n \"cessationofoperation\": 5}\n\n\"\"\"Defaults for CLI flags and `.IConfig` attributes.\"\"\"\n\nQUIET_LOGGING_LEVEL = logging.WARNING\n\"\"\"Logging level to use in quiet mode.\"\"\"\n\nRENEWER_DEFAULTS = dict(\n renewer_enabled=\"yes\",\n renew_before_expiry=\"30 days\",\n # This value should ensure that there is never a deployment delay by\n # default.\n deploy_before_expiry=\"99 years\",\n)\n\"\"\"Defaults for renewer script.\"\"\"\n\n\nENHANCEMENTS = [\"redirect\", \"ensure-http-header\", \"ocsp-stapling\", \"spdy\"]\n\"\"\"List of possible :class:`certbot.interfaces.IInstaller`\nenhancements.\n\nList of expected options parameters:\n- redirect: None\n- ensure-http-header: name of header (i.e. Strict-Transport-Security)\n- ocsp-stapling: certificate chain file path\n- spdy: TODO\n\n\"\"\"\n\nARCHIVE_DIR = \"archive\"\n\"\"\"Archive directory, relative to `IConfig.config_dir`.\"\"\"\n\nCONFIG_DIRS_MODE = 0o755\n\"\"\"Directory mode for ``.IConfig.config_dir`` et al.\"\"\"\n\nACCOUNTS_DIR = \"accounts\"\n\"\"\"Directory where all accounts are saved.\"\"\"\n\nLE_REUSE_SERVERS = {\n 'acme-staging-v02.api.letsencrypt.org/directory':\n 'acme-staging.api.letsencrypt.org/directory'\n}\n\"\"\"Servers that can reuse accounts from other servers.\"\"\"\n\nBACKUP_DIR = \"backups\"\n\"\"\"Directory (relative to `IConfig.work_dir`) where backups are kept.\"\"\"\n\nCSR_DIR = \"csr\"\n\"\"\"See `.IConfig.csr_dir`.\"\"\"\n\nIN_PROGRESS_DIR = \"IN_PROGRESS\"\n\"\"\"Directory used before a permanent checkpoint is finalized (relative to\n`IConfig.work_dir`).\"\"\"\n\nKEY_DIR = \"keys\"\n\"\"\"Directory (relative to `IConfig.config_dir`) where keys are saved.\"\"\"\n\nLIVE_DIR = \"live\"\n\"\"\"Live directory, relative to `IConfig.config_dir`.\"\"\"\n\nTEMP_CHECKPOINT_DIR = \"temp_checkpoint\"\n\"\"\"Temporary checkpoint directory (relative to `IConfig.work_dir`).\"\"\"\n\nRENEWAL_CONFIGS_DIR = \"renewal\"\n\"\"\"Renewal configs directory, relative to `IConfig.config_dir`.\"\"\"\n\nRENEWAL_HOOKS_DIR = \"renewal-hooks\"\n\"\"\"Basename of directory containing hooks to run with the renew command.\"\"\"\n\nRENEWAL_PRE_HOOKS_DIR = \"pre\"\n\"\"\"Basename of directory containing pre-hooks to run with the renew command.\"\"\"\n\nRENEWAL_DEPLOY_HOOKS_DIR = \"deploy\"\n\"\"\"Basename of directory containing deploy-hooks to run with the renew command.\"\"\"\n\nRENEWAL_POST_HOOKS_DIR = \"post\"\n\"\"\"Basename of directory containing post-hooks to run with the renew command.\"\"\"\n\nFORCE_INTERACTIVE_FLAG = \"--force-interactive\"\n\"\"\"Flag to disable TTY checking in IDisplay.\"\"\"\n\nEFF_SUBSCRIBE_URI = \"https://supporters.eff.org/subscribe/certbot\"\n\"\"\"EFF URI used to submit the e-mail address of users who opt-in.\"\"\"\n\nSSL_DHPARAMS_DEST = \"ssl-dhparams.pem\"\n\"\"\"Name of the ssl_dhparams file as saved in `IConfig.config_dir`.\"\"\"\n\nSSL_DHPARAMS_SRC = pkg_resources.resource_filename(\n \"certbot\", \"ssl-dhparams.pem\")\n\"\"\"Path to the nginx ssl_dhparams file found in the Certbot distribution.\"\"\"\n\nUPDATED_SSL_DHPARAMS_DIGEST = \".updated-ssl-dhparams-pem-digest.txt\"\n\"\"\"Name of the hash of the updated or informed ssl_dhparams as saved in `IConfig.config_dir`.\"\"\"\n\nALL_SSL_DHPARAMS_HASHES = [\n '9ba6429597aeed2d8617a7705b56e96d044f64b07971659382e426675105654b',\n]\n\"\"\"SHA256 hashes of the 
contents of all versions of SSL_DHPARAMS_SRC\"\"\"\n", "path": "certbot/constants.py"}], "after_files": [{"content": "\"\"\"Certbot constants.\"\"\"\nimport logging\nimport os\nimport pkg_resources\n\nfrom acme import challenges\n\n\nSETUPTOOLS_PLUGINS_ENTRY_POINT = \"certbot.plugins\"\n\"\"\"Setuptools entry point group name for plugins.\"\"\"\n\nOLD_SETUPTOOLS_PLUGINS_ENTRY_POINT = \"letsencrypt.plugins\"\n\"\"\"Plugins Setuptools entry point before rename.\"\"\"\n\nCLI_DEFAULTS = dict(\n config_files=[\n \"/etc/letsencrypt/cli.ini\",\n # http://freedesktop.org/wiki/Software/xdg-user-dirs/\n os.path.join(os.environ.get(\"XDG_CONFIG_HOME\", \"~/.config\"),\n \"letsencrypt\", \"cli.ini\"),\n ],\n\n # Main parser\n verbose_count=-int(logging.INFO / 10),\n text_mode=False,\n max_log_backups=1000,\n noninteractive_mode=False,\n force_interactive=False,\n domains=[],\n certname=None,\n dry_run=False,\n register_unsafely_without_email=False,\n update_registration=False,\n email=None,\n eff_email=None,\n reinstall=False,\n expand=False,\n renew_by_default=False,\n renew_with_new_domains=False,\n autorenew=True,\n allow_subset_of_names=False,\n tos=False,\n account=None,\n duplicate=False,\n os_packages_only=False,\n no_self_upgrade=False,\n no_bootstrap=False,\n quiet=False,\n staging=False,\n debug=False,\n debug_challenges=False,\n no_verify_ssl=False,\n tls_sni_01_port=challenges.TLSSNI01Response.PORT,\n tls_sni_01_address=\"\",\n http01_port=challenges.HTTP01Response.PORT,\n http01_address=\"\",\n break_my_certs=False,\n rsa_key_size=2048,\n must_staple=False,\n redirect=None,\n hsts=None,\n uir=None,\n staple=None,\n strict_permissions=False,\n pref_challs=[],\n validate_hooks=True,\n directory_hooks=True,\n reuse_key=False,\n disable_renew_updates=False,\n\n # Subparsers\n num=None,\n user_agent=None,\n user_agent_comment=None,\n csr=None,\n reason=0,\n delete_after_revoke=None,\n rollback_checkpoints=1,\n init=False,\n prepare=False,\n ifaces=None,\n\n # Path parsers\n auth_cert_path=\"./cert.pem\",\n auth_chain_path=\"./chain.pem\",\n key_path=None,\n config_dir=\"/etc/letsencrypt\",\n work_dir=\"/var/lib/letsencrypt\",\n logs_dir=\"/var/log/letsencrypt\",\n server=\"https://acme-v01.api.letsencrypt.org/directory\",\n\n # Plugins parsers\n configurator=None,\n authenticator=None,\n installer=None,\n apache=False,\n nginx=False,\n standalone=False,\n manual=False,\n webroot=False,\n dns_cloudflare=False,\n dns_cloudxns=False,\n dns_digitalocean=False,\n dns_dnsimple=False,\n dns_dnsmadeeasy=False,\n dns_google=False,\n dns_luadns=False,\n dns_nsone=False,\n dns_rfc2136=False,\n dns_route53=False\n\n)\nSTAGING_URI = \"https://acme-staging-v02.api.letsencrypt.org/directory\"\n\n# The set of reasons for revoking a certificate is defined in RFC 5280 in\n# section 5.3.1. The reasons that users are allowed to submit are restricted to\n# those accepted by the ACME server implementation. 
They are listed in\n# `letsencrypt.boulder.revocation.reasons.go`.\nREVOCATION_REASONS = {\n \"unspecified\": 0,\n \"keycompromise\": 1,\n \"affiliationchanged\": 3,\n \"superseded\": 4,\n \"cessationofoperation\": 5}\n\n\"\"\"Defaults for CLI flags and `.IConfig` attributes.\"\"\"\n\nQUIET_LOGGING_LEVEL = logging.WARNING\n\"\"\"Logging level to use in quiet mode.\"\"\"\n\nRENEWER_DEFAULTS = dict(\n renewer_enabled=\"yes\",\n renew_before_expiry=\"30 days\",\n # This value should ensure that there is never a deployment delay by\n # default.\n deploy_before_expiry=\"99 years\",\n)\n\"\"\"Defaults for renewer script.\"\"\"\n\n\nENHANCEMENTS = [\"redirect\", \"ensure-http-header\", \"ocsp-stapling\", \"spdy\"]\n\"\"\"List of possible :class:`certbot.interfaces.IInstaller`\nenhancements.\n\nList of expected options parameters:\n- redirect: None\n- ensure-http-header: name of header (i.e. Strict-Transport-Security)\n- ocsp-stapling: certificate chain file path\n- spdy: TODO\n\n\"\"\"\n\nARCHIVE_DIR = \"archive\"\n\"\"\"Archive directory, relative to `IConfig.config_dir`.\"\"\"\n\nCONFIG_DIRS_MODE = 0o755\n\"\"\"Directory mode for ``.IConfig.config_dir`` et al.\"\"\"\n\nACCOUNTS_DIR = \"accounts\"\n\"\"\"Directory where all accounts are saved.\"\"\"\n\nLE_REUSE_SERVERS = {\n 'acme-v02.api.letsencrypt.org/directory': 'acme-v01.api.letsencrypt.org/directory',\n 'acme-staging-v02.api.letsencrypt.org/directory':\n 'acme-staging.api.letsencrypt.org/directory'\n}\n\"\"\"Servers that can reuse accounts from other servers.\"\"\"\n\nBACKUP_DIR = \"backups\"\n\"\"\"Directory (relative to `IConfig.work_dir`) where backups are kept.\"\"\"\n\nCSR_DIR = \"csr\"\n\"\"\"See `.IConfig.csr_dir`.\"\"\"\n\nIN_PROGRESS_DIR = \"IN_PROGRESS\"\n\"\"\"Directory used before a permanent checkpoint is finalized (relative to\n`IConfig.work_dir`).\"\"\"\n\nKEY_DIR = \"keys\"\n\"\"\"Directory (relative to `IConfig.config_dir`) where keys are saved.\"\"\"\n\nLIVE_DIR = \"live\"\n\"\"\"Live directory, relative to `IConfig.config_dir`.\"\"\"\n\nTEMP_CHECKPOINT_DIR = \"temp_checkpoint\"\n\"\"\"Temporary checkpoint directory (relative to `IConfig.work_dir`).\"\"\"\n\nRENEWAL_CONFIGS_DIR = \"renewal\"\n\"\"\"Renewal configs directory, relative to `IConfig.config_dir`.\"\"\"\n\nRENEWAL_HOOKS_DIR = \"renewal-hooks\"\n\"\"\"Basename of directory containing hooks to run with the renew command.\"\"\"\n\nRENEWAL_PRE_HOOKS_DIR = \"pre\"\n\"\"\"Basename of directory containing pre-hooks to run with the renew command.\"\"\"\n\nRENEWAL_DEPLOY_HOOKS_DIR = \"deploy\"\n\"\"\"Basename of directory containing deploy-hooks to run with the renew command.\"\"\"\n\nRENEWAL_POST_HOOKS_DIR = \"post\"\n\"\"\"Basename of directory containing post-hooks to run with the renew command.\"\"\"\n\nFORCE_INTERACTIVE_FLAG = \"--force-interactive\"\n\"\"\"Flag to disable TTY checking in IDisplay.\"\"\"\n\nEFF_SUBSCRIBE_URI = \"https://supporters.eff.org/subscribe/certbot\"\n\"\"\"EFF URI used to submit the e-mail address of users who opt-in.\"\"\"\n\nSSL_DHPARAMS_DEST = \"ssl-dhparams.pem\"\n\"\"\"Name of the ssl_dhparams file as saved in `IConfig.config_dir`.\"\"\"\n\nSSL_DHPARAMS_SRC = pkg_resources.resource_filename(\n \"certbot\", \"ssl-dhparams.pem\")\n\"\"\"Path to the nginx ssl_dhparams file found in the Certbot distribution.\"\"\"\n\nUPDATED_SSL_DHPARAMS_DIGEST = \".updated-ssl-dhparams-pem-digest.txt\"\n\"\"\"Name of the hash of the updated or informed ssl_dhparams as saved in `IConfig.config_dir`.\"\"\"\n\nALL_SSL_DHPARAMS_HASHES = [\n 
'9ba6429597aeed2d8617a7705b56e96d044f64b07971659382e426675105654b',\n]\n\"\"\"SHA256 hashes of the contents of all versions of SSL_DHPARAMS_SRC\"\"\"\n", "path": "certbot/constants.py"}]}
| 2,554 | 123 |
gh_patches_debug_43065
|
rasdani/github-patches
|
git_diff
|
numba__numba-1692
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot use cffi.from_buffer when explicitly passing signature to JIT
I was experimenting with passing arrays to CFFI functions (see https://github.com/numba/numba/pull/1464) but found that it only works when not specifying the signature. This change makes the test fail in 0.23.1:
``` diff
diff --git a/numba/tests/test_cffi.py b/numba/tests/test_cffi.py
index ca304f9..fa1752d 100644
--- a/numba/tests/test_cffi.py
+++ b/numba/tests/test_cffi.py
@@ -82,7 +82,9 @@ class TestCFFI(TestCase):
np.testing.assert_equal(pyfunc(x), cfunc(x))
def test_pass_numpy_array_float32(self):
- self._test_pass_numpy_array(vector_sin_float32, np.float32)
+ x = np.arange(10).astype(np.float32)
+ cfunc = jit('float32[:](float32[:])', nopython=True)(vector_sin_float32)
+ np.testing.assert_equal(vector_sin_float32(x), cfunc(x))
def test_pass_numpy_array_float64(self):
self._test_pass_numpy_array(vector_sin_float64, np.float64)
```
I was not able to try it with the latest numba since it requires llvmlite 0.9.0, which is not available in conda yet.
The workaround for me has been to provide a separate function without a signature, but it's a pity because it prevents me from providing different specializations for CFFI functions (see https://github.com/Pybonacci/cffi_test/).
--- END ISSUE ---
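For context on the API under discussion, a minimal plain-CFFI sketch (independent of Numba and of the `vector_sin_float32` test module; the array values are arbitrary) of what `ffi.from_buffer` does with a NumPy array:

```python
from cffi import FFI
import numpy as np

ffi = FFI()

x = np.arange(10, dtype=np.float32)
buf = ffi.from_buffer(x)        # zero-copy cdata view of x's memory
ptr = ffi.cast("float *", buf)  # typed pointer a C routine could consume
print(ptr[3])                   # prints 3.0, same storage as x[3]
```

The jitted path in the report is expected to perform the same zero-copy handoff; the failure only appears when the signature is given explicitly.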
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `numba/typing/cffi_utils.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 Support for CFFI. Allows checking whether objects are CFFI functions and
4 obtaining the pointer and numba signature.
5 """
6 from __future__ import print_function, division, absolute_import
7
8 from types import BuiltinFunctionType
9 import ctypes
10
11 from numba import types
12 from . import templates
13
14 try:
15 import cffi
16 ffi = cffi.FFI()
17 except ImportError:
18 ffi = None
19
20 SUPPORTED = ffi is not None
21 _ool_func_types = {}
22 _ool_func_ptr = {}
23 _ffi_instances = set()
24
25
26 def is_ffi_instance(obj):
27 # Compiled FFI modules have a member, ffi, which is an instance of
28 # CompiledFFI, which behaves similarly to an instance of cffi.FFI. In
29 # order to simplify handling a CompiledFFI object, we treat them as
30 # if they're cffi.FFI instances for typing and lowering purposes.
31 try:
32 return obj in _ffi_instances or isinstance(obj, cffi.FFI)
33 except TypeError: # Unhashable type possible
34 return False
35
36 def is_cffi_func(obj):
37 """Check whether the obj is a CFFI function"""
38 try:
39 return ffi.typeof(obj).kind == 'function'
40 except TypeError:
41 try:
42 return obj in _ool_func_types
43 except:
44 return False
45
46 def get_pointer(cffi_func):
47 """
48 Get a pointer to the underlying function for a CFFI function as an
49 integer.
50 """
51 if cffi_func in _ool_func_ptr:
52 return _ool_func_ptr[cffi_func]
53 return int(ffi.cast("uintptr_t", cffi_func))
54
55
56 _cached_type_map = None
57
58 def _type_map():
59 """
60 Lazily compute type map, as calling ffi.typeof() involves costly
61 parsing of C code...
62 """
63 global _cached_type_map
64 if _cached_type_map is None:
65 _cached_type_map = {
66 ffi.typeof('char') : types.int8,
67 ffi.typeof('short') : types.short,
68 ffi.typeof('int') : types.intc,
69 ffi.typeof('long') : types.long_,
70 ffi.typeof('long long') : types.longlong,
71 ffi.typeof('unsigned char') : types.uchar,
72 ffi.typeof('unsigned short') : types.ushort,
73 ffi.typeof('unsigned int') : types.uintc,
74 ffi.typeof('unsigned long') : types.ulong,
75 ffi.typeof('unsigned long long') : types.ulonglong,
76 ffi.typeof('int8_t') : types.char,
77 ffi.typeof('uint8_t') : types.uchar,
78 ffi.typeof('int16_t') : types.short,
79 ffi.typeof('uint16_t') : types.ushort,
80 ffi.typeof('int32_t') : types.intc,
81 ffi.typeof('uint32_t') : types.uintc,
82 ffi.typeof('int64_t') : types.longlong,
83 ffi.typeof('uint64_t') : types.ulonglong,
84 ffi.typeof('float') : types.float_,
85 ffi.typeof('double') : types.double,
86 ffi.typeof('char *') : types.voidptr,
87 ffi.typeof('void *') : types.voidptr,
88 ffi.typeof('uint8_t *') : types.CPointer(types.uint8),
89 ffi.typeof('float *') : types.CPointer(types.float32),
90 ffi.typeof('double *') : types.CPointer(types.float64),
91 ffi.typeof('ssize_t') : types.intp,
92 ffi.typeof('size_t') : types.uintp,
93 ffi.typeof('void') : types.void,
94 }
95 return _cached_type_map
96
97
98 def map_type(cffi_type):
99 """
100 Map CFFI type to numba type.
101 """
102 kind = getattr(cffi_type, 'kind', '')
103 if kind in ('struct', 'union'):
104 raise TypeError("No support for struct or union")
105 elif kind == 'function':
106 if cffi_type.ellipsis:
107 raise TypeError("vararg function is not supported")
108 restype = map_type(cffi_type.result)
109 argtypes = [map_type(arg) for arg in cffi_type.args]
110 return templates.signature(restype, *argtypes)
111 else:
112 result = _type_map().get(cffi_type)
113
114 if result is None:
115 raise TypeError(cffi_type)
116
117 return result
118
119
120 def make_function_type(cffi_func):
121 """
122 Return a Numba type for the given CFFI function pointer.
123 """
124 cffi_type = _ool_func_types.get(cffi_func) or ffi.typeof(cffi_func)
125 sig = map_type(cffi_type)
126 return types.ExternalFunctionPointer(sig, get_pointer=get_pointer)
127
128
129 class ExternCFunction(types.ExternalFunction):
130 # XXX unused?
131
132 def __init__(self, symbol, cstring):
133 """Parse C function declaration/signature"""
134 parser = cffi.cparser.Parser()
135 rft = parser.parse_type(cstring) # "RawFunctionType"
136 type_map = _type_map()
137 self.restype = type_map[rft.result.build_backend_type(ffi, None)]
138 self.argtypes = [type_map[arg.build_backend_type(ffi, None)] for arg in rft.args]
139 signature = templates.signature(self.restype, *self.argtypes)
140 super(ExternCFunction, self).__init__(symbol, signature)
141
142
143 registry = templates.Registry()
144
145 @registry.register
146 class FFI_from_buffer(templates.AbstractTemplate):
147 key = 'ffi.from_buffer'
148
149 def generic(self, args, kws):
150 if kws or (len(args) != 1):
151 return
152 [ary] = args
153 if not (isinstance(ary, types.Array) and ary.layout in ('C', 'F')):
154 return
155 ptr = types.CPointer(ary.dtype)
156 return templates.signature(ptr, ary)
157
158 @registry.register_attr
159 class FFIAttribute(templates.AttributeTemplate):
160 key = types.ffi
161
162 def resolve_from_buffer(self, ffi):
163 return types.BoundFunction(FFI_from_buffer, types.ffi)
164
165
166 def register_module(mod):
167 """
168 Add typing for all functions in an out-of-line CFFI module to the typemap
169 """
170 for f in dir(mod.lib):
171 f = getattr(mod.lib, f)
172 if isinstance(f, BuiltinFunctionType):
173 _ool_func_types[f] = mod.ffi.typeof(f)
174 addr = mod.ffi.addressof(mod.lib, f.__name__)
175 _ool_func_ptr[f] = int(mod.ffi.cast("uintptr_t", addr))
176 _ffi_instances.add(mod.ffi)
177
```
Path: `numba/targets/cffiimpl.py`
Content:
```
1 """
2 Implementation of some CFFI functions
3 """
4
5 from __future__ import print_function, absolute_import, division
6
7 from numba.targets.imputils import Registry
8 from numba import types
9 from . import arrayobj
10
11 registry = Registry()
12
13 @registry.lower('ffi.from_buffer', types.Array)
14 def from_buffer(context, builder, sig, args):
15 assert len(sig.args) == 1
16 assert len(args) == 1
17 [fromty] = sig.args
18 [val] = args
19 # Type inference should have prevented passing a buffer from an
20 # array to a pointer of the wrong type
21 assert fromty.dtype == sig.return_type.dtype
22 ary = arrayobj.make_array(fromty)(context, builder, val)
23 return ary.data
24
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/numba/targets/cffiimpl.py b/numba/targets/cffiimpl.py
--- a/numba/targets/cffiimpl.py
+++ b/numba/targets/cffiimpl.py
@@ -10,7 +10,7 @@
registry = Registry()
[email protected]('ffi.from_buffer', types.Array)
[email protected]('ffi.from_buffer', types.Buffer)
def from_buffer(context, builder, sig, args):
assert len(sig.args) == 1
assert len(args) == 1
diff --git a/numba/typing/cffi_utils.py b/numba/typing/cffi_utils.py
--- a/numba/typing/cffi_utils.py
+++ b/numba/typing/cffi_utils.py
@@ -9,6 +9,7 @@
import ctypes
from numba import types
+from numba.errors import TypingError
from . import templates
try:
@@ -63,7 +64,7 @@
global _cached_type_map
if _cached_type_map is None:
_cached_type_map = {
- ffi.typeof('char') : types.int8,
+ ffi.typeof('char') : types.char,
ffi.typeof('short') : types.short,
ffi.typeof('int') : types.intc,
ffi.typeof('long') : types.long_,
@@ -83,11 +84,6 @@
ffi.typeof('uint64_t') : types.ulonglong,
ffi.typeof('float') : types.float_,
ffi.typeof('double') : types.double,
- ffi.typeof('char *') : types.voidptr,
- ffi.typeof('void *') : types.voidptr,
- ffi.typeof('uint8_t *') : types.CPointer(types.uint8),
- ffi.typeof('float *') : types.CPointer(types.float32),
- ffi.typeof('double *') : types.CPointer(types.float64),
ffi.typeof('ssize_t') : types.intp,
ffi.typeof('size_t') : types.uintp,
ffi.typeof('void') : types.void,
@@ -101,20 +97,24 @@
"""
kind = getattr(cffi_type, 'kind', '')
if kind in ('struct', 'union'):
- raise TypeError("No support for struct or union")
+ raise TypeError("No support for CFFI %s" % (kind,))
elif kind == 'function':
if cffi_type.ellipsis:
raise TypeError("vararg function is not supported")
restype = map_type(cffi_type.result)
argtypes = [map_type(arg) for arg in cffi_type.args]
return templates.signature(restype, *argtypes)
+ elif kind == 'pointer':
+ pointee = cffi_type.item
+ if pointee.kind == 'void':
+ return types.voidptr
+ else:
+ return types.CPointer(map_type(pointee))
else:
result = _type_map().get(cffi_type)
-
- if result is None:
- raise TypeError(cffi_type)
-
- return result
+ if result is None:
+ raise TypeError(cffi_type)
+ return result
def make_function_type(cffi_func):
@@ -147,11 +147,15 @@
key = 'ffi.from_buffer'
def generic(self, args, kws):
- if kws or (len(args) != 1):
+ if kws or len(args) != 1:
return
[ary] = args
- if not (isinstance(ary, types.Array) and ary.layout in ('C', 'F')):
- return
+ if not isinstance(ary, types.Buffer):
+ raise TypingError("from_buffer() expected a buffer object, got %s"
+ % (ary,))
+ if ary.layout not in ('C', 'F'):
+ raise TypingError("from_buffer() unsupported on non-contiguous buffers (got %s)"
+ % (ary,))
ptr = types.CPointer(ary.dtype)
return templates.signature(ptr, ary)
|
{"golden_diff": "diff --git a/numba/targets/cffiimpl.py b/numba/targets/cffiimpl.py\n--- a/numba/targets/cffiimpl.py\n+++ b/numba/targets/cffiimpl.py\n@@ -10,7 +10,7 @@\n \n registry = Registry()\n \[email protected]('ffi.from_buffer', types.Array)\[email protected]('ffi.from_buffer', types.Buffer)\n def from_buffer(context, builder, sig, args):\n assert len(sig.args) == 1\n assert len(args) == 1\ndiff --git a/numba/typing/cffi_utils.py b/numba/typing/cffi_utils.py\n--- a/numba/typing/cffi_utils.py\n+++ b/numba/typing/cffi_utils.py\n@@ -9,6 +9,7 @@\n import ctypes\n \n from numba import types\n+from numba.errors import TypingError\n from . import templates\n \n try:\n@@ -63,7 +64,7 @@\n global _cached_type_map\n if _cached_type_map is None:\n _cached_type_map = {\n- ffi.typeof('char') : types.int8,\n+ ffi.typeof('char') : types.char,\n ffi.typeof('short') : types.short,\n ffi.typeof('int') : types.intc,\n ffi.typeof('long') : types.long_,\n@@ -83,11 +84,6 @@\n ffi.typeof('uint64_t') : types.ulonglong,\n ffi.typeof('float') : types.float_,\n ffi.typeof('double') : types.double,\n- ffi.typeof('char *') : types.voidptr,\n- ffi.typeof('void *') : types.voidptr,\n- ffi.typeof('uint8_t *') : types.CPointer(types.uint8),\n- ffi.typeof('float *') : types.CPointer(types.float32),\n- ffi.typeof('double *') : types.CPointer(types.float64),\n ffi.typeof('ssize_t') : types.intp,\n ffi.typeof('size_t') : types.uintp,\n ffi.typeof('void') : types.void,\n@@ -101,20 +97,24 @@\n \"\"\"\n kind = getattr(cffi_type, 'kind', '')\n if kind in ('struct', 'union'):\n- raise TypeError(\"No support for struct or union\")\n+ raise TypeError(\"No support for CFFI %s\" % (kind,))\n elif kind == 'function':\n if cffi_type.ellipsis:\n raise TypeError(\"vararg function is not supported\")\n restype = map_type(cffi_type.result)\n argtypes = [map_type(arg) for arg in cffi_type.args]\n return templates.signature(restype, *argtypes)\n+ elif kind == 'pointer':\n+ pointee = cffi_type.item\n+ if pointee.kind == 'void':\n+ return types.voidptr\n+ else:\n+ return types.CPointer(map_type(pointee))\n else:\n result = _type_map().get(cffi_type)\n-\n- if result is None:\n- raise TypeError(cffi_type)\n-\n- return result\n+ if result is None:\n+ raise TypeError(cffi_type)\n+ return result\n \n \n def make_function_type(cffi_func):\n@@ -147,11 +147,15 @@\n key = 'ffi.from_buffer'\n \n def generic(self, args, kws):\n- if kws or (len(args) != 1):\n+ if kws or len(args) != 1:\n return\n [ary] = args\n- if not (isinstance(ary, types.Array) and ary.layout in ('C', 'F')):\n- return\n+ if not isinstance(ary, types.Buffer):\n+ raise TypingError(\"from_buffer() expected a buffer object, got %s\"\n+ % (ary,))\n+ if ary.layout not in ('C', 'F'):\n+ raise TypingError(\"from_buffer() unsupported on non-contiguous buffers (got %s)\"\n+ % (ary,))\n ptr = types.CPointer(ary.dtype)\n return templates.signature(ptr, ary)\n", "issue": "Cannot use cffi.from_buffer when explicitly passing signature to JIT\nI was experimenting with passing arrays to CFFI functions (see https://github.com/numba/numba/pull/1464) but found that it only works when not specifying the signature. 
This change makes the test fail in 0.23.1:\n\n``` diff\ndiff --git a/numba/tests/test_cffi.py b/numba/tests/test_cffi.py\nindex ca304f9..fa1752d 100644\n--- a/numba/tests/test_cffi.py\n+++ b/numba/tests/test_cffi.py\n@@ -82,7 +82,9 @@ class TestCFFI(TestCase):\n np.testing.assert_equal(pyfunc(x), cfunc(x))\n\n def test_pass_numpy_array_float32(self):\n- self._test_pass_numpy_array(vector_sin_float32, np.float32)\n+ x = np.arange(10).astype(np.float32)\n+ cfunc = jit('float32[:](float32[:])', nopython=True)(vector_sin_float32)\n+ np.testing.assert_equal(vector_sin_float32(x), cfunc(x))\n\n def test_pass_numpy_array_float64(self):\n self._test_pass_numpy_array(vector_sin_float64, np.float64)\n```\n\nI was not able to try it with latest numba since it requires llvmlite 0.9.0 and it's not available in conda yet.\n\nThe workaround for me has been to provide a separate function without signature, but it's a pity because it is preventing me to provide different specializations for CFFI functions (see https://github.com/Pybonacci/cffi_test/).\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nSupport for CFFI. Allows checking whether objects are CFFI functions and\nobtaining the pointer and numba signature.\n\"\"\"\nfrom __future__ import print_function, division, absolute_import\n\nfrom types import BuiltinFunctionType\nimport ctypes\n\nfrom numba import types\nfrom . import templates\n\ntry:\n import cffi\n ffi = cffi.FFI()\nexcept ImportError:\n ffi = None\n\nSUPPORTED = ffi is not None\n_ool_func_types = {}\n_ool_func_ptr = {}\n_ffi_instances = set()\n\n\ndef is_ffi_instance(obj):\n # Compiled FFI modules have a member, ffi, which is an instance of\n # CompiledFFI, which behaves similarly to an instance of cffi.FFI. In\n # order to simplify handling a CompiledFFI object, we treat them as\n # if they're cffi.FFI instances for typing and lowering purposes.\n try:\n return obj in _ffi_instances or isinstance(obj, cffi.FFI)\n except TypeError: # Unhashable type possible\n return False\n\ndef is_cffi_func(obj):\n \"\"\"Check whether the obj is a CFFI function\"\"\"\n try:\n return ffi.typeof(obj).kind == 'function'\n except TypeError:\n try:\n return obj in _ool_func_types\n except:\n return False\n\ndef get_pointer(cffi_func):\n \"\"\"\n Get a pointer to the underlying function for a CFFI function as an\n integer.\n \"\"\"\n if cffi_func in _ool_func_ptr:\n return _ool_func_ptr[cffi_func]\n return int(ffi.cast(\"uintptr_t\", cffi_func))\n\n\n_cached_type_map = None\n\ndef _type_map():\n \"\"\"\n Lazily compute type map, as calling ffi.typeof() involves costly\n parsing of C code...\n \"\"\"\n global _cached_type_map\n if _cached_type_map is None:\n _cached_type_map = {\n ffi.typeof('char') : types.int8,\n ffi.typeof('short') : types.short,\n ffi.typeof('int') : types.intc,\n ffi.typeof('long') : types.long_,\n ffi.typeof('long long') : types.longlong,\n ffi.typeof('unsigned char') : types.uchar,\n ffi.typeof('unsigned short') : types.ushort,\n ffi.typeof('unsigned int') : types.uintc,\n ffi.typeof('unsigned long') : types.ulong,\n ffi.typeof('unsigned long long') : types.ulonglong,\n ffi.typeof('int8_t') : types.char,\n ffi.typeof('uint8_t') : types.uchar,\n ffi.typeof('int16_t') : types.short,\n ffi.typeof('uint16_t') : types.ushort,\n ffi.typeof('int32_t') : types.intc,\n ffi.typeof('uint32_t') : types.uintc,\n ffi.typeof('int64_t') : types.longlong,\n ffi.typeof('uint64_t') : types.ulonglong,\n ffi.typeof('float') : types.float_,\n ffi.typeof('double') : types.double,\n 
ffi.typeof('char *') : types.voidptr,\n ffi.typeof('void *') : types.voidptr,\n ffi.typeof('uint8_t *') : types.CPointer(types.uint8),\n ffi.typeof('float *') : types.CPointer(types.float32),\n ffi.typeof('double *') : types.CPointer(types.float64),\n ffi.typeof('ssize_t') : types.intp,\n ffi.typeof('size_t') : types.uintp,\n ffi.typeof('void') : types.void,\n }\n return _cached_type_map\n\n\ndef map_type(cffi_type):\n \"\"\"\n Map CFFI type to numba type.\n \"\"\"\n kind = getattr(cffi_type, 'kind', '')\n if kind in ('struct', 'union'):\n raise TypeError(\"No support for struct or union\")\n elif kind == 'function':\n if cffi_type.ellipsis:\n raise TypeError(\"vararg function is not supported\")\n restype = map_type(cffi_type.result)\n argtypes = [map_type(arg) for arg in cffi_type.args]\n return templates.signature(restype, *argtypes)\n else:\n result = _type_map().get(cffi_type)\n\n if result is None:\n raise TypeError(cffi_type)\n\n return result\n\n\ndef make_function_type(cffi_func):\n \"\"\"\n Return a Numba type for the given CFFI function pointer.\n \"\"\"\n cffi_type = _ool_func_types.get(cffi_func) or ffi.typeof(cffi_func)\n sig = map_type(cffi_type)\n return types.ExternalFunctionPointer(sig, get_pointer=get_pointer)\n\n\nclass ExternCFunction(types.ExternalFunction):\n # XXX unused?\n\n def __init__(self, symbol, cstring):\n \"\"\"Parse C function declaration/signature\"\"\"\n parser = cffi.cparser.Parser()\n rft = parser.parse_type(cstring) # \"RawFunctionType\"\n type_map = _type_map()\n self.restype = type_map[rft.result.build_backend_type(ffi, None)]\n self.argtypes = [type_map[arg.build_backend_type(ffi, None)] for arg in rft.args]\n signature = templates.signature(self.restype, *self.argtypes)\n super(ExternCFunction, self).__init__(symbol, signature)\n\n\nregistry = templates.Registry()\n\[email protected]\nclass FFI_from_buffer(templates.AbstractTemplate):\n key = 'ffi.from_buffer'\n\n def generic(self, args, kws):\n if kws or (len(args) != 1):\n return\n [ary] = args\n if not (isinstance(ary, types.Array) and ary.layout in ('C', 'F')):\n return\n ptr = types.CPointer(ary.dtype)\n return templates.signature(ptr, ary)\n\[email protected]_attr\nclass FFIAttribute(templates.AttributeTemplate):\n key = types.ffi\n\n def resolve_from_buffer(self, ffi):\n return types.BoundFunction(FFI_from_buffer, types.ffi)\n\n\ndef register_module(mod):\n \"\"\"\n Add typing for all functions in an out-of-line CFFI module to the typemap\n \"\"\"\n for f in dir(mod.lib):\n f = getattr(mod.lib, f)\n if isinstance(f, BuiltinFunctionType):\n _ool_func_types[f] = mod.ffi.typeof(f)\n addr = mod.ffi.addressof(mod.lib, f.__name__)\n _ool_func_ptr[f] = int(mod.ffi.cast(\"uintptr_t\", addr))\n _ffi_instances.add(mod.ffi)\n", "path": "numba/typing/cffi_utils.py"}, {"content": "\"\"\"\nImplementation of some CFFI functions\n\"\"\"\n\nfrom __future__ import print_function, absolute_import, division\n\nfrom numba.targets.imputils import Registry\nfrom numba import types\nfrom . 
import arrayobj\n\nregistry = Registry()\n\[email protected]('ffi.from_buffer', types.Array)\ndef from_buffer(context, builder, sig, args):\n assert len(sig.args) == 1\n assert len(args) == 1\n [fromty] = sig.args\n [val] = args\n # Type inference should have prevented passing a buffer from an\n # array to a pointer of the wrong type\n assert fromty.dtype == sig.return_type.dtype\n ary = arrayobj.make_array(fromty)(context, builder, val)\n return ary.data\n", "path": "numba/targets/cffiimpl.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nSupport for CFFI. Allows checking whether objects are CFFI functions and\nobtaining the pointer and numba signature.\n\"\"\"\nfrom __future__ import print_function, division, absolute_import\n\nfrom types import BuiltinFunctionType\nimport ctypes\n\nfrom numba import types\nfrom numba.errors import TypingError\nfrom . import templates\n\ntry:\n import cffi\n ffi = cffi.FFI()\nexcept ImportError:\n ffi = None\n\nSUPPORTED = ffi is not None\n_ool_func_types = {}\n_ool_func_ptr = {}\n_ffi_instances = set()\n\n\ndef is_ffi_instance(obj):\n # Compiled FFI modules have a member, ffi, which is an instance of\n # CompiledFFI, which behaves similarly to an instance of cffi.FFI. In\n # order to simplify handling a CompiledFFI object, we treat them as\n # if they're cffi.FFI instances for typing and lowering purposes.\n try:\n return obj in _ffi_instances or isinstance(obj, cffi.FFI)\n except TypeError: # Unhashable type possible\n return False\n\ndef is_cffi_func(obj):\n \"\"\"Check whether the obj is a CFFI function\"\"\"\n try:\n return ffi.typeof(obj).kind == 'function'\n except TypeError:\n try:\n return obj in _ool_func_types\n except:\n return False\n\ndef get_pointer(cffi_func):\n \"\"\"\n Get a pointer to the underlying function for a CFFI function as an\n integer.\n \"\"\"\n if cffi_func in _ool_func_ptr:\n return _ool_func_ptr[cffi_func]\n return int(ffi.cast(\"uintptr_t\", cffi_func))\n\n\n_cached_type_map = None\n\ndef _type_map():\n \"\"\"\n Lazily compute type map, as calling ffi.typeof() involves costly\n parsing of C code...\n \"\"\"\n global _cached_type_map\n if _cached_type_map is None:\n _cached_type_map = {\n ffi.typeof('char') : types.char,\n ffi.typeof('short') : types.short,\n ffi.typeof('int') : types.intc,\n ffi.typeof('long') : types.long_,\n ffi.typeof('long long') : types.longlong,\n ffi.typeof('unsigned char') : types.uchar,\n ffi.typeof('unsigned short') : types.ushort,\n ffi.typeof('unsigned int') : types.uintc,\n ffi.typeof('unsigned long') : types.ulong,\n ffi.typeof('unsigned long long') : types.ulonglong,\n ffi.typeof('int8_t') : types.char,\n ffi.typeof('uint8_t') : types.uchar,\n ffi.typeof('int16_t') : types.short,\n ffi.typeof('uint16_t') : types.ushort,\n ffi.typeof('int32_t') : types.intc,\n ffi.typeof('uint32_t') : types.uintc,\n ffi.typeof('int64_t') : types.longlong,\n ffi.typeof('uint64_t') : types.ulonglong,\n ffi.typeof('float') : types.float_,\n ffi.typeof('double') : types.double,\n ffi.typeof('ssize_t') : types.intp,\n ffi.typeof('size_t') : types.uintp,\n ffi.typeof('void') : types.void,\n }\n return _cached_type_map\n\n\ndef map_type(cffi_type):\n \"\"\"\n Map CFFI type to numba type.\n \"\"\"\n kind = getattr(cffi_type, 'kind', '')\n if kind in ('struct', 'union'):\n raise TypeError(\"No support for CFFI %s\" % (kind,))\n elif kind == 'function':\n if cffi_type.ellipsis:\n raise TypeError(\"vararg function is not supported\")\n restype = map_type(cffi_type.result)\n argtypes = [map_type(arg) 
for arg in cffi_type.args]\n return templates.signature(restype, *argtypes)\n elif kind == 'pointer':\n pointee = cffi_type.item\n if pointee.kind == 'void':\n return types.voidptr\n else:\n return types.CPointer(map_type(pointee))\n else:\n result = _type_map().get(cffi_type)\n if result is None:\n raise TypeError(cffi_type)\n return result\n\n\ndef make_function_type(cffi_func):\n \"\"\"\n Return a Numba type for the given CFFI function pointer.\n \"\"\"\n cffi_type = _ool_func_types.get(cffi_func) or ffi.typeof(cffi_func)\n sig = map_type(cffi_type)\n return types.ExternalFunctionPointer(sig, get_pointer=get_pointer)\n\n\nclass ExternCFunction(types.ExternalFunction):\n # XXX unused?\n\n def __init__(self, symbol, cstring):\n \"\"\"Parse C function declaration/signature\"\"\"\n parser = cffi.cparser.Parser()\n rft = parser.parse_type(cstring) # \"RawFunctionType\"\n type_map = _type_map()\n self.restype = type_map[rft.result.build_backend_type(ffi, None)]\n self.argtypes = [type_map[arg.build_backend_type(ffi, None)] for arg in rft.args]\n signature = templates.signature(self.restype, *self.argtypes)\n super(ExternCFunction, self).__init__(symbol, signature)\n\n\nregistry = templates.Registry()\n\[email protected]\nclass FFI_from_buffer(templates.AbstractTemplate):\n key = 'ffi.from_buffer'\n\n def generic(self, args, kws):\n if kws or len(args) != 1:\n return\n [ary] = args\n if not isinstance(ary, types.Buffer):\n raise TypingError(\"from_buffer() expected a buffer object, got %s\"\n % (ary,))\n if ary.layout not in ('C', 'F'):\n raise TypingError(\"from_buffer() unsupported on non-contiguous buffers (got %s)\"\n % (ary,))\n ptr = types.CPointer(ary.dtype)\n return templates.signature(ptr, ary)\n\[email protected]_attr\nclass FFIAttribute(templates.AttributeTemplate):\n key = types.ffi\n\n def resolve_from_buffer(self, ffi):\n return types.BoundFunction(FFI_from_buffer, types.ffi)\n\n\ndef register_module(mod):\n \"\"\"\n Add typing for all functions in an out-of-line CFFI module to the typemap\n \"\"\"\n for f in dir(mod.lib):\n f = getattr(mod.lib, f)\n if isinstance(f, BuiltinFunctionType):\n _ool_func_types[f] = mod.ffi.typeof(f)\n addr = mod.ffi.addressof(mod.lib, f.__name__)\n _ool_func_ptr[f] = int(mod.ffi.cast(\"uintptr_t\", addr))\n _ffi_instances.add(mod.ffi)\n", "path": "numba/typing/cffi_utils.py"}, {"content": "\"\"\"\nImplementation of some CFFI functions\n\"\"\"\n\nfrom __future__ import print_function, absolute_import, division\n\nfrom numba.targets.imputils import Registry\nfrom numba import types\nfrom . import arrayobj\n\nregistry = Registry()\n\[email protected]('ffi.from_buffer', types.Buffer)\ndef from_buffer(context, builder, sig, args):\n assert len(sig.args) == 1\n assert len(args) == 1\n [fromty] = sig.args\n [val] = args\n # Type inference should have prevented passing a buffer from an\n # array to a pointer of the wrong type\n assert fromty.dtype == sig.return_type.dtype\n ary = arrayobj.make_array(fromty)(context, builder, val)\n return ary.data\n", "path": "numba/targets/cffiimpl.py"}]}
| 2,778 | 932 |
gh_patches_debug_63274
|
rasdani/github-patches
|
git_diff
|
Mailu__Mailu-2603
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Maximum number of connections from user+IP exceeded
Hi, we have a problem... :-)
We have changed the original value of "AUTH_RATELIMIT" to "AUTH_RATELIMIT=100/minute;6000/hour", but logs continue to say " Maximum number of connections from user+IP exceeded (mail_max_userip_connections=20)" while reading response from upstream..."
We have made docker-compose down and docker-compose up -d, but without result.
How can we change the default limit set during the installation?
Thanks in advance.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/admin/mailu/internal/views/dovecot.py`
Content:
```
1 from mailu import models
2 from mailu.internal import internal
3 from flask import current_app as app
4
5 import flask
6 import socket
7 import os
8 import sqlalchemy.exc
9
10 @internal.route("/dovecot/passdb/<path:user_email>")
11 def dovecot_passdb_dict(user_email):
12 user = models.User.query.get(user_email) or flask.abort(404)
13 allow_nets = []
14 allow_nets.append(app.config["SUBNET"])
15 if app.config["SUBNET6"]:
16 allow_nets.append(app.config["SUBNET6"])
17 return flask.jsonify({
18 "password": None,
19 "nopassword": "Y",
20 "allow_nets": ",".join(allow_nets)
21 })
22
23 @internal.route("/dovecot/userdb/")
24 def dovecot_userdb_dict_list():
25 return flask.jsonify([
26 user[0] for user in models.User.query.filter(models.User.enabled.is_(True)).with_entities(models.User.email).all()
27 ])
28
29 @internal.route("/dovecot/userdb/<path:user_email>")
30 def dovecot_userdb_dict(user_email):
31 try:
32 quota = models.User.query.filter(models.User.email==user_email).with_entities(models.User.quota_bytes).one_or_none() or flask.abort(404)
33 except sqlalchemy.exc.StatementError as exc:
34 flask.abort(404)
35 return flask.jsonify({
36 "quota_rule": f"*:bytes={quota[0]}"
37 })
38
39
40 @internal.route("/dovecot/quota/<ns>/<path:user_email>", methods=["POST"])
41 def dovecot_quota(ns, user_email):
42 user = models.User.query.get(user_email) or flask.abort(404)
43 if ns == "storage":
44 user.quota_bytes_used = flask.request.get_json()
45 user.dont_change_updated_at()
46 models.db.session.commit()
47 return flask.jsonify(None)
48
49
50 @internal.route("/dovecot/sieve/name/<script>/<path:user_email>")
51 def dovecot_sieve_name(script, user_email):
52 return flask.jsonify(script)
53
54
55 @internal.route("/dovecot/sieve/data/default/<path:user_email>")
56 def dovecot_sieve_data(user_email):
57 user = models.User.query.get(user_email) or flask.abort(404)
58 return flask.jsonify(flask.render_template("default.sieve", user=user))
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/core/admin/mailu/internal/views/dovecot.py b/core/admin/mailu/internal/views/dovecot.py
--- a/core/admin/mailu/internal/views/dovecot.py
+++ b/core/admin/mailu/internal/views/dovecot.py
@@ -17,7 +17,7 @@
return flask.jsonify({
"password": None,
"nopassword": "Y",
- "allow_nets": ",".join(allow_nets)
+ "allow_real_nets": ",".join(allow_nets)
})
@internal.route("/dovecot/userdb/")
|
{"golden_diff": "diff --git a/core/admin/mailu/internal/views/dovecot.py b/core/admin/mailu/internal/views/dovecot.py\n--- a/core/admin/mailu/internal/views/dovecot.py\n+++ b/core/admin/mailu/internal/views/dovecot.py\n@@ -17,7 +17,7 @@\n return flask.jsonify({\n \"password\": None,\n \"nopassword\": \"Y\",\n- \"allow_nets\": \",\".join(allow_nets)\n+ \"allow_real_nets\": \",\".join(allow_nets)\n })\n \n @internal.route(\"/dovecot/userdb/\")\n", "issue": "Maximum number of connections from user+IP exceeded \nHi, we have a problem... :-)\r\nWe have changed the original value of \"AUTH_RATELIMIT\" to \"AUTH_RATELIMIT=100/minute;6000/hour\", but logs continue to say \" Maximum number of connections from user+IP exceeded (mail_max_userip_connections=20)\" while reading response from upstream...\"\r\nWe have made docker-compose dow and docker-compose up -d, but without result.\r\nHow can we change the default limit set during the installation?\r\nThanks in advance.\n", "before_files": [{"content": "from mailu import models\nfrom mailu.internal import internal\nfrom flask import current_app as app\n\nimport flask\nimport socket\nimport os\nimport sqlalchemy.exc\n\[email protected](\"/dovecot/passdb/<path:user_email>\")\ndef dovecot_passdb_dict(user_email):\n user = models.User.query.get(user_email) or flask.abort(404)\n allow_nets = []\n allow_nets.append(app.config[\"SUBNET\"])\n if app.config[\"SUBNET6\"]:\n allow_nets.append(app.config[\"SUBNET6\"])\n return flask.jsonify({\n \"password\": None,\n \"nopassword\": \"Y\",\n \"allow_nets\": \",\".join(allow_nets)\n })\n\[email protected](\"/dovecot/userdb/\")\ndef dovecot_userdb_dict_list():\n return flask.jsonify([\n user[0] for user in models.User.query.filter(models.User.enabled.is_(True)).with_entities(models.User.email).all()\n ])\n\[email protected](\"/dovecot/userdb/<path:user_email>\")\ndef dovecot_userdb_dict(user_email):\n try:\n quota = models.User.query.filter(models.User.email==user_email).with_entities(models.User.quota_bytes).one_or_none() or flask.abort(404)\n except sqlalchemy.exc.StatementError as exc:\n flask.abort(404)\n return flask.jsonify({\n \"quota_rule\": f\"*:bytes={quota[0]}\"\n })\n\n\[email protected](\"/dovecot/quota/<ns>/<path:user_email>\", methods=[\"POST\"])\ndef dovecot_quota(ns, user_email):\n user = models.User.query.get(user_email) or flask.abort(404)\n if ns == \"storage\":\n user.quota_bytes_used = flask.request.get_json()\n user.dont_change_updated_at()\n models.db.session.commit()\n return flask.jsonify(None)\n\n\[email protected](\"/dovecot/sieve/name/<script>/<path:user_email>\")\ndef dovecot_sieve_name(script, user_email):\n return flask.jsonify(script)\n\n\[email protected](\"/dovecot/sieve/data/default/<path:user_email>\")\ndef dovecot_sieve_data(user_email):\n user = models.User.query.get(user_email) or flask.abort(404)\n return flask.jsonify(flask.render_template(\"default.sieve\", user=user))\n", "path": "core/admin/mailu/internal/views/dovecot.py"}], "after_files": [{"content": "from mailu import models\nfrom mailu.internal import internal\nfrom flask import current_app as app\n\nimport flask\nimport socket\nimport os\nimport sqlalchemy.exc\n\[email protected](\"/dovecot/passdb/<path:user_email>\")\ndef dovecot_passdb_dict(user_email):\n user = models.User.query.get(user_email) or flask.abort(404)\n allow_nets = []\n allow_nets.append(app.config[\"SUBNET\"])\n if app.config[\"SUBNET6\"]:\n allow_nets.append(app.config[\"SUBNET6\"])\n return flask.jsonify({\n \"password\": None,\n 
\"nopassword\": \"Y\",\n \"allow_real_nets\": \",\".join(allow_nets)\n })\n\[email protected](\"/dovecot/userdb/\")\ndef dovecot_userdb_dict_list():\n return flask.jsonify([\n user[0] for user in models.User.query.filter(models.User.enabled.is_(True)).with_entities(models.User.email).all()\n ])\n\[email protected](\"/dovecot/userdb/<path:user_email>\")\ndef dovecot_userdb_dict(user_email):\n try:\n quota = models.User.query.filter(models.User.email==user_email).with_entities(models.User.quota_bytes).one_or_none() or flask.abort(404)\n except sqlalchemy.exc.StatementError as exc:\n flask.abort(404)\n return flask.jsonify({\n \"quota_rule\": f\"*:bytes={quota[0]}\"\n })\n\n\[email protected](\"/dovecot/quota/<ns>/<path:user_email>\", methods=[\"POST\"])\ndef dovecot_quota(ns, user_email):\n user = models.User.query.get(user_email) or flask.abort(404)\n if ns == \"storage\":\n user.quota_bytes_used = flask.request.get_json()\n user.dont_change_updated_at()\n models.db.session.commit()\n return flask.jsonify(None)\n\n\[email protected](\"/dovecot/sieve/name/<script>/<path:user_email>\")\ndef dovecot_sieve_name(script, user_email):\n return flask.jsonify(script)\n\n\[email protected](\"/dovecot/sieve/data/default/<path:user_email>\")\ndef dovecot_sieve_data(user_email):\n user = models.User.query.get(user_email) or flask.abort(404)\n return flask.jsonify(flask.render_template(\"default.sieve\", user=user))\n", "path": "core/admin/mailu/internal/views/dovecot.py"}]}
| 987 | 128 |
gh_patches_debug_20106
|
rasdani/github-patches
|
git_diff
|
microsoft__torchgeo-93
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Jupyter Notebook tutorials
We need to figure out how to render Jupyter Notebooks in our documentation so that we can provide easy-to-use tutorials for new users. This should work similarly to https://pytorch.org/tutorials/.
Ideally I would like to be able to test these tutorials so that they stay up-to-date.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # This file only contains a selection of the most common options. For a full
4 # list see the documentation:
5 # https://www.sphinx-doc.org/en/master/usage/configuration.html
6
7 # -- Path setup --------------------------------------------------------------
8
9 import os
10 import sys
11
12 import pytorch_sphinx_theme
13
14 # If extensions (or modules to document with autodoc) are in another directory,
15 # add these directories to sys.path here. If the directory is relative to the
16 # documentation root, use os.path.abspath to make it absolute, like shown here.
17 sys.path.insert(0, os.path.abspath(".."))
18
19 import torchgeo # noqa: E402
20
21 # -- Project information -----------------------------------------------------
22
23 project = "torchgeo"
24 copyright = "2021, Microsoft Corporation"
25 author = "Adam J. Stewart"
26 version = ".".join(torchgeo.__version__.split(".")[:2])
27 release = torchgeo.__version__
28
29
30 # -- General configuration ---------------------------------------------------
31
32 # Add any Sphinx extension module names here, as strings. They can be
33 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
34 # ones.
35 extensions = [
36 "sphinx.ext.autodoc",
37 "sphinx.ext.autosectionlabel",
38 "sphinx.ext.intersphinx",
39 "sphinx.ext.napoleon",
40 "sphinx.ext.todo",
41 "sphinx.ext.viewcode",
42 ]
43
44 # List of patterns, relative to source directory, that match files and
45 # directories to ignore when looking for source files.
46 # This pattern also affects html_static_path and html_extra_path.
47 exclude_patterns = ["_build"]
48
49 # Sphinx 3.0+ required for:
50 # autodoc_typehints = "description"
51 needs_sphinx = "3.0"
52
53 nitpicky = True
54 nitpick_ignore = [
55 # https://github.com/sphinx-doc/sphinx/issues/8127
56 ("py:class", ".."),
57 # TODO: can't figure out why this isn't found
58 ("py:class", "LightningDataModule"),
59 ]
60
61
62 # -- Options for HTML output -------------------------------------------------
63
64 # The theme to use for HTML and HTML Help pages. See the documentation for
65 # a list of builtin themes.
66 html_theme = "pytorch_sphinx_theme"
67 html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
68
69 # Theme options are theme-specific and customize the look and feel of a theme
70 # further. For a list of options available for each theme, see the
71 # documentation.
72 html_theme_options = {
73 "collapse_navigation": False,
74 "display_version": True,
75 "logo_only": True,
76 "pytorch_project": "docs",
77 "navigation_with_keys": True,
78 "analytics_id": "UA-117752657-2",
79 }
80
81 # -- Extension configuration -------------------------------------------------
82
83 # sphinx.ext.autodoc
84 autodoc_default_options = {
85 "members": True,
86 "special-members": True,
87 "show-inheritance": True,
88 }
89 autodoc_member_order = "bysource"
90 autodoc_typehints = "description"
91
92 # sphinx.ext.intersphinx
93 intersphinx_mapping = {
94 "python": ("https://docs.python.org/3", None),
95 "pytorch-lightning": ("https://pytorch-lightning.readthedocs.io/en/latest/", None),
96 "rasterio": ("https://rasterio.readthedocs.io/en/latest/", None),
97 "rtree": ("https://rtree.readthedocs.io/en/latest/", None),
98 "torch": ("https://pytorch.org/docs/stable", None),
99 }
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -34,11 +34,11 @@
# ones.
extensions = [
"sphinx.ext.autodoc",
- "sphinx.ext.autosectionlabel",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
+ "nbsphinx",
]
# List of patterns, relative to source directory, that match files and
@@ -97,3 +97,17 @@
"rtree": ("https://rtree.readthedocs.io/en/latest/", None),
"torch": ("https://pytorch.org/docs/stable", None),
}
+
+# nbsphinx
+nbsphinx_execute = "never"
+# TODO: branch/tag should change depending on which version of docs you look at
+# TODO: :width: may be broken
+nbsphinx_prolog = """
+{% set colab = "https://colab.research.google.com" %}
+{% set repo = "microsoft/torchgeo" %}
+{% set branch = "main" %}
+
+.. image:: {{ colab }}/assets/colab-badge.svg
+ :alt: Open in Colab
+ :target: {{ colab }}/github/{{ repo }}/blob/{{ branch }}/docs/{{ env.docname }}
+"""
|
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -34,11 +34,11 @@\n # ones.\n extensions = [\n \"sphinx.ext.autodoc\",\n- \"sphinx.ext.autosectionlabel\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.viewcode\",\n+ \"nbsphinx\",\n ]\n \n # List of patterns, relative to source directory, that match files and\n@@ -97,3 +97,17 @@\n \"rtree\": (\"https://rtree.readthedocs.io/en/latest/\", None),\n \"torch\": (\"https://pytorch.org/docs/stable\", None),\n }\n+\n+# nbsphinx\n+nbsphinx_execute = \"never\"\n+# TODO: branch/tag should change depending on which version of docs you look at\n+# TODO: :width: may be broken\n+nbsphinx_prolog = \"\"\"\n+{% set colab = \"https://colab.research.google.com\" %}\n+{% set repo = \"microsoft/torchgeo\" %}\n+{% set branch = \"main\" %}\n+\n+.. image:: {{ colab }}/assets/colab-badge.svg\n+ :alt: Open in Colab\n+ :target: {{ colab }}/github/{{ repo }}/blob/{{ branch }}/docs/{{ env.docname }}\n+\"\"\"\n", "issue": "Jupyter Notebook tutorials\nWe need to figure out how to render Jupyter Notebooks in our documentation so that we can provide easy-to-use tutorials for new users. This should work similarly to https://pytorch.org/tutorials/.\r\n\r\nIdeally I would like to be able to test these tutorials so that they stay up-to-date.\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\nimport os\nimport sys\n\nimport pytorch_sphinx_theme\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath(\"..\"))\n\nimport torchgeo # noqa: E402\n\n# -- Project information -----------------------------------------------------\n\nproject = \"torchgeo\"\ncopyright = \"2021, Microsoft Corporation\"\nauthor = \"Adam J. Stewart\"\nversion = \".\".join(torchgeo.__version__.split(\".\")[:2])\nrelease = torchgeo.__version__\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosectionlabel\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.viewcode\",\n]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\"]\n\n# Sphinx 3.0+ required for:\n# autodoc_typehints = \"description\"\nneeds_sphinx = \"3.0\"\n\nnitpicky = True\nnitpick_ignore = [\n # https://github.com/sphinx-doc/sphinx/issues/8127\n (\"py:class\", \"..\"),\n # TODO: can't figure out why this isn't found\n (\"py:class\", \"LightningDataModule\"),\n]\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\nhtml_theme = \"pytorch_sphinx_theme\"\nhtml_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n \"collapse_navigation\": False,\n \"display_version\": True,\n \"logo_only\": True,\n \"pytorch_project\": \"docs\",\n \"navigation_with_keys\": True,\n \"analytics_id\": \"UA-117752657-2\",\n}\n\n# -- Extension configuration -------------------------------------------------\n\n# sphinx.ext.autodoc\nautodoc_default_options = {\n \"members\": True,\n \"special-members\": True,\n \"show-inheritance\": True,\n}\nautodoc_member_order = \"bysource\"\nautodoc_typehints = \"description\"\n\n# sphinx.ext.intersphinx\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3\", None),\n \"pytorch-lightning\": (\"https://pytorch-lightning.readthedocs.io/en/latest/\", None),\n \"rasterio\": (\"https://rasterio.readthedocs.io/en/latest/\", None),\n \"rtree\": (\"https://rtree.readthedocs.io/en/latest/\", None),\n \"torch\": (\"https://pytorch.org/docs/stable\", None),\n}\n", "path": "docs/conf.py"}], "after_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\nimport os\nimport sys\n\nimport pytorch_sphinx_theme\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath(\"..\"))\n\nimport torchgeo # noqa: E402\n\n# -- Project information -----------------------------------------------------\n\nproject = \"torchgeo\"\ncopyright = \"2021, Microsoft Corporation\"\nauthor = \"Adam J. Stewart\"\nversion = \".\".join(torchgeo.__version__.split(\".\")[:2])\nrelease = torchgeo.__version__\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.viewcode\",\n \"nbsphinx\",\n]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\"]\n\n# Sphinx 3.0+ required for:\n# autodoc_typehints = \"description\"\nneeds_sphinx = \"3.0\"\n\nnitpicky = True\nnitpick_ignore = [\n # https://github.com/sphinx-doc/sphinx/issues/8127\n (\"py:class\", \"..\"),\n # TODO: can't figure out why this isn't found\n (\"py:class\", \"LightningDataModule\"),\n]\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\nhtml_theme = \"pytorch_sphinx_theme\"\nhtml_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n \"collapse_navigation\": False,\n \"display_version\": True,\n \"logo_only\": True,\n \"pytorch_project\": \"docs\",\n \"navigation_with_keys\": True,\n \"analytics_id\": \"UA-117752657-2\",\n}\n\n# -- Extension configuration -------------------------------------------------\n\n# sphinx.ext.autodoc\nautodoc_default_options = {\n \"members\": True,\n \"special-members\": True,\n \"show-inheritance\": True,\n}\nautodoc_member_order = \"bysource\"\nautodoc_typehints = \"description\"\n\n# sphinx.ext.intersphinx\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3\", None),\n \"pytorch-lightning\": (\"https://pytorch-lightning.readthedocs.io/en/latest/\", None),\n \"rasterio\": (\"https://rasterio.readthedocs.io/en/latest/\", None),\n \"rtree\": (\"https://rtree.readthedocs.io/en/latest/\", None),\n \"torch\": (\"https://pytorch.org/docs/stable\", None),\n}\n\n# nbsphinx\nnbsphinx_execute = \"never\"\n# TODO: branch/tag should change depending on which version of docs you look at\n# TODO: :width: may be broken\nnbsphinx_prolog = \"\"\"\n{% set colab = \"https://colab.research.google.com\" %}\n{% set repo = \"microsoft/torchgeo\" %}\n{% set branch = \"main\" %}\n\n.. image:: {{ colab }}/assets/colab-badge.svg\n :alt: Open in Colab\n :target: {{ colab }}/github/{{ repo }}/blob/{{ branch }}/docs/{{ env.docname }}\n\"\"\"\n", "path": "docs/conf.py"}]}
| 1,294 | 308 |
gh_patches_debug_32695
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-center-index-3023
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[package] jbig/20160605: Fails to build on iOS
<!--
Please don't forget to update the issue title.
Include all applicable information to help us reproduce your problem.
-->
### Package and Environment Details (include every applicable attribute)
* Package Name/Version: **jbig/20160605**
* Operating System+version: **iOS 11.0**
* Compiler+version: **apple-clang 11.0**
* Conan version: **conan 1.29.2**
* Python version: **Python 3.8.5**
### Conan profile
```
[settings]
arch=x86_64
arch_build=x86_64
build_type=Debug
compiler=apple-clang
compiler.cppstd=17
compiler.libcxx=libc++
compiler.version=11.0
os=iOS
os.version=11.0
os_build=Macos
[options]
[build_requires]
*: darwin-toolchain/1.0.8@theodelrieu/stable
[env]
```
### Steps to reproduce (Include if Applicable)
`conan install jbig/20160605@ --profile ios --build=missing`
### Logs (Include/Attach if Applicable)
<details><summary>Click to expand log</summary>
```
CMake Error at CMakeLists.txt:31 (install):
install TARGETS given no BUNDLE DESTINATION for MACOSX_BUNDLE executable
target "jbgtopbm".
```
</details>
I would suggest adding an option that disables the `pbmtojbg` and `jbgtopbm` targets from being generated. The recipe could define individual `build_` options for each, which other packages do, or go with a more generically named option that enables/disables both. For reference, `sqlite3`, `bzip2`, and `spirv-cross` have a `build_executable` option, while `glslang` has a `build_executables` option.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/jbig/all/conanfile.py`
Content:
```
1 import os
2 import glob
3 from conans import ConanFile, CMake, tools
4
5
6 class ConanJBig(ConanFile):
7 name = "jbig"
8 url = "https://github.com/conan-io/conan-center-index"
9 homepage = "https://github.com/ImageMagick/jbig"
10 description = "jbig for the Windows build of ImageMagick"
11 topics = ("conan", "jbig", "imagemagick", "window", "graphic")
12 license = "GPL-2.0"
13 exports_sources = ['CMakeLists.txt', "*.patch"]
14 generators = 'cmake'
15 settings = "os", "arch", "compiler", "build_type"
16 options = {"shared": [True, False], "fPIC": [True, False]}
17 default_options = {"shared": False, "fPIC": True}
18
19 @property
20 def _source_subfolder(self):
21 return "source_subfolder"
22
23 @property
24 def _build_subfolder(self):
25 return "build_subfolder"
26
27 def source(self):
28 tools.get(**self.conan_data["sources"][self.version])
29 for extracted_dir in glob.glob("jbig-*"):
30 os.rename(extracted_dir, self._source_subfolder)
31
32 def config_options(self):
33 if self.settings.os == "Windows":
34 del self.options.fPIC
35
36 def configure(self):
37 del self.settings.compiler.libcxx
38 del self.settings.compiler.cppstd
39
40 def _configure_cmake(self):
41 cmake = CMake(self)
42 cmake.configure(build_folder=self._build_subfolder)
43 return cmake
44
45 def build(self):
46 for patch in self.conan_data["patches"][self.version]:
47 tools.patch(**patch)
48 cmake = self._configure_cmake()
49 cmake.build()
50
51 def package(self):
52 self.copy(pattern="COPYING", dst="licenses", src=self._source_subfolder)
53 cmake = self._configure_cmake()
54 cmake.install()
55
56 def package_info(self):
57 bindir = os.path.join(self.package_folder, "bin")
58 self.output.info("Appending PATH environment variable: {}".format(bindir))
59 self.env_info.PATH.append(bindir)
60 self.cpp_info.libs = [self.name]
61 if self.options.shared and self.settings.compiler == "Visual Studio":
62 self.cpp_info.defines = ["_JBIGDLL_"]
63
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/recipes/jbig/all/conanfile.py b/recipes/jbig/all/conanfile.py
--- a/recipes/jbig/all/conanfile.py
+++ b/recipes/jbig/all/conanfile.py
@@ -13,8 +13,18 @@
exports_sources = ['CMakeLists.txt', "*.patch"]
generators = 'cmake'
settings = "os", "arch", "compiler", "build_type"
- options = {"shared": [True, False], "fPIC": [True, False]}
- default_options = {"shared": False, "fPIC": True}
+ options = {
+ "shared": [True, False],
+ "fPIC": [True, False],
+ "build_executables": [True, False]
+ }
+ default_options = {
+ "shared": False,
+ "fPIC": True,
+ "build_executables": True
+ }
+
+ _cmake = None
@property
def _source_subfolder(self):
@@ -38,9 +48,13 @@
del self.settings.compiler.cppstd
def _configure_cmake(self):
- cmake = CMake(self)
- cmake.configure(build_folder=self._build_subfolder)
- return cmake
+ if self._cmake:
+ return self._cmake
+
+ self._cmake = CMake(self)
+ self._cmake.definitions["BUILD_EXECUTABLES"] = self.options.build_executables
+ self._cmake.configure(build_folder=self._build_subfolder)
+ return self._cmake
def build(self):
for patch in self.conan_data["patches"][self.version]:
@@ -54,9 +68,11 @@
cmake.install()
def package_info(self):
- bindir = os.path.join(self.package_folder, "bin")
- self.output.info("Appending PATH environment variable: {}".format(bindir))
- self.env_info.PATH.append(bindir)
self.cpp_info.libs = [self.name]
if self.options.shared and self.settings.compiler == "Visual Studio":
self.cpp_info.defines = ["_JBIGDLL_"]
+
+ if self.options.build_executables:
+ bin_path = os.path.join(self.package_folder, "bin")
+ self.output.info("Appending PATH environment variable: {}".format(bin_path))
+ self.env_info.PATH.append(bin_path)
|
{"golden_diff": "diff --git a/recipes/jbig/all/conanfile.py b/recipes/jbig/all/conanfile.py\n--- a/recipes/jbig/all/conanfile.py\n+++ b/recipes/jbig/all/conanfile.py\n@@ -13,8 +13,18 @@\n exports_sources = ['CMakeLists.txt', \"*.patch\"]\n generators = 'cmake'\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n- options = {\"shared\": [True, False], \"fPIC\": [True, False]}\n- default_options = {\"shared\": False, \"fPIC\": True}\n+ options = {\n+ \"shared\": [True, False],\n+ \"fPIC\": [True, False],\n+ \"build_executables\": [True, False]\n+ }\n+ default_options = {\n+ \"shared\": False,\n+ \"fPIC\": True,\n+ \"build_executables\": True\n+ }\n+\n+ _cmake = None\n \n @property\n def _source_subfolder(self):\n@@ -38,9 +48,13 @@\n del self.settings.compiler.cppstd\n \n def _configure_cmake(self):\n- cmake = CMake(self)\n- cmake.configure(build_folder=self._build_subfolder)\n- return cmake\n+ if self._cmake:\n+ return self._cmake\n+\n+ self._cmake = CMake(self)\n+ self._cmake.definitions[\"BUILD_EXECUTABLES\"] = self.options.build_executables\n+ self._cmake.configure(build_folder=self._build_subfolder)\n+ return self._cmake\n \n def build(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n@@ -54,9 +68,11 @@\n cmake.install()\n \n def package_info(self):\n- bindir = os.path.join(self.package_folder, \"bin\")\n- self.output.info(\"Appending PATH environment variable: {}\".format(bindir))\n- self.env_info.PATH.append(bindir)\n self.cpp_info.libs = [self.name]\n if self.options.shared and self.settings.compiler == \"Visual Studio\":\n self.cpp_info.defines = [\"_JBIGDLL_\"]\n+\n+ if self.options.build_executables:\n+ bin_path = os.path.join(self.package_folder, \"bin\")\n+ self.output.info(\"Appending PATH environment variable: {}\".format(bin_path))\n+ self.env_info.PATH.append(bin_path)\n", "issue": "[package] jbig/20160605: Fails to build on iOS\n<!-- \r\n Please don't forget to update the issue title.\r\n Include all applicable information to help us reproduce your problem.\r\n-->\r\n\r\n### Package and Environment Details (include every applicable attribute)\r\n * Package Name/Version: **jbig/20160605**\r\n * Operating System+version: **iOS 11.0**\r\n * Compiler+version: **apple-clang 11.0**\r\n * Conan version: **conan 1.29.2**\r\n * Python version: **Python 3.8.5**\r\n\r\n### Conan profile\r\n```\r\n[settings]\r\narch=x86_64\r\narch_build=x86_64\r\nbuild_type=Debug\r\ncompiler=apple-clang\r\ncompiler.cppstd=17\r\ncompiler.libcxx=libc++\r\ncompiler.version=11.0\r\nos=iOS\r\nos.version=11.0\r\nos_build=Macos\r\n[options]\r\n[build_requires]\r\n*: darwin-toolchain/1.0.8@theodelrieu/stable\r\n[env]\r\n```\r\n\r\n\r\n### Steps to reproduce (Include if Applicable)\r\n\r\n`conan install jbig/20160605@ --profile ios --build=missing`\r\n\r\n### Logs (Include/Attach if Applicable)\r\n<details><summary>Click to expand log</summary>\r\n\r\n```\r\nCMake Error at CMakeLists.txt:31 (install):\r\n install TARGETS given no BUNDLE DESTINATION for MACOSX_BUNDLE executable\r\n target \"jbgtopbm\".\r\n```\r\n\r\n</details>\r\n\r\nI would suggest adding an option that disables the `pbmtojbg` and `jbgtopbm` targets from being generated. The recipe could define individual `build_` options for each, which other packages do, or go with a more generically named option that enables/disables both. For reference, `sqlite3`, `bzip2`, and `spirv-cross` have a `build_executable` option, while `glslang` has a `build_executables` option. 
\n", "before_files": [{"content": "import os\nimport glob\nfrom conans import ConanFile, CMake, tools\n\n\nclass ConanJBig(ConanFile):\n name = \"jbig\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/ImageMagick/jbig\"\n description = \"jbig for the Windows build of ImageMagick\"\n topics = (\"conan\", \"jbig\", \"imagemagick\", \"window\", \"graphic\")\n license = \"GPL-2.0\"\n exports_sources = ['CMakeLists.txt', \"*.patch\"]\n generators = 'cmake'\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\"shared\": [True, False], \"fPIC\": [True, False]}\n default_options = {\"shared\": False, \"fPIC\": True}\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _build_subfolder(self):\n return \"build_subfolder\"\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n for extracted_dir in glob.glob(\"jbig-*\"):\n os.rename(extracted_dir, self._source_subfolder)\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def _configure_cmake(self):\n cmake = CMake(self)\n cmake.configure(build_folder=self._build_subfolder)\n return cmake\n\n def build(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n tools.patch(**patch)\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(pattern=\"COPYING\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n\n def package_info(self):\n bindir = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH environment variable: {}\".format(bindir))\n self.env_info.PATH.append(bindir)\n self.cpp_info.libs = [self.name]\n if self.options.shared and self.settings.compiler == \"Visual Studio\":\n self.cpp_info.defines = [\"_JBIGDLL_\"]\n", "path": "recipes/jbig/all/conanfile.py"}], "after_files": [{"content": "import os\nimport glob\nfrom conans import ConanFile, CMake, tools\n\n\nclass ConanJBig(ConanFile):\n name = \"jbig\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/ImageMagick/jbig\"\n description = \"jbig for the Windows build of ImageMagick\"\n topics = (\"conan\", \"jbig\", \"imagemagick\", \"window\", \"graphic\")\n license = \"GPL-2.0\"\n exports_sources = ['CMakeLists.txt', \"*.patch\"]\n generators = 'cmake'\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"build_executables\": [True, False]\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"build_executables\": True\n }\n\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _build_subfolder(self):\n return \"build_subfolder\"\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n for extracted_dir in glob.glob(\"jbig-*\"):\n os.rename(extracted_dir, self._source_subfolder)\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n\n self._cmake = CMake(self)\n self._cmake.definitions[\"BUILD_EXECUTABLES\"] = self.options.build_executables\n 
self._cmake.configure(build_folder=self._build_subfolder)\n return self._cmake\n\n def build(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n tools.patch(**patch)\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(pattern=\"COPYING\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n\n def package_info(self):\n self.cpp_info.libs = [self.name]\n if self.options.shared and self.settings.compiler == \"Visual Studio\":\n self.cpp_info.defines = [\"_JBIGDLL_\"]\n\n if self.options.build_executables:\n bin_path = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH environment variable: {}\".format(bin_path))\n self.env_info.PATH.append(bin_path)\n", "path": "recipes/jbig/all/conanfile.py"}]}
| 1,345 | 535 |
gh_patches_debug_33865
|
rasdani/github-patches
|
git_diff
|
cowrie__cowrie-1022
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cowrie not set up for py.test framework
So I tried running the test in both python2 and python3. For python2 all the tests were passing but for python3 there was some error.
```
py.test --cov=cowrie
===================================================================================== test session starts =====================================================================================
platform linux -- Python 3.7.2, pytest-4.2.0, py-1.7.0, pluggy-0.8.1
rootdir: /home/mzfr/dev/cowrie, inifile:
plugins: cov-2.6.1
collected 3 items / 3 errors
=========================================================================================== ERRORS ============================================================================================
___________________________________________________________________ ERROR collecting src/cowrie/test/test_base_commands.py ____________________________________________________________________
../shell/fs.py:26: in <module>
PICKLE = pickle.load(open(CONFIG.get('shell', 'filesystem'), 'rb'))
../core/config.py:29: in get
return super(EnvironmentConfigParser, self).get(section, option, **kwargs)
/usr/lib/python3.7/configparser.py:780: in get
d = self._unify_values(section, vars)
/usr/lib/python3.7/configparser.py:1146: in _unify_values
raise NoSectionError(section) from None
E configparser.NoSectionError: No section: 'shell'
During handling of the above exception, another exception occurred:
test_base_commands.py:12: in <module>
from cowrie.shell import protocol
../shell/protocol.py:21: in <module>
from cowrie.shell import command
../shell/command.py:20: in <module>
from cowrie.shell import fs
../shell/fs.py:29: in <module>
exit(2)
/usr/lib/python3.7/_sitebuiltins.py:26: in __call__
raise SystemExit(code)
E SystemExit: 2
--------------------------------------------------------------------------------------- Captured stdout ---------------------------------------------------------------------------------------
ERROR: Config file not found: etc/cowrie.cfg.dist
________________________________________________________________________ ERROR collecting src/cowrie/test/test_echo.py ________________________________________________________________________
../shell/fs.py:26: in <module>
PICKLE = pickle.load(open(CONFIG.get('shell', 'filesystem'), 'rb'))
../core/config.py:29: in get
return super(EnvironmentConfigParser, self).get(section, option, **kwargs)
/usr/lib/python3.7/configparser.py:780: in get
d = self._unify_values(section, vars)
/usr/lib/python3.7/configparser.py:1146: in _unify_values
raise NoSectionError(section) from None
E configparser.NoSectionError: No section: 'shell'
During handling of the above exception, another exception occurred:
test_echo.py:16: in <module>
from cowrie.shell import protocol
../shell/protocol.py:21: in <module>
from cowrie.shell import command
../shell/command.py:20: in <module>
from cowrie.shell import fs
../shell/fs.py:29: in <module>
exit(2)
/usr/lib/python3.7/_sitebuiltins.py:26: in __call__
raise SystemExit(code)
E SystemExit: 2
--------------------------------------------------------------------------------------- Captured stdout ---------------------------------------------------------------------------------------
ERROR: Config file not found: etc/cowrie.cfg.dist
________________________________________________________________________ ERROR collecting src/cowrie/test/test_tftp.py ________________________________________________________________________
../shell/fs.py:26: in <module>
PICKLE = pickle.load(open(CONFIG.get('shell', 'filesystem'), 'rb'))
../core/config.py:29: in get
return super(EnvironmentConfigParser, self).get(section, option, **kwargs)
/usr/lib/python3.7/configparser.py:780: in get
d = self._unify_values(section, vars)
/usr/lib/python3.7/configparser.py:1146: in _unify_values
raise NoSectionError(section) from None
E configparser.NoSectionError: No section: 'shell'
During handling of the above exception, another exception occurred:
test_tftp.py:16: in <module>
from cowrie.shell import protocol
../shell/protocol.py:21: in <module>
from cowrie.shell import command
../shell/command.py:20: in <module>
from cowrie.shell import fs
../shell/fs.py:29: in <module>
exit(2)
/usr/lib/python3.7/_sitebuiltins.py:26: in __call__
raise SystemExit(code)
E SystemExit: 2
--------------------------------------------------------------------------------------- Captured stdout ---------------------------------------------------------------------------------------
ERROR: Config file not found: etc/cowrie.cfg.dist
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cowrie/core/config.py`
Content:
```
1 # Copyright (c) 2009-2014 Upi Tamminen <[email protected]>
2 # See the COPYRIGHT file for more information
3
4 """
5 This module contains ...
6 """
7
8 from __future__ import absolute_import, division
9
10 import configparser
11 import os
12
13
14 def to_environ_key(key):
15 return key.upper()
16
17
18 class EnvironmentConfigParser(configparser.ConfigParser):
19
20 def has_option(self, section, option):
21 if to_environ_key('_'.join((section, option))) in os.environ:
22 return True
23 return super(EnvironmentConfigParser, self).has_option(section, option)
24
25 def get(self, section, option, **kwargs):
26 key = to_environ_key('_'.join((section, option)))
27 if key in os.environ:
28 return os.environ[key]
29 return super(EnvironmentConfigParser, self).get(section, option, **kwargs)
30
31
32 def readConfigFile(cfgfile):
33 """
34 Read config files and return ConfigParser object
35
36 @param cfgfile: filename or array of filenames
37 @return: ConfigParser object
38 """
39 parser = EnvironmentConfigParser(interpolation=configparser.ExtendedInterpolation())
40 parser.read(cfgfile)
41 return parser
42
43
44 CONFIG = readConfigFile(("etc/cowrie.cfg.dist", "/etc/cowrie/cowrie.cfg", "etc/cowrie.cfg", "cowrie.cfg"))
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cowrie/core/config.py b/src/cowrie/core/config.py
--- a/src/cowrie/core/config.py
+++ b/src/cowrie/core/config.py
@@ -8,7 +8,8 @@
from __future__ import absolute_import, division
import configparser
-import os
+from os import environ
+from os.path import abspath, dirname, exists, join
def to_environ_key(key):
@@ -18,15 +19,16 @@
class EnvironmentConfigParser(configparser.ConfigParser):
def has_option(self, section, option):
- if to_environ_key('_'.join((section, option))) in os.environ:
+ if to_environ_key('_'.join((section, option))) in environ:
return True
return super(EnvironmentConfigParser, self).has_option(section, option)
def get(self, section, option, **kwargs):
key = to_environ_key('_'.join((section, option)))
- if key in os.environ:
- return os.environ[key]
- return super(EnvironmentConfigParser, self).get(section, option, **kwargs)
+ if key in environ:
+ return environ[key]
+ return super(EnvironmentConfigParser, self).get(
+ section, option, **kwargs)
def readConfigFile(cfgfile):
@@ -36,9 +38,26 @@
@param cfgfile: filename or array of filenames
@return: ConfigParser object
"""
- parser = EnvironmentConfigParser(interpolation=configparser.ExtendedInterpolation())
+ parser = EnvironmentConfigParser(
+ interpolation=configparser.ExtendedInterpolation())
parser.read(cfgfile)
return parser
-CONFIG = readConfigFile(("etc/cowrie.cfg.dist", "/etc/cowrie/cowrie.cfg", "etc/cowrie.cfg", "cowrie.cfg"))
+def get_config_path():
+ """Get absolute path to the config file
+ """
+ config_files = ["etc/cowrie/cowrie.cfg", "etc/cowrie.cfg",
+ "cowrie.cfg", "etc/cowrie.cfg.dist"]
+ current_path = abspath(dirname(__file__))
+ root = "/".join(current_path.split("/")[:-3])
+
+ for file in config_files:
+ absolute_path = join(root, file)
+ if exists(absolute_path):
+ return absolute_path
+
+ print("Config file not found")
+
+
+CONFIG = readConfigFile(get_config_path())
|
{"golden_diff": "diff --git a/src/cowrie/core/config.py b/src/cowrie/core/config.py\n--- a/src/cowrie/core/config.py\n+++ b/src/cowrie/core/config.py\n@@ -8,7 +8,8 @@\n from __future__ import absolute_import, division\n \n import configparser\n-import os\n+from os import environ\n+from os.path import abspath, dirname, exists, join\n \n \n def to_environ_key(key):\n@@ -18,15 +19,16 @@\n class EnvironmentConfigParser(configparser.ConfigParser):\n \n def has_option(self, section, option):\n- if to_environ_key('_'.join((section, option))) in os.environ:\n+ if to_environ_key('_'.join((section, option))) in environ:\n return True\n return super(EnvironmentConfigParser, self).has_option(section, option)\n \n def get(self, section, option, **kwargs):\n key = to_environ_key('_'.join((section, option)))\n- if key in os.environ:\n- return os.environ[key]\n- return super(EnvironmentConfigParser, self).get(section, option, **kwargs)\n+ if key in environ:\n+ return environ[key]\n+ return super(EnvironmentConfigParser, self).get(\n+ section, option, **kwargs)\n \n \n def readConfigFile(cfgfile):\n@@ -36,9 +38,26 @@\n @param cfgfile: filename or array of filenames\n @return: ConfigParser object\n \"\"\"\n- parser = EnvironmentConfigParser(interpolation=configparser.ExtendedInterpolation())\n+ parser = EnvironmentConfigParser(\n+ interpolation=configparser.ExtendedInterpolation())\n parser.read(cfgfile)\n return parser\n \n \n-CONFIG = readConfigFile((\"etc/cowrie.cfg.dist\", \"/etc/cowrie/cowrie.cfg\", \"etc/cowrie.cfg\", \"cowrie.cfg\"))\n+def get_config_path():\n+ \"\"\"Get absolute path to the config file\n+ \"\"\"\n+ config_files = [\"etc/cowrie/cowrie.cfg\", \"etc/cowrie.cfg\",\n+ \"cowrie.cfg\", \"etc/cowrie.cfg.dist\"]\n+ current_path = abspath(dirname(__file__))\n+ root = \"/\".join(current_path.split(\"/\")[:-3])\n+\n+ for file in config_files:\n+ absolute_path = join(root, file)\n+ if exists(absolute_path):\n+ return absolute_path\n+\n+ print(\"Config file not found\")\n+\n+\n+CONFIG = readConfigFile(get_config_path())\n", "issue": "Cowrie not set up for py.test framework\nSo I tried running the test in both python2 and python3. 
For python2 all the tests were passing but for python3 there was some error.\r\n\r\n```\r\n py.test --cov=cowrie \r\n===================================================================================== test session starts =====================================================================================\r\nplatform linux -- Python 3.7.2, pytest-4.2.0, py-1.7.0, pluggy-0.8.1\r\nrootdir: /home/mzfr/dev/cowrie, inifile:\r\nplugins: cov-2.6.1\r\ncollected 3 items / 3 errors \r\n\r\n=========================================================================================== ERRORS ============================================================================================\r\n___________________________________________________________________ ERROR collecting src/cowrie/test/test_base_commands.py ____________________________________________________________________\r\n../shell/fs.py:26: in <module>\r\n PICKLE = pickle.load(open(CONFIG.get('shell', 'filesystem'), 'rb'))\r\n../core/config.py:29: in get\r\n return super(EnvironmentConfigParser, self).get(section, option, **kwargs)\r\n/usr/lib/python3.7/configparser.py:780: in get\r\n d = self._unify_values(section, vars)\r\n/usr/lib/python3.7/configparser.py:1146: in _unify_values\r\n raise NoSectionError(section) from None\r\nE configparser.NoSectionError: No section: 'shell'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\ntest_base_commands.py:12: in <module>\r\n from cowrie.shell import protocol\r\n../shell/protocol.py:21: in <module>\r\n from cowrie.shell import command\r\n../shell/command.py:20: in <module>\r\n from cowrie.shell import fs\r\n../shell/fs.py:29: in <module>\r\n exit(2)\r\n/usr/lib/python3.7/_sitebuiltins.py:26: in __call__\r\n raise SystemExit(code)\r\nE SystemExit: 2\r\n--------------------------------------------------------------------------------------- Captured stdout ---------------------------------------------------------------------------------------\r\nERROR: Config file not found: etc/cowrie.cfg.dist\r\n________________________________________________________________________ ERROR collecting src/cowrie/test/test_echo.py ________________________________________________________________________\r\n../shell/fs.py:26: in <module>\r\n PICKLE = pickle.load(open(CONFIG.get('shell', 'filesystem'), 'rb'))\r\n../core/config.py:29: in get\r\n return super(EnvironmentConfigParser, self).get(section, option, **kwargs)\r\n/usr/lib/python3.7/configparser.py:780: in get\r\n d = self._unify_values(section, vars)\r\n/usr/lib/python3.7/configparser.py:1146: in _unify_values\r\n raise NoSectionError(section) from None\r\nE configparser.NoSectionError: No section: 'shell'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\ntest_echo.py:16: in <module>\r\n from cowrie.shell import protocol\r\n../shell/protocol.py:21: in <module>\r\n from cowrie.shell import command\r\n../shell/command.py:20: in <module>\r\n from cowrie.shell import fs\r\n../shell/fs.py:29: in <module>\r\n exit(2)\r\n/usr/lib/python3.7/_sitebuiltins.py:26: in __call__\r\n raise SystemExit(code)\r\nE SystemExit: 2\r\n--------------------------------------------------------------------------------------- Captured stdout ---------------------------------------------------------------------------------------\r\nERROR: Config file not found: etc/cowrie.cfg.dist\r\n________________________________________________________________________ ERROR collecting src/cowrie/test/test_tftp.py 
________________________________________________________________________\r\n../shell/fs.py:26: in <module>\r\n PICKLE = pickle.load(open(CONFIG.get('shell', 'filesystem'), 'rb'))\r\n../core/config.py:29: in get\r\n return super(EnvironmentConfigParser, self).get(section, option, **kwargs)\r\n/usr/lib/python3.7/configparser.py:780: in get\r\n d = self._unify_values(section, vars)\r\n/usr/lib/python3.7/configparser.py:1146: in _unify_values\r\n raise NoSectionError(section) from None\r\nE configparser.NoSectionError: No section: 'shell'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\ntest_tftp.py:16: in <module>\r\n from cowrie.shell import protocol\r\n../shell/protocol.py:21: in <module>\r\n from cowrie.shell import command\r\n../shell/command.py:20: in <module>\r\n from cowrie.shell import fs\r\n../shell/fs.py:29: in <module>\r\n exit(2)\r\n/usr/lib/python3.7/_sitebuiltins.py:26: in __call__\r\n raise SystemExit(code)\r\nE SystemExit: 2\r\n--------------------------------------------------------------------------------------- Captured stdout ---------------------------------------------------------------------------------------\r\nERROR: Config file not found: etc/cowrie.cfg.dist\r\n```\n", "before_files": [{"content": "# Copyright (c) 2009-2014 Upi Tamminen <[email protected]>\n# See the COPYRIGHT file for more information\n\n\"\"\"\nThis module contains ...\n\"\"\"\n\nfrom __future__ import absolute_import, division\n\nimport configparser\nimport os\n\n\ndef to_environ_key(key):\n return key.upper()\n\n\nclass EnvironmentConfigParser(configparser.ConfigParser):\n\n def has_option(self, section, option):\n if to_environ_key('_'.join((section, option))) in os.environ:\n return True\n return super(EnvironmentConfigParser, self).has_option(section, option)\n\n def get(self, section, option, **kwargs):\n key = to_environ_key('_'.join((section, option)))\n if key in os.environ:\n return os.environ[key]\n return super(EnvironmentConfigParser, self).get(section, option, **kwargs)\n\n\ndef readConfigFile(cfgfile):\n \"\"\"\n Read config files and return ConfigParser object\n\n @param cfgfile: filename or array of filenames\n @return: ConfigParser object\n \"\"\"\n parser = EnvironmentConfigParser(interpolation=configparser.ExtendedInterpolation())\n parser.read(cfgfile)\n return parser\n\n\nCONFIG = readConfigFile((\"etc/cowrie.cfg.dist\", \"/etc/cowrie/cowrie.cfg\", \"etc/cowrie.cfg\", \"cowrie.cfg\"))\n", "path": "src/cowrie/core/config.py"}], "after_files": [{"content": "# Copyright (c) 2009-2014 Upi Tamminen <[email protected]>\n# See the COPYRIGHT file for more information\n\n\"\"\"\nThis module contains ...\n\"\"\"\n\nfrom __future__ import absolute_import, division\n\nimport configparser\nfrom os import environ\nfrom os.path import abspath, dirname, exists, join\n\n\ndef to_environ_key(key):\n return key.upper()\n\n\nclass EnvironmentConfigParser(configparser.ConfigParser):\n\n def has_option(self, section, option):\n if to_environ_key('_'.join((section, option))) in environ:\n return True\n return super(EnvironmentConfigParser, self).has_option(section, option)\n\n def get(self, section, option, **kwargs):\n key = to_environ_key('_'.join((section, option)))\n if key in environ:\n return environ[key]\n return super(EnvironmentConfigParser, self).get(\n section, option, **kwargs)\n\n\ndef readConfigFile(cfgfile):\n \"\"\"\n Read config files and return ConfigParser object\n\n @param cfgfile: filename or array of filenames\n @return: ConfigParser object\n 
\"\"\"\n parser = EnvironmentConfigParser(\n interpolation=configparser.ExtendedInterpolation())\n parser.read(cfgfile)\n return parser\n\n\ndef get_config_path():\n \"\"\"Get absolute path to the config file\n \"\"\"\n config_files = [\"etc/cowrie/cowrie.cfg\", \"etc/cowrie.cfg\",\n \"cowrie.cfg\", \"etc/cowrie.cfg.dist\"]\n current_path = abspath(dirname(__file__))\n root = \"/\".join(current_path.split(\"/\")[:-3])\n\n for file in config_files:\n absolute_path = join(root, file)\n if exists(absolute_path):\n return absolute_path\n\n print(\"Config file not found\")\n\n\nCONFIG = readConfigFile(get_config_path())\n", "path": "src/cowrie/core/config.py"}]}
| 1,656 | 540 |
gh_patches_debug_15272 | rasdani/github-patches | git_diff | chainer__chainer-1539 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Invalid CuPy cache problem with different version of CUDAs
When a user update CUDA, caches of CuPy for old CUDA is sometimes incompatible with new one. We need to check CUDA version and to store kernel cache with its information.
@cosmo__ reported this problem on Twitter. Thank you!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cupy/cuda/compiler.py`
Content:
```
1 import hashlib
2 import os
3 import re
4 import subprocess
5 import sys
6 import tempfile
7
8 import filelock
9 import six
10
11 from cupy.cuda import device
12 from cupy.cuda import function
13
14
15 def _get_arch():
16 cc = device.Device().compute_capability
17 return 'sm_%s' % cc
18
19
20 class TemporaryDirectory(object):
21
22 def __enter__(self):
23 self.path = tempfile.mkdtemp()
24 return self.path
25
26 def __exit__(self, exc_type, exc_value, traceback):
27 if exc_value is not None:
28 return
29
30 for name in os.listdir(self.path):
31 os.unlink(os.path.join(self.path, name))
32 os.rmdir(self.path)
33
34
35 def _run_nvcc(cmd, cwd):
36 try:
37 return subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT)
38 except subprocess.CalledProcessError as e:
39 msg = ('`nvcc` command returns non-zero exit status. \n'
40 'command: {0}\n'
41 'return-code: {1}\n'
42 'stdout/stderr: \n'
43 '{2}'.format(e.cmd, e.returncode, e.output))
44 raise RuntimeError(msg)
45 except OSError as e:
46 msg = 'Failed to run `nvcc` command. ' \
47 'Check PATH environment variable: ' \
48 + str(e)
49 raise OSError(msg)
50
51
52 def nvcc(source, options=(), arch=None):
53 if not arch:
54 arch = _get_arch()
55 cmd = ['nvcc', '--cubin', '-arch', arch] + list(options)
56
57 with TemporaryDirectory() as root_dir:
58 path = os.path.join(root_dir, 'kern')
59 cu_path = '%s.cu' % path
60 cubin_path = '%s.cubin' % path
61
62 with open(cu_path, 'w') as cu_file:
63 cu_file.write(source)
64
65 cmd.append(cu_path)
66 _run_nvcc(cmd, root_dir)
67
68 with open(cubin_path, 'rb') as bin_file:
69 return bin_file.read()
70
71
72 def preprocess(source, options=()):
73 cmd = ['nvcc', '--preprocess'] + list(options)
74 with TemporaryDirectory() as root_dir:
75 path = os.path.join(root_dir, 'kern')
76 cu_path = '%s.cu' % path
77
78 with open(cu_path, 'w') as cu_file:
79 cu_file.write(source)
80
81 cmd.append(cu_path)
82 pp_src = _run_nvcc(cmd, root_dir)
83
84 if isinstance(pp_src, six.binary_type):
85 pp_src = pp_src.decode('utf-8')
86 return re.sub('(?m)^#.*$', '', pp_src)
87
88
89 _default_cache_dir = os.path.expanduser('~/.cupy/kernel_cache')
90
91
92 def get_cache_dir():
93 return os.environ.get('CUPY_CACHE_DIR', _default_cache_dir)
94
95
96 _empty_file_preprocess_cache = {}
97
98
99 def compile_with_cache(source, options=(), arch=None, cache_dir=None):
100 global _empty_file_preprocess_cache
101 if cache_dir is None:
102 cache_dir = get_cache_dir()
103 if arch is None:
104 arch = _get_arch()
105
106 if 'win32' == sys.platform:
107 options += ('-Xcompiler', '/wd 4819')
108 if sys.maxsize == 9223372036854775807:
109 options += '-m64',
110 elif sys.maxsize == 2147483647:
111 options += '-m32',
112
113 env = (arch, options)
114 if '#include' in source:
115 pp_src = '%s %s' % (env, preprocess(source, options))
116 else:
117 base = _empty_file_preprocess_cache.get(env, None)
118 if base is None:
119 base = _empty_file_preprocess_cache[env] = preprocess('', options)
120 pp_src = '%s %s %s' % (env, base, source)
121
122 if isinstance(pp_src, six.text_type):
123 pp_src = pp_src.encode('utf-8')
124 name = '%s.cubin' % hashlib.md5(pp_src).hexdigest()
125
126 mod = function.Module()
127
128 if not os.path.exists(cache_dir):
129 os.makedirs(cache_dir)
130
131 lock_path = os.path.join(cache_dir, 'lock_file.lock')
132
133 path = os.path.join(cache_dir, name)
134 with filelock.FileLock(lock_path) as lock:
135 if os.path.exists(path):
136 with open(path, 'rb') as file:
137 cubin = file.read()
138 mod.load(cubin)
139 else:
140 lock.release()
141 cubin = nvcc(source, options, arch)
142 mod.load(cubin)
143 lock.acquire()
144 with open(path, 'wb') as cubin_file:
145 cubin_file.write(cubin)
146
147 return mod
148
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cupy/cuda/compiler.py b/cupy/cuda/compiler.py
--- a/cupy/cuda/compiler.py
+++ b/cupy/cuda/compiler.py
@@ -12,6 +12,18 @@
from cupy.cuda import function
+_nvcc_version = None
+
+
+def _get_nvcc_version():
+ global _nvcc_version
+ if _nvcc_version is None:
+ cmd = ['nvcc', '--version']
+ _nvcc_version = _run_nvcc(cmd, '.')
+
+ return _nvcc_version
+
+
def _get_arch():
cc = device.Device().compute_capability
return 'sm_%s' % cc
@@ -110,7 +122,7 @@
elif sys.maxsize == 2147483647:
options += '-m32',
- env = (arch, options)
+ env = (arch, options, _get_nvcc_version())
if '#include' in source:
pp_src = '%s %s' % (env, preprocess(source, options))
else:
|
{"golden_diff": "diff --git a/cupy/cuda/compiler.py b/cupy/cuda/compiler.py\n--- a/cupy/cuda/compiler.py\n+++ b/cupy/cuda/compiler.py\n@@ -12,6 +12,18 @@\n from cupy.cuda import function\n \n \n+_nvcc_version = None\n+\n+\n+def _get_nvcc_version():\n+ global _nvcc_version\n+ if _nvcc_version is None:\n+ cmd = ['nvcc', '--version']\n+ _nvcc_version = _run_nvcc(cmd, '.')\n+\n+ return _nvcc_version\n+\n+\n def _get_arch():\n cc = device.Device().compute_capability\n return 'sm_%s' % cc\n@@ -110,7 +122,7 @@\n elif sys.maxsize == 2147483647:\n options += '-m32',\n \n- env = (arch, options)\n+ env = (arch, options, _get_nvcc_version())\n if '#include' in source:\n pp_src = '%s %s' % (env, preprocess(source, options))\n else:\n", "issue": "Invalid CuPy cache problem with different version of CUDAs\nWhen a user update CUDA, caches of CuPy for old CUDA is sometimes incompatible with new one. We need to check CUDA version and to store kernel cache with its information.\n\n@cosmo__ reported this problem on Twitter. Thank you!\n\n", "before_files": [{"content": "import hashlib\nimport os\nimport re\nimport subprocess\nimport sys\nimport tempfile\n\nimport filelock\nimport six\n\nfrom cupy.cuda import device\nfrom cupy.cuda import function\n\n\ndef _get_arch():\n cc = device.Device().compute_capability\n return 'sm_%s' % cc\n\n\nclass TemporaryDirectory(object):\n\n def __enter__(self):\n self.path = tempfile.mkdtemp()\n return self.path\n\n def __exit__(self, exc_type, exc_value, traceback):\n if exc_value is not None:\n return\n\n for name in os.listdir(self.path):\n os.unlink(os.path.join(self.path, name))\n os.rmdir(self.path)\n\n\ndef _run_nvcc(cmd, cwd):\n try:\n return subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n msg = ('`nvcc` command returns non-zero exit status. \\n'\n 'command: {0}\\n'\n 'return-code: {1}\\n'\n 'stdout/stderr: \\n'\n '{2}'.format(e.cmd, e.returncode, e.output))\n raise RuntimeError(msg)\n except OSError as e:\n msg = 'Failed to run `nvcc` command. 
' \\\n 'Check PATH environment variable: ' \\\n + str(e)\n raise OSError(msg)\n\n\ndef nvcc(source, options=(), arch=None):\n if not arch:\n arch = _get_arch()\n cmd = ['nvcc', '--cubin', '-arch', arch] + list(options)\n\n with TemporaryDirectory() as root_dir:\n path = os.path.join(root_dir, 'kern')\n cu_path = '%s.cu' % path\n cubin_path = '%s.cubin' % path\n\n with open(cu_path, 'w') as cu_file:\n cu_file.write(source)\n\n cmd.append(cu_path)\n _run_nvcc(cmd, root_dir)\n\n with open(cubin_path, 'rb') as bin_file:\n return bin_file.read()\n\n\ndef preprocess(source, options=()):\n cmd = ['nvcc', '--preprocess'] + list(options)\n with TemporaryDirectory() as root_dir:\n path = os.path.join(root_dir, 'kern')\n cu_path = '%s.cu' % path\n\n with open(cu_path, 'w') as cu_file:\n cu_file.write(source)\n\n cmd.append(cu_path)\n pp_src = _run_nvcc(cmd, root_dir)\n\n if isinstance(pp_src, six.binary_type):\n pp_src = pp_src.decode('utf-8')\n return re.sub('(?m)^#.*$', '', pp_src)\n\n\n_default_cache_dir = os.path.expanduser('~/.cupy/kernel_cache')\n\n\ndef get_cache_dir():\n return os.environ.get('CUPY_CACHE_DIR', _default_cache_dir)\n\n\n_empty_file_preprocess_cache = {}\n\n\ndef compile_with_cache(source, options=(), arch=None, cache_dir=None):\n global _empty_file_preprocess_cache\n if cache_dir is None:\n cache_dir = get_cache_dir()\n if arch is None:\n arch = _get_arch()\n\n if 'win32' == sys.platform:\n options += ('-Xcompiler', '/wd 4819')\n if sys.maxsize == 9223372036854775807:\n options += '-m64',\n elif sys.maxsize == 2147483647:\n options += '-m32',\n\n env = (arch, options)\n if '#include' in source:\n pp_src = '%s %s' % (env, preprocess(source, options))\n else:\n base = _empty_file_preprocess_cache.get(env, None)\n if base is None:\n base = _empty_file_preprocess_cache[env] = preprocess('', options)\n pp_src = '%s %s %s' % (env, base, source)\n\n if isinstance(pp_src, six.text_type):\n pp_src = pp_src.encode('utf-8')\n name = '%s.cubin' % hashlib.md5(pp_src).hexdigest()\n\n mod = function.Module()\n\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n\n lock_path = os.path.join(cache_dir, 'lock_file.lock')\n\n path = os.path.join(cache_dir, name)\n with filelock.FileLock(lock_path) as lock:\n if os.path.exists(path):\n with open(path, 'rb') as file:\n cubin = file.read()\n mod.load(cubin)\n else:\n lock.release()\n cubin = nvcc(source, options, arch)\n mod.load(cubin)\n lock.acquire()\n with open(path, 'wb') as cubin_file:\n cubin_file.write(cubin)\n\n return mod\n", "path": "cupy/cuda/compiler.py"}], "after_files": [{"content": "import hashlib\nimport os\nimport re\nimport subprocess\nimport sys\nimport tempfile\n\nimport filelock\nimport six\n\nfrom cupy.cuda import device\nfrom cupy.cuda import function\n\n\n_nvcc_version = None\n\n\ndef _get_nvcc_version():\n global _nvcc_version\n if _nvcc_version is None:\n cmd = ['nvcc', '--version']\n _nvcc_version = _run_nvcc(cmd, '.')\n\n return _nvcc_version\n\n\ndef _get_arch():\n cc = device.Device().compute_capability\n return 'sm_%s' % cc\n\n\nclass TemporaryDirectory(object):\n\n def __enter__(self):\n self.path = tempfile.mkdtemp()\n return self.path\n\n def __exit__(self, exc_type, exc_value, traceback):\n if exc_value is not None:\n return\n\n for name in os.listdir(self.path):\n os.unlink(os.path.join(self.path, name))\n os.rmdir(self.path)\n\n\ndef _run_nvcc(cmd, cwd):\n try:\n return subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n msg = ('`nvcc` command 
returns non-zero exit status. \\n'\n 'command: {0}\\n'\n 'return-code: {1}\\n'\n 'stdout/stderr: \\n'\n '{2}'.format(e.cmd, e.returncode, e.output))\n raise RuntimeError(msg)\n except OSError as e:\n msg = 'Failed to run `nvcc` command. ' \\\n 'Check PATH environment variable: ' \\\n + str(e)\n raise OSError(msg)\n\n\ndef nvcc(source, options=(), arch=None):\n if not arch:\n arch = _get_arch()\n cmd = ['nvcc', '--cubin', '-arch', arch] + list(options)\n\n with TemporaryDirectory() as root_dir:\n path = os.path.join(root_dir, 'kern')\n cu_path = '%s.cu' % path\n cubin_path = '%s.cubin' % path\n\n with open(cu_path, 'w') as cu_file:\n cu_file.write(source)\n\n cmd.append(cu_path)\n _run_nvcc(cmd, root_dir)\n\n with open(cubin_path, 'rb') as bin_file:\n return bin_file.read()\n\n\ndef preprocess(source, options=()):\n cmd = ['nvcc', '--preprocess'] + list(options)\n with TemporaryDirectory() as root_dir:\n path = os.path.join(root_dir, 'kern')\n cu_path = '%s.cu' % path\n\n with open(cu_path, 'w') as cu_file:\n cu_file.write(source)\n\n cmd.append(cu_path)\n pp_src = _run_nvcc(cmd, root_dir)\n\n if isinstance(pp_src, six.binary_type):\n pp_src = pp_src.decode('utf-8')\n return re.sub('(?m)^#.*$', '', pp_src)\n\n\n_default_cache_dir = os.path.expanduser('~/.cupy/kernel_cache')\n\n\ndef get_cache_dir():\n return os.environ.get('CUPY_CACHE_DIR', _default_cache_dir)\n\n\n_empty_file_preprocess_cache = {}\n\n\ndef compile_with_cache(source, options=(), arch=None, cache_dir=None):\n global _empty_file_preprocess_cache\n if cache_dir is None:\n cache_dir = get_cache_dir()\n if arch is None:\n arch = _get_arch()\n\n if 'win32' == sys.platform:\n options += ('-Xcompiler', '/wd 4819')\n if sys.maxsize == 9223372036854775807:\n options += '-m64',\n elif sys.maxsize == 2147483647:\n options += '-m32',\n\n env = (arch, options, _get_nvcc_version())\n if '#include' in source:\n pp_src = '%s %s' % (env, preprocess(source, options))\n else:\n base = _empty_file_preprocess_cache.get(env, None)\n if base is None:\n base = _empty_file_preprocess_cache[env] = preprocess('', options)\n pp_src = '%s %s %s' % (env, base, source)\n\n if isinstance(pp_src, six.text_type):\n pp_src = pp_src.encode('utf-8')\n name = '%s.cubin' % hashlib.md5(pp_src).hexdigest()\n\n mod = function.Module()\n\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n\n lock_path = os.path.join(cache_dir, 'lock_file.lock')\n\n path = os.path.join(cache_dir, name)\n with filelock.FileLock(lock_path) as lock:\n if os.path.exists(path):\n with open(path, 'rb') as file:\n cubin = file.read()\n mod.load(cubin)\n else:\n lock.release()\n cubin = nvcc(source, options, arch)\n mod.load(cubin)\n lock.acquire()\n with open(path, 'wb') as cubin_file:\n cubin_file.write(cubin)\n\n return mod\n", "path": "cupy/cuda/compiler.py"}]}
| 1,732 | 247 |
gh_patches_debug_63916 | rasdani/github-patches | git_diff | tensorflow__addons-897 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Nightly build break
**System information**
- TensorFlow version and how it was installed (source or binary): tf-nightly-**2.2.0.dev20200115**
- TensorFlow-Addons version and how it was installed (source or binary): tfa-nightly-**0.8.0.dev20200115**
**Describe the bug**
Hi, it looks like [this commit](https://github.com/tensorflow/addons/commit/3aae7732998cb233234a2948010b9aaafc24e920) causes the latest nightly build to fail on import
```
----> 1 import tensorflow_addons
/usr/local/lib/python3.6/dist-packages/tensorflow_addons/__init__.py in <module>()
30
31 # Cleanup symbols to avoid polluting namespace.
---> 32 del absolute_import
33 del division
34 del print_function
NameError: name 'absolute_import' is not defined
```
@seanpmorgan
**Code to reproduce the issue**
[colab](https://colab.research.google.com/drive/1fxRshVv0FPJNHdOqWC4GySjPJ_TdJTJU#scrollTo=TTC3gzRLRAvY)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tensorflow_addons/__init__.py`
Content:
```
1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """Useful extra functionality for TensorFlow maintained by SIG-addons."""
16
17 # Local project imports
18 from tensorflow_addons import activations
19 from tensorflow_addons import callbacks
20 from tensorflow_addons import image
21 from tensorflow_addons import layers
22 from tensorflow_addons import losses
23 from tensorflow_addons import metrics
24 from tensorflow_addons import optimizers
25 from tensorflow_addons import rnn
26 from tensorflow_addons import seq2seq
27 from tensorflow_addons import text
28
29 from tensorflow_addons.version import __version__
30
31 # Cleanup symbols to avoid polluting namespace.
32 del absolute_import
33 del division
34 del print_function
35
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tensorflow_addons/__init__.py b/tensorflow_addons/__init__.py
--- a/tensorflow_addons/__init__.py
+++ b/tensorflow_addons/__init__.py
@@ -27,8 +27,3 @@
from tensorflow_addons import text
from tensorflow_addons.version import __version__
-
-# Cleanup symbols to avoid polluting namespace.
-del absolute_import
-del division
-del print_function
|
{"golden_diff": "diff --git a/tensorflow_addons/__init__.py b/tensorflow_addons/__init__.py\n--- a/tensorflow_addons/__init__.py\n+++ b/tensorflow_addons/__init__.py\n@@ -27,8 +27,3 @@\n from tensorflow_addons import text\n \n from tensorflow_addons.version import __version__\n-\n-# Cleanup symbols to avoid polluting namespace.\n-del absolute_import\n-del division\n-del print_function\n", "issue": "Nightly build break\n**System information**\r\n- TensorFlow version and how it was installed (source or binary): tf-nightly-**2.2.0.dev20200115** \r\n- TensorFlow-Addons version and how it was installed (source or binary): tfa-nightly-**0.8.0.dev20200115**\r\n\r\n**Describe the bug**\r\nHi, it looks like [this commit](https://github.com/tensorflow/addons/commit/3aae7732998cb233234a2948010b9aaafc24e920) causes the latest nightly build to fail on import\r\n\r\n```\r\n----> 1 import tensorflow_addons\r\n\r\n/usr/local/lib/python3.6/dist-packages/tensorflow_addons/__init__.py in <module>()\r\n 30 \r\n 31 # Cleanup symbols to avoid polluting namespace.\r\n---> 32 del absolute_import\r\n 33 del division\r\n 34 del print_function\r\n\r\nNameError: name 'absolute_import' is not defined\r\n```\r\n@seanpmorgan \r\n\r\n**Code to reproduce the issue**\r\n[colab](https://colab.research.google.com/drive/1fxRshVv0FPJNHdOqWC4GySjPJ_TdJTJU#scrollTo=TTC3gzRLRAvY)\r\n\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Useful extra functionality for TensorFlow maintained by SIG-addons.\"\"\"\n\n# Local project imports\nfrom tensorflow_addons import activations\nfrom tensorflow_addons import callbacks\nfrom tensorflow_addons import image\nfrom tensorflow_addons import layers\nfrom tensorflow_addons import losses\nfrom tensorflow_addons import metrics\nfrom tensorflow_addons import optimizers\nfrom tensorflow_addons import rnn\nfrom tensorflow_addons import seq2seq\nfrom tensorflow_addons import text\n\nfrom tensorflow_addons.version import __version__\n\n# Cleanup symbols to avoid polluting namespace.\ndel absolute_import\ndel division\ndel print_function\n", "path": "tensorflow_addons/__init__.py"}], "after_files": [{"content": "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Useful extra functionality for TensorFlow maintained by SIG-addons.\"\"\"\n\n# Local project imports\nfrom tensorflow_addons import activations\nfrom tensorflow_addons import callbacks\nfrom tensorflow_addons import image\nfrom tensorflow_addons import layers\nfrom tensorflow_addons import losses\nfrom tensorflow_addons import metrics\nfrom tensorflow_addons import optimizers\nfrom tensorflow_addons import rnn\nfrom tensorflow_addons import seq2seq\nfrom tensorflow_addons import text\n\nfrom tensorflow_addons.version import __version__\n", "path": "tensorflow_addons/__init__.py"}]}
| 891 | 98 |
gh_patches_debug_13053 | rasdani/github-patches | git_diff | pymodbus-dev__pymodbus-921 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError: module 'asyncio' has no attribute 'exceptions'
```
Traceback (most recent call last):
File "/home//.local/bin/pymodbus.server", line 6, in <module>
from pymodbus.repl.server.main import server
File "/home//.local/lib/python3.7/site-packages/pymodbus/repl/server/main.py", line 16, in <module>
CANCELLED_ERROR = asyncio.exceptions.CancelledError
AttributeError: module 'asyncio' has no attribute 'exceptions'
```
in the code
```
if IS_PYTHON3 and PYTHON_VERSION > (3, 7):
CANCELLED_ERROR = asyncio.exceptions.CancelledError
else:
CANCELLED_ERROR = asyncio.CancelledError
```
python 3.7 at least 3.7.3 doesn't have `asyncio.exceptions`
Maybe it was supposed to be >= 3.8.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pymodbus/repl/server/main.py`
Content:
```
1 """Repl server main.
2
3 Copyright (c) 2020 by RiptideIO
4 All rights reserved.
5 """
6 import sys
7 import logging
8 import asyncio
9 import json
10 import click
11 from pymodbus.framer.socket_framer import ModbusSocketFramer
12 from pymodbus.server.reactive.main import (
13 ReactiveServer,
14 DEFAULT_FRAMER,
15 DEFUALT_HANDLERS,
16 )
17 from pymodbus.server.reactive.default_config import DEFUALT_CONFIG
18 from pymodbus.repl.server.cli import run_repl
19
20 if sys.version_info > (3, 7):
21 CANCELLED_ERROR = asyncio.exceptions.CancelledError
22 else:
23 CANCELLED_ERROR = asyncio.CancelledError # pylint: disable=invalid-name
24
25
26 @click.group("ReactiveModbusServer")
27 @click.option("--host", default="localhost", help="Host address")
28 @click.option("--web-port", default=8080, help="Web app port")
29 @click.option(
30 "--broadcast-support",
31 is_flag=True,
32 default=False,
33 help="Support broadcast messages",
34 )
35 @click.option(
36 "--repl/--no-repl",
37 is_flag=True,
38 default=True,
39 help="Enable/Disable repl for server",
40 )
41 @click.option(
42 "--verbose", is_flag=True, help="Run with debug logs enabled for pymodbus"
43 )
44 @click.pass_context
45 def server(ctx, host, web_port, broadcast_support, repl, verbose):
46 """Run server code."""
47 FORMAT = ( # pylint: disable=invalid-name
48 "%(asctime)-15s %(threadName)-15s"
49 " %(levelname)-8s %(module)-15s:%(lineno)-8s %(message)s"
50 )
51 pymodbus_logger = logging.getLogger("pymodbus")
52 logging.basicConfig(format=FORMAT) # NOSONAR
53 if verbose:
54 pymodbus_logger.setLevel(logging.DEBUG)
55 else:
56 pymodbus_logger.setLevel(logging.ERROR)
57
58 ctx.obj = {
59 "repl": repl,
60 "host": host,
61 "web_port": web_port,
62 "broadcast": broadcast_support,
63 }
64
65
66 @server.command("run")
67 @click.option(
68 "--modbus-server",
69 default="tcp",
70 type=click.Choice(["tcp", "serial", "tls", "udp"], case_sensitive=False),
71 help="Modbus server",
72 )
73 @click.option(
74 "--modbus-framer",
75 default="socket",
76 type=click.Choice(
77 ["socket", "rtu", "tls", "ascii", "binary"], case_sensitive=False
78 ),
79 help="Modbus framer to use",
80 )
81 @click.option("--modbus-port", default="5020", help="Modbus port")
82 @click.option(
83 "--modbus-unit-id", default=[1], type=int, multiple=True, help="Modbus unit id"
84 )
85 @click.option(
86 "--modbus-config",
87 type=click.Path(exists=True),
88 help="Path to additional modbus server config",
89 )
90 @click.option(
91 "-r",
92 "--randomize",
93 default=0,
94 help="Randomize every `r` reads."
95 " 0=never, 1=always, "
96 "2=every-second-read, "
97 "and so on. "
98 "Applicable IR and DI.",
99 )
100 @click.pass_context
101 def run(
102 ctx,
103 modbus_server,
104 modbus_framer,
105 modbus_port,
106 modbus_unit_id,
107 modbus_config,
108 randomize,
109 ):
110 """Run Reactive Modbus server.
111
112 Exposing REST endpoint for response manipulation.
113 """
114 repl = ctx.obj.pop("repl")
115 web_app_config = ctx.obj
116 loop = asyncio.get_event_loop()
117 framer = DEFAULT_FRAMER.get(modbus_framer, ModbusSocketFramer)
118 if modbus_config:
119 with open(modbus_config) as my_file: # pylint: disable=unspecified-encoding
120 modbus_config = json.load(my_file)
121 else:
122 modbus_config = DEFUALT_CONFIG
123 modbus_config = modbus_config.get(modbus_server, {})
124 if modbus_server != "serial":
125 modbus_port = int(modbus_port)
126 handler = modbus_config.pop("handler", "ModbusConnectedRequestHandler")
127 else:
128 handler = modbus_config.pop("handler", "ModbusSingleRequestHandler")
129 handler = DEFUALT_HANDLERS.get(handler.strip())
130
131 modbus_config["handler"] = handler
132 modbus_config["randomize"] = randomize
133 app = ReactiveServer.factory(
134 modbus_server,
135 framer,
136 modbus_port=modbus_port,
137 unit=modbus_unit_id,
138 loop=loop,
139 **web_app_config,
140 **modbus_config
141 )
142 try:
143 if repl:
144 loop.run_until_complete(app.run_async())
145
146 loop.run_until_complete(run_repl(app))
147 loop.run_forever()
148 else:
149 app.run()
150
151 except CANCELLED_ERROR:
152 print("Done!!!!!")
153
154
155 if __name__ == "__main__":
156 server() # pylint: disable=no-value-for-parameter
157
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pymodbus/repl/server/main.py b/pymodbus/repl/server/main.py
--- a/pymodbus/repl/server/main.py
+++ b/pymodbus/repl/server/main.py
@@ -3,7 +3,6 @@
Copyright (c) 2020 by RiptideIO
All rights reserved.
"""
-import sys
import logging
import asyncio
import json
@@ -17,10 +16,7 @@
from pymodbus.server.reactive.default_config import DEFUALT_CONFIG
from pymodbus.repl.server.cli import run_repl
-if sys.version_info > (3, 7):
- CANCELLED_ERROR = asyncio.exceptions.CancelledError
-else:
- CANCELLED_ERROR = asyncio.CancelledError # pylint: disable=invalid-name
+CANCELLED_ERROR = asyncio.exceptions.CancelledError
@click.group("ReactiveModbusServer")
|
{"golden_diff": "diff --git a/pymodbus/repl/server/main.py b/pymodbus/repl/server/main.py\n--- a/pymodbus/repl/server/main.py\n+++ b/pymodbus/repl/server/main.py\n@@ -3,7 +3,6 @@\n Copyright (c) 2020 by RiptideIO\n All rights reserved.\n \"\"\"\n-import sys\n import logging\n import asyncio\n import json\n@@ -17,10 +16,7 @@\n from pymodbus.server.reactive.default_config import DEFUALT_CONFIG\n from pymodbus.repl.server.cli import run_repl\n \n-if sys.version_info > (3, 7):\n- CANCELLED_ERROR = asyncio.exceptions.CancelledError\n-else:\n- CANCELLED_ERROR = asyncio.CancelledError # pylint: disable=invalid-name\n+CANCELLED_ERROR = asyncio.exceptions.CancelledError\n \n \n @click.group(\"ReactiveModbusServer\")\n", "issue": "AttributeError: module 'asyncio' has no attribute 'exceptions'\n```\r\nTraceback (most recent call last):\r\n File \"/home//.local/bin/pymodbus.server\", line 6, in <module>\r\n from pymodbus.repl.server.main import server\r\n File \"/home//.local/lib/python3.7/site-packages/pymodbus/repl/server/main.py\", line 16, in <module>\r\n CANCELLED_ERROR = asyncio.exceptions.CancelledError\r\nAttributeError: module 'asyncio' has no attribute 'exceptions'\r\n```\r\n\r\nin the code \r\n\r\n```\r\nif IS_PYTHON3 and PYTHON_VERSION > (3, 7):\r\n CANCELLED_ERROR = asyncio.exceptions.CancelledError\r\nelse:\r\n CANCELLED_ERROR = asyncio.CancelledError\r\n\r\n```\r\npython 3.7 at least 3.7.3 doesn't have `asyncio.exceptions`\r\nMaybe it was supposed to be >= 3.8.\r\n\n", "before_files": [{"content": "\"\"\"Repl server main.\n\nCopyright (c) 2020 by RiptideIO\nAll rights reserved.\n\"\"\"\nimport sys\nimport logging\nimport asyncio\nimport json\nimport click\nfrom pymodbus.framer.socket_framer import ModbusSocketFramer\nfrom pymodbus.server.reactive.main import (\n ReactiveServer,\n DEFAULT_FRAMER,\n DEFUALT_HANDLERS,\n)\nfrom pymodbus.server.reactive.default_config import DEFUALT_CONFIG\nfrom pymodbus.repl.server.cli import run_repl\n\nif sys.version_info > (3, 7):\n CANCELLED_ERROR = asyncio.exceptions.CancelledError\nelse:\n CANCELLED_ERROR = asyncio.CancelledError # pylint: disable=invalid-name\n\n\[email protected](\"ReactiveModbusServer\")\[email protected](\"--host\", default=\"localhost\", help=\"Host address\")\[email protected](\"--web-port\", default=8080, help=\"Web app port\")\[email protected](\n \"--broadcast-support\",\n is_flag=True,\n default=False,\n help=\"Support broadcast messages\",\n)\[email protected](\n \"--repl/--no-repl\",\n is_flag=True,\n default=True,\n help=\"Enable/Disable repl for server\",\n)\[email protected](\n \"--verbose\", is_flag=True, help=\"Run with debug logs enabled for pymodbus\"\n)\[email protected]_context\ndef server(ctx, host, web_port, broadcast_support, repl, verbose):\n \"\"\"Run server code.\"\"\"\n FORMAT = ( # pylint: disable=invalid-name\n \"%(asctime)-15s %(threadName)-15s\"\n \" %(levelname)-8s %(module)-15s:%(lineno)-8s %(message)s\"\n )\n pymodbus_logger = logging.getLogger(\"pymodbus\")\n logging.basicConfig(format=FORMAT) # NOSONAR\n if verbose:\n pymodbus_logger.setLevel(logging.DEBUG)\n else:\n pymodbus_logger.setLevel(logging.ERROR)\n\n ctx.obj = {\n \"repl\": repl,\n \"host\": host,\n \"web_port\": web_port,\n \"broadcast\": broadcast_support,\n }\n\n\[email protected](\"run\")\[email protected](\n \"--modbus-server\",\n default=\"tcp\",\n type=click.Choice([\"tcp\", \"serial\", \"tls\", \"udp\"], case_sensitive=False),\n help=\"Modbus server\",\n)\[email protected](\n \"--modbus-framer\",\n default=\"socket\",\n 
type=click.Choice(\n [\"socket\", \"rtu\", \"tls\", \"ascii\", \"binary\"], case_sensitive=False\n ),\n help=\"Modbus framer to use\",\n)\[email protected](\"--modbus-port\", default=\"5020\", help=\"Modbus port\")\[email protected](\n \"--modbus-unit-id\", default=[1], type=int, multiple=True, help=\"Modbus unit id\"\n)\[email protected](\n \"--modbus-config\",\n type=click.Path(exists=True),\n help=\"Path to additional modbus server config\",\n)\[email protected](\n \"-r\",\n \"--randomize\",\n default=0,\n help=\"Randomize every `r` reads.\"\n \" 0=never, 1=always, \"\n \"2=every-second-read, \"\n \"and so on. \"\n \"Applicable IR and DI.\",\n)\[email protected]_context\ndef run(\n ctx,\n modbus_server,\n modbus_framer,\n modbus_port,\n modbus_unit_id,\n modbus_config,\n randomize,\n):\n \"\"\"Run Reactive Modbus server.\n\n Exposing REST endpoint for response manipulation.\n \"\"\"\n repl = ctx.obj.pop(\"repl\")\n web_app_config = ctx.obj\n loop = asyncio.get_event_loop()\n framer = DEFAULT_FRAMER.get(modbus_framer, ModbusSocketFramer)\n if modbus_config:\n with open(modbus_config) as my_file: # pylint: disable=unspecified-encoding\n modbus_config = json.load(my_file)\n else:\n modbus_config = DEFUALT_CONFIG\n modbus_config = modbus_config.get(modbus_server, {})\n if modbus_server != \"serial\":\n modbus_port = int(modbus_port)\n handler = modbus_config.pop(\"handler\", \"ModbusConnectedRequestHandler\")\n else:\n handler = modbus_config.pop(\"handler\", \"ModbusSingleRequestHandler\")\n handler = DEFUALT_HANDLERS.get(handler.strip())\n\n modbus_config[\"handler\"] = handler\n modbus_config[\"randomize\"] = randomize\n app = ReactiveServer.factory(\n modbus_server,\n framer,\n modbus_port=modbus_port,\n unit=modbus_unit_id,\n loop=loop,\n **web_app_config,\n **modbus_config\n )\n try:\n if repl:\n loop.run_until_complete(app.run_async())\n\n loop.run_until_complete(run_repl(app))\n loop.run_forever()\n else:\n app.run()\n\n except CANCELLED_ERROR:\n print(\"Done!!!!!\")\n\n\nif __name__ == \"__main__\":\n server() # pylint: disable=no-value-for-parameter\n", "path": "pymodbus/repl/server/main.py"}], "after_files": [{"content": "\"\"\"Repl server main.\n\nCopyright (c) 2020 by RiptideIO\nAll rights reserved.\n\"\"\"\nimport logging\nimport asyncio\nimport json\nimport click\nfrom pymodbus.framer.socket_framer import ModbusSocketFramer\nfrom pymodbus.server.reactive.main import (\n ReactiveServer,\n DEFAULT_FRAMER,\n DEFUALT_HANDLERS,\n)\nfrom pymodbus.server.reactive.default_config import DEFUALT_CONFIG\nfrom pymodbus.repl.server.cli import run_repl\n\nCANCELLED_ERROR = asyncio.exceptions.CancelledError\n\n\[email protected](\"ReactiveModbusServer\")\[email protected](\"--host\", default=\"localhost\", help=\"Host address\")\[email protected](\"--web-port\", default=8080, help=\"Web app port\")\[email protected](\n \"--broadcast-support\",\n is_flag=True,\n default=False,\n help=\"Support broadcast messages\",\n)\[email protected](\n \"--repl/--no-repl\",\n is_flag=True,\n default=True,\n help=\"Enable/Disable repl for server\",\n)\[email protected](\n \"--verbose\", is_flag=True, help=\"Run with debug logs enabled for pymodbus\"\n)\[email protected]_context\ndef server(ctx, host, web_port, broadcast_support, repl, verbose):\n \"\"\"Run server code.\"\"\"\n FORMAT = ( # pylint: disable=invalid-name\n \"%(asctime)-15s %(threadName)-15s\"\n \" %(levelname)-8s %(module)-15s:%(lineno)-8s %(message)s\"\n )\n pymodbus_logger = logging.getLogger(\"pymodbus\")\n 
logging.basicConfig(format=FORMAT) # NOSONAR\n if verbose:\n pymodbus_logger.setLevel(logging.DEBUG)\n else:\n pymodbus_logger.setLevel(logging.ERROR)\n\n ctx.obj = {\n \"repl\": repl,\n \"host\": host,\n \"web_port\": web_port,\n \"broadcast\": broadcast_support,\n }\n\n\[email protected](\"run\")\[email protected](\n \"--modbus-server\",\n default=\"tcp\",\n type=click.Choice([\"tcp\", \"serial\", \"tls\", \"udp\"], case_sensitive=False),\n help=\"Modbus server\",\n)\[email protected](\n \"--modbus-framer\",\n default=\"socket\",\n type=click.Choice(\n [\"socket\", \"rtu\", \"tls\", \"ascii\", \"binary\"], case_sensitive=False\n ),\n help=\"Modbus framer to use\",\n)\[email protected](\"--modbus-port\", default=\"5020\", help=\"Modbus port\")\[email protected](\n \"--modbus-unit-id\", default=[1], type=int, multiple=True, help=\"Modbus unit id\"\n)\[email protected](\n \"--modbus-config\",\n type=click.Path(exists=True),\n help=\"Path to additional modbus server config\",\n)\[email protected](\n \"-r\",\n \"--randomize\",\n default=0,\n help=\"Randomize every `r` reads.\"\n \" 0=never, 1=always, \"\n \"2=every-second-read, \"\n \"and so on. \"\n \"Applicable IR and DI.\",\n)\[email protected]_context\ndef run(\n ctx,\n modbus_server,\n modbus_framer,\n modbus_port,\n modbus_unit_id,\n modbus_config,\n randomize,\n):\n \"\"\"Run Reactive Modbus server.\n\n Exposing REST endpoint for response manipulation.\n \"\"\"\n repl = ctx.obj.pop(\"repl\")\n web_app_config = ctx.obj\n loop = asyncio.get_event_loop()\n framer = DEFAULT_FRAMER.get(modbus_framer, ModbusSocketFramer)\n if modbus_config:\n with open(modbus_config) as my_file: # pylint: disable=unspecified-encoding\n modbus_config = json.load(my_file)\n else:\n modbus_config = DEFUALT_CONFIG\n modbus_config = modbus_config.get(modbus_server, {})\n if modbus_server != \"serial\":\n modbus_port = int(modbus_port)\n handler = modbus_config.pop(\"handler\", \"ModbusConnectedRequestHandler\")\n else:\n handler = modbus_config.pop(\"handler\", \"ModbusSingleRequestHandler\")\n handler = DEFUALT_HANDLERS.get(handler.strip())\n\n modbus_config[\"handler\"] = handler\n modbus_config[\"randomize\"] = randomize\n app = ReactiveServer.factory(\n modbus_server,\n framer,\n modbus_port=modbus_port,\n unit=modbus_unit_id,\n loop=loop,\n **web_app_config,\n **modbus_config\n )\n try:\n if repl:\n loop.run_until_complete(app.run_async())\n\n loop.run_until_complete(run_repl(app))\n loop.run_forever()\n else:\n app.run()\n\n except CANCELLED_ERROR:\n print(\"Done!!!!!\")\n\n\nif __name__ == \"__main__\":\n server() # pylint: disable=no-value-for-parameter\n", "path": "pymodbus/repl/server/main.py"}]}
| 1,941 | 200 |
gh_patches_debug_29310 | rasdani/github-patches | git_diff | Kinto__kinto-1993 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
OpenID authentication plugin is missing a profile view
When validating the token, the plugn is fetching the profile, but there are no endpoint available to fetch this info: https://github.com/Kinto/kinto/blob/master/kinto/plugins/openid/__init__.py#L74
I would be in favor of adding a `/v1/openid/google/profile` view in order to grab the username and the profile_pic of the user.
This information might also be added in the `user` field of the `/v1/` endpoint. What do you think?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/core/__init__.py`
Content:
```
1 """Main entry point
2 """
3 import logging
4 import pkg_resources
5 import tempfile
6
7 from cornice import Service as CorniceService
8 from dockerflow import logging as dockerflow_logging
9 from pyramid.settings import aslist
10
11 from kinto.core import errors
12 from kinto.core import events
13 from kinto.core.initialization import ( # NOQA
14 initialize,
15 install_middlewares,
16 load_default_settings,
17 )
18 from kinto.core.utils import (
19 follow_subrequest,
20 current_service,
21 current_resource_name,
22 prefixed_userid,
23 prefixed_principals,
24 log_context,
25 )
26
27
28 logger = logging.getLogger(__name__)
29
30
31 # Module version, as defined in PEP-0396.
32 __version__ = pkg_resources.get_distribution("kinto").version # FIXME?
33
34 DEFAULT_SETTINGS = {
35 "backoff": None,
36 "backoff_percentage": None,
37 "batch_max_requests": 25,
38 "cache_backend": "",
39 "cache_hosts": "",
40 "cache_url": "",
41 "cache_pool_size": 25,
42 "cache_prefix": "",
43 "cache_max_size_bytes": 524288,
44 "cors_origins": "*",
45 "cors_max_age_seconds": 3600,
46 "eos": None,
47 "eos_message": None,
48 "eos_url": None,
49 "error_info_link": "https://github.com/Kinto/kinto/issues/",
50 "http_host": None,
51 "http_scheme": None,
52 "id_generator": "kinto.core.storage.generators.UUID4",
53 "includes": "",
54 "initialization_sequence": (
55 "kinto.core.initialization.setup_request_bound_data",
56 "kinto.core.initialization.setup_json_serializer",
57 "kinto.core.initialization.setup_logging",
58 "kinto.core.initialization.setup_storage",
59 "kinto.core.initialization.setup_permission",
60 "kinto.core.initialization.setup_cache",
61 "kinto.core.initialization.setup_requests_scheme",
62 "kinto.core.initialization.setup_version_redirection",
63 "kinto.core.initialization.setup_deprecation",
64 "kinto.core.initialization.setup_authentication",
65 "kinto.core.initialization.setup_backoff",
66 "kinto.core.initialization.setup_statsd",
67 "kinto.core.initialization.setup_listeners",
68 "kinto.core.events.setup_transaction_hook",
69 ),
70 "event_listeners": "",
71 "heartbeat_timeout_seconds": 10,
72 "newrelic_config": None,
73 "newrelic_env": "dev",
74 "paginate_by": None,
75 "pagination_token_validity_seconds": 10 * 60,
76 "permission_backend": "",
77 "permission_url": "",
78 "permission_pool_size": 25,
79 "profiler_dir": tempfile.gettempdir(),
80 "profiler_enabled": False,
81 "project_docs": "",
82 "project_name": "",
83 "project_version": "",
84 "readonly": False,
85 "retry_after_seconds": 30,
86 "settings_prefix": "",
87 "statsd_backend": "kinto.core.statsd",
88 "statsd_prefix": "kinto.core",
89 "statsd_url": None,
90 "storage_backend": "",
91 "storage_url": "",
92 "storage_max_fetch_size": 10000,
93 "storage_pool_size": 25,
94 "tm.annotate_user": False, # Do annotate transactions with the user-id.
95 "transaction_per_request": True,
96 "userid_hmac_secret": "",
97 "version_json_path": "version.json",
98 "version_prefix_redirect_enabled": True,
99 "trailing_slash_redirect_enabled": True,
100 "multiauth.groupfinder": "kinto.core.authorization.groupfinder",
101 "multiauth.policies": "",
102 "multiauth.policy.basicauth.use": (
103 "kinto.core.authentication." "BasicAuthAuthenticationPolicy"
104 ),
105 "multiauth.authorization_policy": ("kinto.core.authorization." "AuthorizationPolicy"),
106 }
107
108
109 class Service(CorniceService):
110 """Subclass of the default cornice service.
111
112 This is useful in order to attach specific behaviours without monkey
113 patching the default cornice service (which would impact other uses of it)
114 """
115
116 default_cors_headers = ("Backoff", "Retry-After", "Alert", "Content-Length")
117
118 def error_handler(self, request):
119 return errors.json_error_handler(request)
120
121 @classmethod
122 def init_from_settings(cls, settings):
123 cls.cors_origins = tuple(aslist(settings["cors_origins"]))
124 cors_max_age = settings["cors_max_age_seconds"]
125 cls.cors_max_age = int(cors_max_age) if cors_max_age else None
126
127
128 class JsonLogFormatter(dockerflow_logging.JsonLogFormatter):
129 logger_name = "kinto"
130
131 @classmethod
132 def init_from_settings(cls, settings):
133 cls.logger_name = settings["project_name"]
134
135 def __init__(self, fmt=None, datefmt=None, style="%"):
136 # Do not let mozilla-cloud-services-logger constructor to improperly
137 # use style as the logger_name.
138 # See https://github.com/mozilla/mozilla-cloud-services-logger/issues/3
139 logger_name = self.logger_name
140 super().__init__(fmt, datefmt, style)
141 self.logger_name = logger_name
142
143
144 def get_user_info(request):
145 # Default user info (shown in hello view for example).
146 return {"id": request.prefixed_userid, "principals": request.prefixed_principals}
147
148
149 def includeme(config):
150 settings = config.get_settings()
151
152 # Heartbeat registry.
153 config.registry.heartbeats = {}
154
155 # Public settings registry.
156 config.registry.public_settings = {"batch_max_requests", "readonly"}
157
158 # Directive to declare arbitrary API capabilities.
159 def add_api_capability(config, identifier, description="", url="", **kw):
160 existing = config.registry.api_capabilities.get(identifier)
161 if existing:
162 error_msg = "The '{}' API capability was already registered ({})."
163 raise ValueError(error_msg.format(identifier, existing))
164
165 capability = dict(description=description, url=url, **kw)
166 config.registry.api_capabilities[identifier] = capability
167
168 config.add_directive("add_api_capability", add_api_capability)
169 config.registry.api_capabilities = {}
170
171 # Resource events helpers.
172 config.add_request_method(events.get_resource_events, name="get_resource_events")
173 config.add_request_method(events.notify_resource_event, name="notify_resource_event")
174
175 # Setup cornice.
176 config.include("cornice")
177
178 # Setup cornice api documentation
179 config.include("cornice_swagger")
180
181 # Per-request transaction.
182 config.include("pyramid_tm")
183
184 # Add CORS settings to the base kinto.core Service class.
185 Service.init_from_settings(settings)
186
187 # Use the project name as the main logger name (Logger field in MozLog).
188 JsonLogFormatter.init_from_settings(settings)
189
190 # Setup components.
191 for step in aslist(settings["initialization_sequence"]):
192 step_func = config.maybe_dotted(step)
193 step_func(config)
194
195 # Custom helpers.
196 config.add_request_method(log_context)
197 config.add_request_method(follow_subrequest)
198 config.add_request_method(prefixed_userid, property=True)
199 config.add_request_method(prefixed_principals, reify=True)
200 config.add_request_method(get_user_info, name="get_user_info")
201 config.add_request_method(current_resource_name, reify=True)
202 config.add_request_method(current_service, reify=True)
203 config.commit()
204
205 # Include plugins after init, unlike pyramid includes.
206 includes = aslist(settings["includes"])
207 for app in includes:
208 config.include(app)
209
210 # # Show settings to output.
211 # for key, value in settings.items():
212 # logger.info('Using {} = {}'.format(key, value))
213
214 # Scan views.
215 config.scan("kinto.core.views")
216
217 # Give sign of life.
218 msg = "Running {project_name} {project_version}."
219 logger.info(msg.format_map(settings))
220
```
Path: `kinto/plugins/openid/__init__.py`
Content:
```
1 import requests
2 from pyramid import authentication as base_auth
3 from pyramid.settings import aslist
4 from pyramid.interfaces import IAuthenticationPolicy
5 from zope.interface import implementer
6
7 from kinto.core import logger
8 from kinto.core import utils as core_utils
9 from kinto.core.openapi import OpenAPI
10
11 from .utils import fetch_openid_config
12
13
14 @implementer(IAuthenticationPolicy)
15 class OpenIDConnectPolicy(base_auth.CallbackAuthenticationPolicy):
16 def __init__(self, issuer, client_id, realm="Realm", **kwargs):
17 self.realm = realm
18 self.issuer = issuer
19 self.client_id = client_id
20 self.client_secret = kwargs.get("client_secret", "")
21 self.header_type = kwargs.get("header_type", "Bearer")
22 self.userid_field = kwargs.get("userid_field", "sub")
23 self.verification_ttl = int(kwargs.get("verification_ttl_seconds", 86400))
24
25 # Fetch OpenID config (at instantiation, ie. startup)
26 self.oid_config = fetch_openid_config(issuer)
27
28 self._jwt_keys = None
29
30 def unauthenticated_userid(self, request):
31 """Return the userid or ``None`` if token could not be verified.
32 """
33 settings = request.registry.settings
34 hmac_secret = settings["userid_hmac_secret"]
35
36 authorization = request.headers.get("Authorization", "")
37 try:
38 authmeth, access_token = authorization.split(" ", 1)
39 except ValueError:
40 return None
41
42 if authmeth.lower() != self.header_type.lower():
43 return None
44
45 # XXX JWT Access token
46 # https://auth0.com/docs/tokens/access-token#access-token-format
47
48 # Check cache if these tokens were already verified.
49 hmac_tokens = core_utils.hmac_digest(hmac_secret, access_token)
50 cache_key = f"openid:verify:{hmac_tokens}"
51 payload = request.registry.cache.get(cache_key)
52 if payload is None:
53 # This can take some time.
54 payload = self._verify_token(access_token)
55 if payload is None:
56 return None
57 # Save for next time / refresh ttl.
58 request.registry.cache.set(cache_key, payload, ttl=self.verification_ttl)
59 # Extract meaningful field from userinfo (eg. email or sub)
60 return payload.get(self.userid_field)
61
62 def forget(self, request):
63 """A no-op. Credentials are sent on every request.
64 Return WWW-Authenticate Realm header for Bearer token.
65 """
66 return [("WWW-Authenticate", '%s realm="%s"' % (self.header_type, self.realm))]
67
68 def _verify_token(self, access_token):
69 uri = self.oid_config["userinfo_endpoint"]
70 # Opaque access token string. Fetch user info from profile.
71 try:
72 resp = requests.get(uri, headers={"Authorization": "Bearer " + access_token})
73 resp.raise_for_status()
74 userprofile = resp.json()
75 return userprofile
76
77 except (requests.exceptions.HTTPError, ValueError, KeyError) as e:
78 logger.debug("Unable to fetch user profile from %s (%s)" % (uri, e))
79 return None
80
81
82 def includeme(config):
83 # Activate end-points.
84 config.scan("kinto.plugins.openid.views")
85
86 settings = config.get_settings()
87
88 openid_policies = []
89 for policy in aslist(settings["multiauth.policies"]):
90 v = settings.get("multiauth.policy.%s.use" % policy, "")
91 if v.endswith("OpenIDConnectPolicy"):
92 openid_policies.append(policy)
93
94 if len(openid_policies) == 0:
95 # Do not add the capability if no policy is configured.
96 return
97
98 providers_infos = []
99 for name in openid_policies:
100 issuer = settings["multiauth.policy.%s.issuer" % name]
101 openid_config = fetch_openid_config(issuer)
102
103 client_id = settings["multiauth.policy.%s.client_id" % name]
104 header_type = settings.get("multiauth.policy.%s.header_type", "Bearer")
105
106 providers_infos.append(
107 {
108 "name": name,
109 "issuer": openid_config["issuer"],
110 "auth_path": "/openid/%s/login" % name,
111 "client_id": client_id,
112 "header_type": header_type,
113 "userinfo_endpoint": openid_config["userinfo_endpoint"],
114 }
115 )
116
117 OpenAPI.expose_authentication_method(
118 name, {"type": "oauth2", "authorizationUrl": openid_config["authorization_endpoint"]}
119 )
120
121 config.add_api_capability(
122 "openid",
123 description="OpenID connect support.",
124 url="http://kinto.readthedocs.io/en/stable/api/1.x/authentication.html",
125 providers=providers_infos,
126 )
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kinto/core/__init__.py b/kinto/core/__init__.py
--- a/kinto/core/__init__.py
+++ b/kinto/core/__init__.py
@@ -143,7 +143,10 @@
def get_user_info(request):
# Default user info (shown in hello view for example).
- return {"id": request.prefixed_userid, "principals": request.prefixed_principals}
+ user_info = {"id": request.prefixed_userid, "principals": request.prefixed_principals}
+ if hasattr(request, "get_user_profile"):
+ user_info["profile"] = request.get_user_profile()
+ return user_info
def includeme(config):
diff --git a/kinto/plugins/openid/__init__.py b/kinto/plugins/openid/__init__.py
--- a/kinto/plugins/openid/__init__.py
+++ b/kinto/plugins/openid/__init__.py
@@ -56,6 +56,7 @@
return None
# Save for next time / refresh ttl.
request.registry.cache.set(cache_key, payload, ttl=self.verification_ttl)
+ request.bound_data["user_profile"] = payload
# Extract meaningful field from userinfo (eg. email or sub)
return payload.get(self.userid_field)
@@ -79,6 +80,10 @@
return None
+def get_user_profile(request):
+ return request.bound_data.get("user_profile", {})
+
+
def includeme(config):
# Activate end-points.
config.scan("kinto.plugins.openid.views")
@@ -124,3 +129,4 @@
url="http://kinto.readthedocs.io/en/stable/api/1.x/authentication.html",
providers=providers_infos,
)
+ config.add_request_method(get_user_profile, name="get_user_profile")
|
{"golden_diff": "diff --git a/kinto/core/__init__.py b/kinto/core/__init__.py\n--- a/kinto/core/__init__.py\n+++ b/kinto/core/__init__.py\n@@ -143,7 +143,10 @@\n \n def get_user_info(request):\n # Default user info (shown in hello view for example).\n- return {\"id\": request.prefixed_userid, \"principals\": request.prefixed_principals}\n+ user_info = {\"id\": request.prefixed_userid, \"principals\": request.prefixed_principals}\n+ if hasattr(request, \"get_user_profile\"):\n+ user_info[\"profile\"] = request.get_user_profile()\n+ return user_info\n \n \n def includeme(config):\ndiff --git a/kinto/plugins/openid/__init__.py b/kinto/plugins/openid/__init__.py\n--- a/kinto/plugins/openid/__init__.py\n+++ b/kinto/plugins/openid/__init__.py\n@@ -56,6 +56,7 @@\n return None\n # Save for next time / refresh ttl.\n request.registry.cache.set(cache_key, payload, ttl=self.verification_ttl)\n+ request.bound_data[\"user_profile\"] = payload\n # Extract meaningful field from userinfo (eg. email or sub)\n return payload.get(self.userid_field)\n \n@@ -79,6 +80,10 @@\n return None\n \n \n+def get_user_profile(request):\n+ return request.bound_data.get(\"user_profile\", {})\n+\n+\n def includeme(config):\n # Activate end-points.\n config.scan(\"kinto.plugins.openid.views\")\n@@ -124,3 +129,4 @@\n url=\"http://kinto.readthedocs.io/en/stable/api/1.x/authentication.html\",\n providers=providers_infos,\n )\n+ config.add_request_method(get_user_profile, name=\"get_user_profile\")\n", "issue": "OpenID authentication plugin is missing a profile view\nWhen validating the token, the plugn is fetching the profile, but there are no endpoint available to fetch this info: https://github.com/Kinto/kinto/blob/master/kinto/plugins/openid/__init__.py#L74\r\n\r\nI would be in favor of adding a `/v1/openid/google/profile` view in order to grab the username and the profile_pic of the user.\r\n\r\nThis information might also be added in the `user` field of the `/v1/` endpoint. 
What do you think?\n", "before_files": [{"content": "\"\"\"Main entry point\n\"\"\"\nimport logging\nimport pkg_resources\nimport tempfile\n\nfrom cornice import Service as CorniceService\nfrom dockerflow import logging as dockerflow_logging\nfrom pyramid.settings import aslist\n\nfrom kinto.core import errors\nfrom kinto.core import events\nfrom kinto.core.initialization import ( # NOQA\n initialize,\n install_middlewares,\n load_default_settings,\n)\nfrom kinto.core.utils import (\n follow_subrequest,\n current_service,\n current_resource_name,\n prefixed_userid,\n prefixed_principals,\n log_context,\n)\n\n\nlogger = logging.getLogger(__name__)\n\n\n# Module version, as defined in PEP-0396.\n__version__ = pkg_resources.get_distribution(\"kinto\").version # FIXME?\n\nDEFAULT_SETTINGS = {\n \"backoff\": None,\n \"backoff_percentage\": None,\n \"batch_max_requests\": 25,\n \"cache_backend\": \"\",\n \"cache_hosts\": \"\",\n \"cache_url\": \"\",\n \"cache_pool_size\": 25,\n \"cache_prefix\": \"\",\n \"cache_max_size_bytes\": 524288,\n \"cors_origins\": \"*\",\n \"cors_max_age_seconds\": 3600,\n \"eos\": None,\n \"eos_message\": None,\n \"eos_url\": None,\n \"error_info_link\": \"https://github.com/Kinto/kinto/issues/\",\n \"http_host\": None,\n \"http_scheme\": None,\n \"id_generator\": \"kinto.core.storage.generators.UUID4\",\n \"includes\": \"\",\n \"initialization_sequence\": (\n \"kinto.core.initialization.setup_request_bound_data\",\n \"kinto.core.initialization.setup_json_serializer\",\n \"kinto.core.initialization.setup_logging\",\n \"kinto.core.initialization.setup_storage\",\n \"kinto.core.initialization.setup_permission\",\n \"kinto.core.initialization.setup_cache\",\n \"kinto.core.initialization.setup_requests_scheme\",\n \"kinto.core.initialization.setup_version_redirection\",\n \"kinto.core.initialization.setup_deprecation\",\n \"kinto.core.initialization.setup_authentication\",\n \"kinto.core.initialization.setup_backoff\",\n \"kinto.core.initialization.setup_statsd\",\n \"kinto.core.initialization.setup_listeners\",\n \"kinto.core.events.setup_transaction_hook\",\n ),\n \"event_listeners\": \"\",\n \"heartbeat_timeout_seconds\": 10,\n \"newrelic_config\": None,\n \"newrelic_env\": \"dev\",\n \"paginate_by\": None,\n \"pagination_token_validity_seconds\": 10 * 60,\n \"permission_backend\": \"\",\n \"permission_url\": \"\",\n \"permission_pool_size\": 25,\n \"profiler_dir\": tempfile.gettempdir(),\n \"profiler_enabled\": False,\n \"project_docs\": \"\",\n \"project_name\": \"\",\n \"project_version\": \"\",\n \"readonly\": False,\n \"retry_after_seconds\": 30,\n \"settings_prefix\": \"\",\n \"statsd_backend\": \"kinto.core.statsd\",\n \"statsd_prefix\": \"kinto.core\",\n \"statsd_url\": None,\n \"storage_backend\": \"\",\n \"storage_url\": \"\",\n \"storage_max_fetch_size\": 10000,\n \"storage_pool_size\": 25,\n \"tm.annotate_user\": False, # Do annotate transactions with the user-id.\n \"transaction_per_request\": True,\n \"userid_hmac_secret\": \"\",\n \"version_json_path\": \"version.json\",\n \"version_prefix_redirect_enabled\": True,\n \"trailing_slash_redirect_enabled\": True,\n \"multiauth.groupfinder\": \"kinto.core.authorization.groupfinder\",\n \"multiauth.policies\": \"\",\n \"multiauth.policy.basicauth.use\": (\n \"kinto.core.authentication.\" \"BasicAuthAuthenticationPolicy\"\n ),\n \"multiauth.authorization_policy\": (\"kinto.core.authorization.\" \"AuthorizationPolicy\"),\n}\n\n\nclass Service(CorniceService):\n \"\"\"Subclass of the default cornice service.\n\n 
This is useful in order to attach specific behaviours without monkey\n patching the default cornice service (which would impact other uses of it)\n \"\"\"\n\n default_cors_headers = (\"Backoff\", \"Retry-After\", \"Alert\", \"Content-Length\")\n\n def error_handler(self, request):\n return errors.json_error_handler(request)\n\n @classmethod\n def init_from_settings(cls, settings):\n cls.cors_origins = tuple(aslist(settings[\"cors_origins\"]))\n cors_max_age = settings[\"cors_max_age_seconds\"]\n cls.cors_max_age = int(cors_max_age) if cors_max_age else None\n\n\nclass JsonLogFormatter(dockerflow_logging.JsonLogFormatter):\n logger_name = \"kinto\"\n\n @classmethod\n def init_from_settings(cls, settings):\n cls.logger_name = settings[\"project_name\"]\n\n def __init__(self, fmt=None, datefmt=None, style=\"%\"):\n # Do not let mozilla-cloud-services-logger constructor to improperly\n # use style as the logger_name.\n # See https://github.com/mozilla/mozilla-cloud-services-logger/issues/3\n logger_name = self.logger_name\n super().__init__(fmt, datefmt, style)\n self.logger_name = logger_name\n\n\ndef get_user_info(request):\n # Default user info (shown in hello view for example).\n return {\"id\": request.prefixed_userid, \"principals\": request.prefixed_principals}\n\n\ndef includeme(config):\n settings = config.get_settings()\n\n # Heartbeat registry.\n config.registry.heartbeats = {}\n\n # Public settings registry.\n config.registry.public_settings = {\"batch_max_requests\", \"readonly\"}\n\n # Directive to declare arbitrary API capabilities.\n def add_api_capability(config, identifier, description=\"\", url=\"\", **kw):\n existing = config.registry.api_capabilities.get(identifier)\n if existing:\n error_msg = \"The '{}' API capability was already registered ({}).\"\n raise ValueError(error_msg.format(identifier, existing))\n\n capability = dict(description=description, url=url, **kw)\n config.registry.api_capabilities[identifier] = capability\n\n config.add_directive(\"add_api_capability\", add_api_capability)\n config.registry.api_capabilities = {}\n\n # Resource events helpers.\n config.add_request_method(events.get_resource_events, name=\"get_resource_events\")\n config.add_request_method(events.notify_resource_event, name=\"notify_resource_event\")\n\n # Setup cornice.\n config.include(\"cornice\")\n\n # Setup cornice api documentation\n config.include(\"cornice_swagger\")\n\n # Per-request transaction.\n config.include(\"pyramid_tm\")\n\n # Add CORS settings to the base kinto.core Service class.\n Service.init_from_settings(settings)\n\n # Use the project name as the main logger name (Logger field in MozLog).\n JsonLogFormatter.init_from_settings(settings)\n\n # Setup components.\n for step in aslist(settings[\"initialization_sequence\"]):\n step_func = config.maybe_dotted(step)\n step_func(config)\n\n # Custom helpers.\n config.add_request_method(log_context)\n config.add_request_method(follow_subrequest)\n config.add_request_method(prefixed_userid, property=True)\n config.add_request_method(prefixed_principals, reify=True)\n config.add_request_method(get_user_info, name=\"get_user_info\")\n config.add_request_method(current_resource_name, reify=True)\n config.add_request_method(current_service, reify=True)\n config.commit()\n\n # Include plugins after init, unlike pyramid includes.\n includes = aslist(settings[\"includes\"])\n for app in includes:\n config.include(app)\n\n # # Show settings to output.\n # for key, value in settings.items():\n # logger.info('Using {} = 
{}'.format(key, value))\n\n # Scan views.\n config.scan(\"kinto.core.views\")\n\n # Give sign of life.\n msg = \"Running {project_name} {project_version}.\"\n logger.info(msg.format_map(settings))\n", "path": "kinto/core/__init__.py"}, {"content": "import requests\nfrom pyramid import authentication as base_auth\nfrom pyramid.settings import aslist\nfrom pyramid.interfaces import IAuthenticationPolicy\nfrom zope.interface import implementer\n\nfrom kinto.core import logger\nfrom kinto.core import utils as core_utils\nfrom kinto.core.openapi import OpenAPI\n\nfrom .utils import fetch_openid_config\n\n\n@implementer(IAuthenticationPolicy)\nclass OpenIDConnectPolicy(base_auth.CallbackAuthenticationPolicy):\n def __init__(self, issuer, client_id, realm=\"Realm\", **kwargs):\n self.realm = realm\n self.issuer = issuer\n self.client_id = client_id\n self.client_secret = kwargs.get(\"client_secret\", \"\")\n self.header_type = kwargs.get(\"header_type\", \"Bearer\")\n self.userid_field = kwargs.get(\"userid_field\", \"sub\")\n self.verification_ttl = int(kwargs.get(\"verification_ttl_seconds\", 86400))\n\n # Fetch OpenID config (at instantiation, ie. startup)\n self.oid_config = fetch_openid_config(issuer)\n\n self._jwt_keys = None\n\n def unauthenticated_userid(self, request):\n \"\"\"Return the userid or ``None`` if token could not be verified.\n \"\"\"\n settings = request.registry.settings\n hmac_secret = settings[\"userid_hmac_secret\"]\n\n authorization = request.headers.get(\"Authorization\", \"\")\n try:\n authmeth, access_token = authorization.split(\" \", 1)\n except ValueError:\n return None\n\n if authmeth.lower() != self.header_type.lower():\n return None\n\n # XXX JWT Access token\n # https://auth0.com/docs/tokens/access-token#access-token-format\n\n # Check cache if these tokens were already verified.\n hmac_tokens = core_utils.hmac_digest(hmac_secret, access_token)\n cache_key = f\"openid:verify:{hmac_tokens}\"\n payload = request.registry.cache.get(cache_key)\n if payload is None:\n # This can take some time.\n payload = self._verify_token(access_token)\n if payload is None:\n return None\n # Save for next time / refresh ttl.\n request.registry.cache.set(cache_key, payload, ttl=self.verification_ttl)\n # Extract meaningful field from userinfo (eg. email or sub)\n return payload.get(self.userid_field)\n\n def forget(self, request):\n \"\"\"A no-op. Credentials are sent on every request.\n Return WWW-Authenticate Realm header for Bearer token.\n \"\"\"\n return [(\"WWW-Authenticate\", '%s realm=\"%s\"' % (self.header_type, self.realm))]\n\n def _verify_token(self, access_token):\n uri = self.oid_config[\"userinfo_endpoint\"]\n # Opaque access token string. 
Fetch user info from profile.\n try:\n resp = requests.get(uri, headers={\"Authorization\": \"Bearer \" + access_token})\n resp.raise_for_status()\n userprofile = resp.json()\n return userprofile\n\n except (requests.exceptions.HTTPError, ValueError, KeyError) as e:\n logger.debug(\"Unable to fetch user profile from %s (%s)\" % (uri, e))\n return None\n\n\ndef includeme(config):\n # Activate end-points.\n config.scan(\"kinto.plugins.openid.views\")\n\n settings = config.get_settings()\n\n openid_policies = []\n for policy in aslist(settings[\"multiauth.policies\"]):\n v = settings.get(\"multiauth.policy.%s.use\" % policy, \"\")\n if v.endswith(\"OpenIDConnectPolicy\"):\n openid_policies.append(policy)\n\n if len(openid_policies) == 0:\n # Do not add the capability if no policy is configured.\n return\n\n providers_infos = []\n for name in openid_policies:\n issuer = settings[\"multiauth.policy.%s.issuer\" % name]\n openid_config = fetch_openid_config(issuer)\n\n client_id = settings[\"multiauth.policy.%s.client_id\" % name]\n header_type = settings.get(\"multiauth.policy.%s.header_type\", \"Bearer\")\n\n providers_infos.append(\n {\n \"name\": name,\n \"issuer\": openid_config[\"issuer\"],\n \"auth_path\": \"/openid/%s/login\" % name,\n \"client_id\": client_id,\n \"header_type\": header_type,\n \"userinfo_endpoint\": openid_config[\"userinfo_endpoint\"],\n }\n )\n\n OpenAPI.expose_authentication_method(\n name, {\"type\": \"oauth2\", \"authorizationUrl\": openid_config[\"authorization_endpoint\"]}\n )\n\n config.add_api_capability(\n \"openid\",\n description=\"OpenID connect support.\",\n url=\"http://kinto.readthedocs.io/en/stable/api/1.x/authentication.html\",\n providers=providers_infos,\n )\n", "path": "kinto/plugins/openid/__init__.py"}], "after_files": [{"content": "\"\"\"Main entry point\n\"\"\"\nimport logging\nimport pkg_resources\nimport tempfile\n\nfrom cornice import Service as CorniceService\nfrom dockerflow import logging as dockerflow_logging\nfrom pyramid.settings import aslist\n\nfrom kinto.core import errors\nfrom kinto.core import events\nfrom kinto.core.initialization import ( # NOQA\n initialize,\n install_middlewares,\n load_default_settings,\n)\nfrom kinto.core.utils import (\n follow_subrequest,\n current_service,\n current_resource_name,\n prefixed_userid,\n prefixed_principals,\n log_context,\n)\n\n\nlogger = logging.getLogger(__name__)\n\n\n# Module version, as defined in PEP-0396.\n__version__ = pkg_resources.get_distribution(\"kinto\").version # FIXME?\n\nDEFAULT_SETTINGS = {\n \"backoff\": None,\n \"backoff_percentage\": None,\n \"batch_max_requests\": 25,\n \"cache_backend\": \"\",\n \"cache_hosts\": \"\",\n \"cache_url\": \"\",\n \"cache_pool_size\": 25,\n \"cache_prefix\": \"\",\n \"cache_max_size_bytes\": 524288,\n \"cors_origins\": \"*\",\n \"cors_max_age_seconds\": 3600,\n \"eos\": None,\n \"eos_message\": None,\n \"eos_url\": None,\n \"error_info_link\": \"https://github.com/Kinto/kinto/issues/\",\n \"http_host\": None,\n \"http_scheme\": None,\n \"id_generator\": \"kinto.core.storage.generators.UUID4\",\n \"includes\": \"\",\n \"initialization_sequence\": (\n \"kinto.core.initialization.setup_request_bound_data\",\n \"kinto.core.initialization.setup_json_serializer\",\n \"kinto.core.initialization.setup_logging\",\n \"kinto.core.initialization.setup_storage\",\n \"kinto.core.initialization.setup_permission\",\n \"kinto.core.initialization.setup_cache\",\n \"kinto.core.initialization.setup_requests_scheme\",\n 
\"kinto.core.initialization.setup_version_redirection\",\n \"kinto.core.initialization.setup_deprecation\",\n \"kinto.core.initialization.setup_authentication\",\n \"kinto.core.initialization.setup_backoff\",\n \"kinto.core.initialization.setup_statsd\",\n \"kinto.core.initialization.setup_listeners\",\n \"kinto.core.events.setup_transaction_hook\",\n ),\n \"event_listeners\": \"\",\n \"heartbeat_timeout_seconds\": 10,\n \"newrelic_config\": None,\n \"newrelic_env\": \"dev\",\n \"paginate_by\": None,\n \"pagination_token_validity_seconds\": 10 * 60,\n \"permission_backend\": \"\",\n \"permission_url\": \"\",\n \"permission_pool_size\": 25,\n \"profiler_dir\": tempfile.gettempdir(),\n \"profiler_enabled\": False,\n \"project_docs\": \"\",\n \"project_name\": \"\",\n \"project_version\": \"\",\n \"readonly\": False,\n \"retry_after_seconds\": 30,\n \"settings_prefix\": \"\",\n \"statsd_backend\": \"kinto.core.statsd\",\n \"statsd_prefix\": \"kinto.core\",\n \"statsd_url\": None,\n \"storage_backend\": \"\",\n \"storage_url\": \"\",\n \"storage_max_fetch_size\": 10000,\n \"storage_pool_size\": 25,\n \"tm.annotate_user\": False, # Do annotate transactions with the user-id.\n \"transaction_per_request\": True,\n \"userid_hmac_secret\": \"\",\n \"version_json_path\": \"version.json\",\n \"version_prefix_redirect_enabled\": True,\n \"trailing_slash_redirect_enabled\": True,\n \"multiauth.groupfinder\": \"kinto.core.authorization.groupfinder\",\n \"multiauth.policies\": \"\",\n \"multiauth.policy.basicauth.use\": (\n \"kinto.core.authentication.\" \"BasicAuthAuthenticationPolicy\"\n ),\n \"multiauth.authorization_policy\": (\"kinto.core.authorization.\" \"AuthorizationPolicy\"),\n}\n\n\nclass Service(CorniceService):\n \"\"\"Subclass of the default cornice service.\n\n This is useful in order to attach specific behaviours without monkey\n patching the default cornice service (which would impact other uses of it)\n \"\"\"\n\n default_cors_headers = (\"Backoff\", \"Retry-After\", \"Alert\", \"Content-Length\")\n\n def error_handler(self, request):\n return errors.json_error_handler(request)\n\n @classmethod\n def init_from_settings(cls, settings):\n cls.cors_origins = tuple(aslist(settings[\"cors_origins\"]))\n cors_max_age = settings[\"cors_max_age_seconds\"]\n cls.cors_max_age = int(cors_max_age) if cors_max_age else None\n\n\nclass JsonLogFormatter(dockerflow_logging.JsonLogFormatter):\n logger_name = \"kinto\"\n\n @classmethod\n def init_from_settings(cls, settings):\n cls.logger_name = settings[\"project_name\"]\n\n def __init__(self, fmt=None, datefmt=None, style=\"%\"):\n # Do not let mozilla-cloud-services-logger constructor to improperly\n # use style as the logger_name.\n # See https://github.com/mozilla/mozilla-cloud-services-logger/issues/3\n logger_name = self.logger_name\n super().__init__(fmt, datefmt, style)\n self.logger_name = logger_name\n\n\ndef get_user_info(request):\n # Default user info (shown in hello view for example).\n user_info = {\"id\": request.prefixed_userid, \"principals\": request.prefixed_principals}\n if hasattr(request, \"get_user_profile\"):\n user_info[\"profile\"] = request.get_user_profile()\n return user_info\n\n\ndef includeme(config):\n settings = config.get_settings()\n\n # Heartbeat registry.\n config.registry.heartbeats = {}\n\n # Public settings registry.\n config.registry.public_settings = {\"batch_max_requests\", \"readonly\"}\n\n # Directive to declare arbitrary API capabilities.\n def add_api_capability(config, identifier, description=\"\", 
url=\"\", **kw):\n existing = config.registry.api_capabilities.get(identifier)\n if existing:\n error_msg = \"The '{}' API capability was already registered ({}).\"\n raise ValueError(error_msg.format(identifier, existing))\n\n capability = dict(description=description, url=url, **kw)\n config.registry.api_capabilities[identifier] = capability\n\n config.add_directive(\"add_api_capability\", add_api_capability)\n config.registry.api_capabilities = {}\n\n # Resource events helpers.\n config.add_request_method(events.get_resource_events, name=\"get_resource_events\")\n config.add_request_method(events.notify_resource_event, name=\"notify_resource_event\")\n\n # Setup cornice.\n config.include(\"cornice\")\n\n # Setup cornice api documentation\n config.include(\"cornice_swagger\")\n\n # Per-request transaction.\n config.include(\"pyramid_tm\")\n\n # Add CORS settings to the base kinto.core Service class.\n Service.init_from_settings(settings)\n\n # Use the project name as the main logger name (Logger field in MozLog).\n JsonLogFormatter.init_from_settings(settings)\n\n # Setup components.\n for step in aslist(settings[\"initialization_sequence\"]):\n step_func = config.maybe_dotted(step)\n step_func(config)\n\n # Custom helpers.\n config.add_request_method(log_context)\n config.add_request_method(follow_subrequest)\n config.add_request_method(prefixed_userid, property=True)\n config.add_request_method(prefixed_principals, reify=True)\n config.add_request_method(get_user_info, name=\"get_user_info\")\n config.add_request_method(current_resource_name, reify=True)\n config.add_request_method(current_service, reify=True)\n config.commit()\n\n # Include plugins after init, unlike pyramid includes.\n includes = aslist(settings[\"includes\"])\n for app in includes:\n config.include(app)\n\n # # Show settings to output.\n # for key, value in settings.items():\n # logger.info('Using {} = {}'.format(key, value))\n\n # Scan views.\n config.scan(\"kinto.core.views\")\n\n # Give sign of life.\n msg = \"Running {project_name} {project_version}.\"\n logger.info(msg.format_map(settings))\n", "path": "kinto/core/__init__.py"}, {"content": "import requests\nfrom pyramid import authentication as base_auth\nfrom pyramid.settings import aslist\nfrom pyramid.interfaces import IAuthenticationPolicy\nfrom zope.interface import implementer\n\nfrom kinto.core import logger\nfrom kinto.core import utils as core_utils\nfrom kinto.core.openapi import OpenAPI\n\nfrom .utils import fetch_openid_config\n\n\n@implementer(IAuthenticationPolicy)\nclass OpenIDConnectPolicy(base_auth.CallbackAuthenticationPolicy):\n def __init__(self, issuer, client_id, realm=\"Realm\", **kwargs):\n self.realm = realm\n self.issuer = issuer\n self.client_id = client_id\n self.client_secret = kwargs.get(\"client_secret\", \"\")\n self.header_type = kwargs.get(\"header_type\", \"Bearer\")\n self.userid_field = kwargs.get(\"userid_field\", \"sub\")\n self.verification_ttl = int(kwargs.get(\"verification_ttl_seconds\", 86400))\n\n # Fetch OpenID config (at instantiation, ie. 
startup)\n self.oid_config = fetch_openid_config(issuer)\n\n self._jwt_keys = None\n\n def unauthenticated_userid(self, request):\n \"\"\"Return the userid or ``None`` if token could not be verified.\n \"\"\"\n settings = request.registry.settings\n hmac_secret = settings[\"userid_hmac_secret\"]\n\n authorization = request.headers.get(\"Authorization\", \"\")\n try:\n authmeth, access_token = authorization.split(\" \", 1)\n except ValueError:\n return None\n\n if authmeth.lower() != self.header_type.lower():\n return None\n\n # XXX JWT Access token\n # https://auth0.com/docs/tokens/access-token#access-token-format\n\n # Check cache if these tokens were already verified.\n hmac_tokens = core_utils.hmac_digest(hmac_secret, access_token)\n cache_key = f\"openid:verify:{hmac_tokens}\"\n payload = request.registry.cache.get(cache_key)\n if payload is None:\n # This can take some time.\n payload = self._verify_token(access_token)\n if payload is None:\n return None\n # Save for next time / refresh ttl.\n request.registry.cache.set(cache_key, payload, ttl=self.verification_ttl)\n request.bound_data[\"user_profile\"] = payload\n # Extract meaningful field from userinfo (eg. email or sub)\n return payload.get(self.userid_field)\n\n def forget(self, request):\n \"\"\"A no-op. Credentials are sent on every request.\n Return WWW-Authenticate Realm header for Bearer token.\n \"\"\"\n return [(\"WWW-Authenticate\", '%s realm=\"%s\"' % (self.header_type, self.realm))]\n\n def _verify_token(self, access_token):\n uri = self.oid_config[\"userinfo_endpoint\"]\n # Opaque access token string. Fetch user info from profile.\n try:\n resp = requests.get(uri, headers={\"Authorization\": \"Bearer \" + access_token})\n resp.raise_for_status()\n userprofile = resp.json()\n return userprofile\n\n except (requests.exceptions.HTTPError, ValueError, KeyError) as e:\n logger.debug(\"Unable to fetch user profile from %s (%s)\" % (uri, e))\n return None\n\n\ndef get_user_profile(request):\n return request.bound_data.get(\"user_profile\", {})\n\n\ndef includeme(config):\n # Activate end-points.\n config.scan(\"kinto.plugins.openid.views\")\n\n settings = config.get_settings()\n\n openid_policies = []\n for policy in aslist(settings[\"multiauth.policies\"]):\n v = settings.get(\"multiauth.policy.%s.use\" % policy, \"\")\n if v.endswith(\"OpenIDConnectPolicy\"):\n openid_policies.append(policy)\n\n if len(openid_policies) == 0:\n # Do not add the capability if no policy is configured.\n return\n\n providers_infos = []\n for name in openid_policies:\n issuer = settings[\"multiauth.policy.%s.issuer\" % name]\n openid_config = fetch_openid_config(issuer)\n\n client_id = settings[\"multiauth.policy.%s.client_id\" % name]\n header_type = settings.get(\"multiauth.policy.%s.header_type\", \"Bearer\")\n\n providers_infos.append(\n {\n \"name\": name,\n \"issuer\": openid_config[\"issuer\"],\n \"auth_path\": \"/openid/%s/login\" % name,\n \"client_id\": client_id,\n \"header_type\": header_type,\n \"userinfo_endpoint\": openid_config[\"userinfo_endpoint\"],\n }\n )\n\n OpenAPI.expose_authentication_method(\n name, {\"type\": \"oauth2\", \"authorizationUrl\": openid_config[\"authorization_endpoint\"]}\n )\n\n config.add_api_capability(\n \"openid\",\n description=\"OpenID connect support.\",\n url=\"http://kinto.readthedocs.io/en/stable/api/1.x/authentication.html\",\n providers=providers_infos,\n )\n config.add_request_method(get_user_profile, name=\"get_user_profile\")\n", "path": "kinto/plugins/openid/__init__.py"}]}
num_tokens: 3,938 | num_tokens_diff: 406

problem_id: gh_patches_debug_989 | source: rasdani/github-patches | task_type: git_diff | in_source_id: hydroshare__hydroshare-5098

prompt:
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Haystack rest endpoint response serializer does not include short_id
**Description of the bug**
The Haystack REST endpoint for complex solr searches does not include the short_id into the response serializer. This is a critical piece of information for users of this endpoint.
Steps to reproduce the bug:
https://github.com/hydroshare/hydroshare/blob/d3bd1737a0179eac74cd68926b3b79b80894410e/hs_rest_api/discovery.py#L12
**Expected behavior**
I expect resource ids to be included with search results so I can retrieve resources.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hs_rest_api/discovery.py`
Content:
```
1 from drf_haystack.serializers import HaystackSerializer
2 from drf_haystack.viewsets import HaystackViewSet
3 from hs_core.search_indexes import BaseResourceIndex
4 from hs_core.models import BaseResource
5 from drf_haystack.fields import HaystackCharField, HaystackDateField, HaystackMultiValueField, \
6 HaystackFloatField
7 from drf_yasg.utils import swagger_auto_schema
8 from rest_framework.decorators import action
9 from rest_framework import serializers
10
11
12 class DiscoveryResourceSerializer(HaystackSerializer):
13 class Meta:
14 index_classes = [BaseResourceIndex]
15 fields = [
16 "title",
17 "author",
18 "contributor",
19 "subject",
20 "abstract",
21 "resource_type",
22 "content_type",
23 "coverage_type",
24 "availability",
25 "created",
26 "modified",
27 "start_date",
28 "end_date",
29 "east",
30 "north",
31 "eastlimit",
32 "westlimit",
33 "northlimit",
34 "southlimit"
35 ]
36
37
38 class DiscoverResourceValidator(serializers.Serializer):
39 text = HaystackCharField(required=False,
40 help_text='Search across all Resource Fields')
41 author = HaystackCharField(required=False,
42 help_text='Search by author')
43 contributor = HaystackMultiValueField(required=False,
44 help_text='Search by contributor')
45 subject = HaystackMultiValueField(required=False,
46 help_text='Search within subject keywords')
47 abstract = HaystackCharField(required=False,
48 help_text='Search within the abstract')
49 resource_type = HaystackCharField(required=False,
50 help_text='Search by resource type')
51 content_type = HaystackMultiValueField(required=False,
52 help_text='Search by content type')
53 coverage_type = HaystackMultiValueField(required=False,
54 help_text='Search by coverage type '
55 '(point, box, period)')
56 availability = HaystackMultiValueField(required=False,
57 help_text='Search by availability '
58 '(discoverable, public, published)')
59 created = HaystackDateField(required=False,
60 help_text='Search by created date')
61 modified = HaystackDateField(required=False,
62 help_text='Search by modified date')
63 start_date = HaystackDateField(required=False,
64 help_text='Search by start date')
65 end_date = HaystackDateField(required=False,
66 help_text='Search by end date')
67 east = HaystackFloatField(required=False,
68 help_text='Search by location or box center east longitude')
69 north = HaystackFloatField(required=False,
70 help_text='Search by location or box center north latitude')
71 eastlimit = HaystackFloatField(required=False,
72 help_text='Search by east limit longitude')
73 westlimit = HaystackFloatField(required=False,
74 help_text='Search by west limit longitude')
75 northlimit = HaystackFloatField(required=False,
76 help_text='Search by north limit latitude')
77 southlimit = HaystackFloatField(required=False,
78 help_text='Search by south limit latitude')
79
80
81 class DiscoverSearchView(HaystackViewSet):
82 index_models = [BaseResource]
83 serializer_class = DiscoveryResourceSerializer
84
85 @action(detail=True, methods=['get'])
86 @swagger_auto_schema(operation_description="Search HydroShare Resources using solr conventions."
87 "We use haystack for queries so you can use all of "
88 "the parameters described here in combination with "
89 "field lookups "
90 "https://django-haystack.readthedocs.io/en/latest/"
91 "searchqueryset_api.html?highlight=lookups#id1",
92 query_serializer=DiscoverResourceValidator)
93 def list(self, request):
94 return super(DiscoverSearchView, self).list(request)
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
golden_diff:
diff --git a/hs_rest_api/discovery.py b/hs_rest_api/discovery.py
--- a/hs_rest_api/discovery.py
+++ b/hs_rest_api/discovery.py
@@ -13,6 +13,7 @@
class Meta:
index_classes = [BaseResourceIndex]
fields = [
+ "short_id",
"title",
"author",
"contributor",
verification_info:
{"golden_diff": "diff --git a/hs_rest_api/discovery.py b/hs_rest_api/discovery.py\n--- a/hs_rest_api/discovery.py\n+++ b/hs_rest_api/discovery.py\n@@ -13,6 +13,7 @@\n class Meta:\n index_classes = [BaseResourceIndex]\n fields = [\n+ \"short_id\",\n \"title\",\n \"author\",\n \"contributor\",\n", "issue": "Haystack rest endpoint response serializer does not include short_id\n**Description of the bug**\r\nThe Haystack REST endpoint for complex solr searches does not include the short_id into the response serializer. This is a critical piece of information for users of this endpoint. \r\n\r\nSteps to reproduce the bug:\r\nhttps://github.com/hydroshare/hydroshare/blob/d3bd1737a0179eac74cd68926b3b79b80894410e/hs_rest_api/discovery.py#L12\r\n\r\n**Expected behavior**\r\nI expect resource ids to be included with search results so I can retrieve resources.\r\n\n", "before_files": [{"content": "from drf_haystack.serializers import HaystackSerializer\nfrom drf_haystack.viewsets import HaystackViewSet\nfrom hs_core.search_indexes import BaseResourceIndex\nfrom hs_core.models import BaseResource\nfrom drf_haystack.fields import HaystackCharField, HaystackDateField, HaystackMultiValueField, \\\n HaystackFloatField\nfrom drf_yasg.utils import swagger_auto_schema\nfrom rest_framework.decorators import action\nfrom rest_framework import serializers\n\n\nclass DiscoveryResourceSerializer(HaystackSerializer):\n class Meta:\n index_classes = [BaseResourceIndex]\n fields = [\n \"title\",\n \"author\",\n \"contributor\",\n \"subject\",\n \"abstract\",\n \"resource_type\",\n \"content_type\",\n \"coverage_type\",\n \"availability\",\n \"created\",\n \"modified\",\n \"start_date\",\n \"end_date\",\n \"east\",\n \"north\",\n \"eastlimit\",\n \"westlimit\",\n \"northlimit\",\n \"southlimit\"\n ]\n\n\nclass DiscoverResourceValidator(serializers.Serializer):\n text = HaystackCharField(required=False,\n help_text='Search across all Resource Fields')\n author = HaystackCharField(required=False,\n help_text='Search by author')\n contributor = HaystackMultiValueField(required=False,\n help_text='Search by contributor')\n subject = HaystackMultiValueField(required=False,\n help_text='Search within subject keywords')\n abstract = HaystackCharField(required=False,\n help_text='Search within the abstract')\n resource_type = HaystackCharField(required=False,\n help_text='Search by resource type')\n content_type = HaystackMultiValueField(required=False,\n help_text='Search by content type')\n coverage_type = HaystackMultiValueField(required=False,\n help_text='Search by coverage type '\n '(point, box, period)')\n availability = HaystackMultiValueField(required=False,\n help_text='Search by availability '\n '(discoverable, public, published)')\n created = HaystackDateField(required=False,\n help_text='Search by created date')\n modified = HaystackDateField(required=False,\n help_text='Search by modified date')\n start_date = HaystackDateField(required=False,\n help_text='Search by start date')\n end_date = HaystackDateField(required=False,\n help_text='Search by end date')\n east = HaystackFloatField(required=False,\n help_text='Search by location or box center east longitude')\n north = HaystackFloatField(required=False,\n help_text='Search by location or box center north latitude')\n eastlimit = HaystackFloatField(required=False,\n help_text='Search by east limit longitude')\n westlimit = HaystackFloatField(required=False,\n help_text='Search by west limit longitude')\n northlimit = HaystackFloatField(required=False,\n 
help_text='Search by north limit latitude')\n southlimit = HaystackFloatField(required=False,\n help_text='Search by south limit latitude')\n\n\nclass DiscoverSearchView(HaystackViewSet):\n index_models = [BaseResource]\n serializer_class = DiscoveryResourceSerializer\n\n @action(detail=True, methods=['get'])\n @swagger_auto_schema(operation_description=\"Search HydroShare Resources using solr conventions.\"\n \"We use haystack for queries so you can use all of \"\n \"the parameters described here in combination with \"\n \"field lookups \"\n \"https://django-haystack.readthedocs.io/en/latest/\"\n \"searchqueryset_api.html?highlight=lookups#id1\",\n query_serializer=DiscoverResourceValidator)\n def list(self, request):\n return super(DiscoverSearchView, self).list(request)\n", "path": "hs_rest_api/discovery.py"}], "after_files": [{"content": "from drf_haystack.serializers import HaystackSerializer\nfrom drf_haystack.viewsets import HaystackViewSet\nfrom hs_core.search_indexes import BaseResourceIndex\nfrom hs_core.models import BaseResource\nfrom drf_haystack.fields import HaystackCharField, HaystackDateField, HaystackMultiValueField, \\\n HaystackFloatField\nfrom drf_yasg.utils import swagger_auto_schema\nfrom rest_framework.decorators import action\nfrom rest_framework import serializers\n\n\nclass DiscoveryResourceSerializer(HaystackSerializer):\n class Meta:\n index_classes = [BaseResourceIndex]\n fields = [\n \"short_id\",\n \"title\",\n \"author\",\n \"contributor\",\n \"subject\",\n \"abstract\",\n \"resource_type\",\n \"content_type\",\n \"coverage_type\",\n \"availability\",\n \"created\",\n \"modified\",\n \"start_date\",\n \"end_date\",\n \"east\",\n \"north\",\n \"eastlimit\",\n \"westlimit\",\n \"northlimit\",\n \"southlimit\"\n ]\n\n\nclass DiscoverResourceValidator(serializers.Serializer):\n text = HaystackCharField(required=False,\n help_text='Search across all Resource Fields')\n author = HaystackCharField(required=False,\n help_text='Search by author')\n contributor = HaystackMultiValueField(required=False,\n help_text='Search by contributor')\n subject = HaystackMultiValueField(required=False,\n help_text='Search within subject keywords')\n abstract = HaystackCharField(required=False,\n help_text='Search within the abstract')\n resource_type = HaystackCharField(required=False,\n help_text='Search by resource type')\n content_type = HaystackMultiValueField(required=False,\n help_text='Search by content type')\n coverage_type = HaystackMultiValueField(required=False,\n help_text='Search by coverage type '\n '(point, box, period)')\n availability = HaystackMultiValueField(required=False,\n help_text='Search by availability '\n '(discoverable, public, published)')\n created = HaystackDateField(required=False,\n help_text='Search by created date')\n modified = HaystackDateField(required=False,\n help_text='Search by modified date')\n start_date = HaystackDateField(required=False,\n help_text='Search by start date')\n end_date = HaystackDateField(required=False,\n help_text='Search by end date')\n east = HaystackFloatField(required=False,\n help_text='Search by location or box center east longitude')\n north = HaystackFloatField(required=False,\n help_text='Search by location or box center north latitude')\n eastlimit = HaystackFloatField(required=False,\n help_text='Search by east limit longitude')\n westlimit = HaystackFloatField(required=False,\n help_text='Search by west limit longitude')\n northlimit = HaystackFloatField(required=False,\n help_text='Search by north limit 
latitude')\n southlimit = HaystackFloatField(required=False,\n help_text='Search by south limit latitude')\n\n\nclass DiscoverSearchView(HaystackViewSet):\n index_models = [BaseResource]\n serializer_class = DiscoveryResourceSerializer\n\n @action(detail=True, methods=['get'])\n @swagger_auto_schema(operation_description=\"Search HydroShare Resources using solr conventions.\"\n \"We use haystack for queries so you can use all of \"\n \"the parameters described here in combination with \"\n \"field lookups \"\n \"https://django-haystack.readthedocs.io/en/latest/\"\n \"searchqueryset_api.html?highlight=lookups#id1\",\n query_serializer=DiscoverResourceValidator)\n def list(self, request):\n return super(DiscoverSearchView, self).list(request)\n", "path": "hs_rest_api/discovery.py"}]}
num_tokens: 1,354 | num_tokens_diff: 88