| problem_id (stringlengths 18-22) | source (stringclasses 1 value) | task_type (stringclasses 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.1k-25.4k) | golden_diff (stringlengths 145-5.13k) | verification_info (stringlengths 582-39.1k) | num_tokens (int64 271-4.1k) | num_tokens_diff (int64 47-1.02k) |
---|---|---|---|---|---|---|---|---|
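The raw rows below can also be loaded and unpacked programmatically. A minimal sketch, assuming the data is published on the Hugging Face Hub under the `rasdani/github-patches` id shown in the `source` column and exposes a `train` split (adjust the repo id or split name if they differ):

```python
# Minimal sketch: load one row of the dataset and unpack its fields.
# Assumptions: the Hub repo id "rasdani/github-patches" (taken from the
# `source` column) and the "train" split name are not confirmed by this
# dump; change them if the actual dataset uses different values.
import json

from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")
row = ds[0]

print(row["problem_id"], row["in_source_id"], row["num_tokens"], row["num_tokens_diff"])
print(row["prompt"][:300])      # issue text + buggy files + patching instructions
print(row["golden_diff"])       # reference patch in `git diff` format

info = json.loads(row["verification_info"])  # JSON string, see keys below
print(sorted(info.keys()))
```

In the rows shown here, `verification_info` is a JSON string carrying `golden_diff`, `issue`, `before_files`, and `after_files`, i.e. the file contents before and after the reference patch.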
gh_patches_debug_8296 | rasdani/github-patches | git_diff | plone__Products.CMFPlone-1383 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Author feedback form broken
Author feedback seems to have been missed in the refactoring of settings into the registry. It stills expects to find portal.email_from_name
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Products/CMFPlone/browser/author.py`
Content:
```
1 from AccessControl import Unauthorized
2
3 from Products.CMFCore.interfaces import IPropertiesTool
4 from Products.CMFPlone import PloneMessageFactory as _
5 from Products.CMFPlone.interfaces import ISecuritySchema
6 from Products.CMFPlone.interfaces.controlpanel import IMailSchema
7 from Products.CMFPlone.utils import getToolByName
8 from Products.CMFPlone.utils import pretty_title_or_id
9 from Products.Five.browser import BrowserView
10 from Products.MailHost.interfaces import IMailHost
11 from Products.statusmessages.interfaces import IStatusMessage
12
13 from ZODB.POSException import ConflictError
14
15 from interfaces import IAuthorFeedbackForm
16
17 from plone.registry.interfaces import IRegistry
18
19 from urllib import quote_plus
20
21 from z3c.form import button
22 from z3c.form import field
23 from z3c.form import form
24 from z3c.form.interfaces import HIDDEN_MODE
25
26 from zope.component import getMultiAdapter
27 from zope.component import getUtility
28 from zope.interface import implementer
29 from zope.publisher.interfaces import IPublishTraverse
30
31 import logging
32
33 logger = logging.getLogger("Plone")
34
35
36 class AuthorFeedbackForm(form.Form):
37
38 fields = field.Fields(IAuthorFeedbackForm)
39 ignoreContext = True
40
41 @button.buttonAndHandler(_(u'label_send', default='Send'),
42 name='send')
43 def handle_send(self, action):
44 self.portal_state = getMultiAdapter(
45 (self.context, self.request),
46 name=u'plone_portal_state'
47 )
48
49 self.portal = self.portal_state.portal()
50 self.membership_tool = getToolByName(
51 self.context, 'portal_membership'
52 )
53
54 self.feedback_template = self.context.restrictedTraverse(
55 '@@author-feedback-template'
56 )
57
58 data, errors = self.extractData()
59 if errors:
60 IStatusMessage(self.request).addStatusMessage(
61 self.formErrorsMessage,
62 type=u'error'
63 )
64
65 return
66
67 referer = data.get('referer', 'unknown referer')
68 subject = data.get('subject', '')
69 message = data.get('message', '')
70 # Author is None means portal administrator
71 author = data.get('author', None)
72
73 sender = self.portal_state.member()
74 registry = getUtility(IRegistry)
75 mail_settings = registry.forInterface(IMailSchema, prefix='plone')
76 envelope_from = mail_settings.email_from_address
77
78 if author is None:
79 send_to_address = mail_settings.email_from_address
80 else:
81 author_member = self.membership_tool.getMemberById(author)
82 send_to_address = author_member.getProperty('email')
83
84 send_from_address = sender.getProperty('email')
85
86 if send_from_address == '':
87 IStatusMessage(self.request).addStatusMessage(
88 _(u'Could not find a valid email address'),
89 type=u'error'
90 )
91 return
92
93 sender_id = "%s (%s), %s" % (
94 sender.getProperty('fullname'),
95 sender.getId(),
96 send_from_address
97 )
98
99 mail_host = getUtility(IMailHost)
100 registry = getUtility(IRegistry)
101 email_charset = registry.get('plone.email_charset', 'utf-8')
102
103 try:
104 message = self.feedback_template(
105 self, send_from_address=send_from_address,
106 sender_id=sender_id, url=referer, subject=subject,
107 message=message, encoding=email_charset
108 )
109
110 message = message.encode(email_charset)
111
112 mail_host.send(
113 message, send_to_address, envelope_from,
114 subject=subject, charset=email_charset
115 )
116 except ConflictError:
117 raise
118 except Exception as e:
119 logger.info("Unable to send mail: " + str(e))
120
121 IStatusMessage(self.request).addStatusMessage(
122 _(u'Unable to send mail.'),
123 type=u'error'
124 )
125
126 return
127
128 IStatusMessage(self.request).addStatusMessage(
129 _(u'Mail sent.'),
130 type=u'info'
131 )
132 self.request.response.redirect('%s/author/%s' % (
133 self.portal.absolute_url(),
134 author or ''))
135 return
136
137
138 @implementer(IPublishTraverse)
139 class AuthorView(BrowserView):
140
141 def __init__(self, context, request):
142 super(AuthorView, self).__init__(context, request)
143
144 self.username = None
145
146 def publishTraverse(self, request, name):
147 request['TraversalRequestNameStack'] = []
148
149 self.username = name
150 return self
151
152 @property
153 def is_anonymous(self):
154 return self.portal_state.anonymous()
155
156 @property
157 def is_owner(self):
158 current_member = self.portal_state.member()
159 return current_member.getId() == self.username
160
161 @property
162 def author(self):
163 username = self.username
164
165 if not username:
166 return {}
167
168 authorinfo = self.membership_tool.getMemberInfo(username)
169 portrait = self.membership_tool.getPersonalPortrait(username)
170
171 if not authorinfo or not portrait:
172 return {}
173
174 return {
175 'info': authorinfo,
176 'portrait': portrait
177 }
178
179 @property
180 def member_info(self):
181 current_member = self.portal_state.member()
182 if not current_member or not current_member.getId():
183 return {'url': None, 'email': None}
184
185 return {
186 'url': quote_plus(current_member.getId()),
187 'email': current_member.getProperty('email')
188 }
189
190 @property
191 def author_content(self):
192 results = []
193
194 plone_view = self.context.restrictedTraverse(
195 '@@plone'
196 )
197
198 brains = self.portal_catalog.searchResults(
199 Creator=self.username,
200 sort_on='created',
201 sort_order='reverse'
202 )
203
204 for brain in brains[:10]:
205 results.append({
206 'title': pretty_title_or_id(
207 self, brain
208 ),
209 'date': plone_view.toLocalizedTime(
210 brain.Date
211 ),
212 'url': brain.getURL()
213 })
214
215 return results
216
217 def home_folder(self, username):
218 return self.membership_tool.getHomeFolder(id=username)
219
220 def __call__(self):
221
222 self.portal_properties = getUtility(
223 IPropertiesTool
224 )
225
226 self.portal_catalog = getToolByName(
227 self.context, 'portal_catalog'
228 )
229
230 # XXX: getUtility call does not work.
231 self.membership_tool = getToolByName(
232 self.context, 'portal_membership'
233 )
234
235 self.portal_state = getMultiAdapter(
236 (self.context, self.request),
237 name=u'plone_portal_state'
238 )
239
240 self.feedback_form = AuthorFeedbackForm(
241 self.context, self.request
242 )
243 self.feedback_form.update()
244 self.feedback_form.widgets["author"].mode = HIDDEN_MODE
245 self.feedback_form.widgets["referer"].mode = HIDDEN_MODE
246 self.feedback_form.widgets["author"].value = self.username
247 self.feedback_form.widgets["referer"].value = self.request.get(
248 'referer',
249 self.request.get('HTTP_REFERER', 'unknown url')
250 )
251
252 registry = getUtility(IRegistry)
253 security_settings = registry.forInterface(
254 ISecuritySchema, prefix='plone')
255 allow_anonymous_view_about = security_settings.allow_anon_views_about
256
257 mail_settings = registry.forInterface(IMailSchema, prefix='plone')
258 self.email_from_address = mail_settings.email_from_address
259
260 if self.is_anonymous and not allow_anonymous_view_about:
261 raise Unauthorized()
262
263 return self.index()
264
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Products/CMFPlone/browser/author.py b/Products/CMFPlone/browser/author.py
--- a/Products/CMFPlone/browser/author.py
+++ b/Products/CMFPlone/browser/author.py
@@ -104,7 +104,8 @@
message = self.feedback_template(
self, send_from_address=send_from_address,
sender_id=sender_id, url=referer, subject=subject,
- message=message, encoding=email_charset
+ message=message, encoding=email_charset,
+ email_from_name=mail_settings.email_from_name
)
message = message.encode(email_charset)
| {"golden_diff": "diff --git a/Products/CMFPlone/browser/author.py b/Products/CMFPlone/browser/author.py\n--- a/Products/CMFPlone/browser/author.py\n+++ b/Products/CMFPlone/browser/author.py\n@@ -104,7 +104,8 @@\n message = self.feedback_template(\n self, send_from_address=send_from_address,\n sender_id=sender_id, url=referer, subject=subject,\n- message=message, encoding=email_charset\n+ message=message, encoding=email_charset,\n+ email_from_name=mail_settings.email_from_name\n )\n \n message = message.encode(email_charset)\n", "issue": "Author feedback form broken\nAuthor feedback seems to have been missed in the refactoring of settings into the registry. It stills expects to find portal.email_from_name\n\n", "before_files": [{"content": "from AccessControl import Unauthorized\n\nfrom Products.CMFCore.interfaces import IPropertiesTool\nfrom Products.CMFPlone import PloneMessageFactory as _\nfrom Products.CMFPlone.interfaces import ISecuritySchema\nfrom Products.CMFPlone.interfaces.controlpanel import IMailSchema\nfrom Products.CMFPlone.utils import getToolByName\nfrom Products.CMFPlone.utils import pretty_title_or_id\nfrom Products.Five.browser import BrowserView\nfrom Products.MailHost.interfaces import IMailHost\nfrom Products.statusmessages.interfaces import IStatusMessage\n\nfrom ZODB.POSException import ConflictError\n\nfrom interfaces import IAuthorFeedbackForm\n\nfrom plone.registry.interfaces import IRegistry\n\nfrom urllib import quote_plus\n\nfrom z3c.form import button\nfrom z3c.form import field\nfrom z3c.form import form\nfrom z3c.form.interfaces import HIDDEN_MODE\n\nfrom zope.component import getMultiAdapter\nfrom zope.component import getUtility\nfrom zope.interface import implementer\nfrom zope.publisher.interfaces import IPublishTraverse\n\nimport logging\n\nlogger = logging.getLogger(\"Plone\")\n\n\nclass AuthorFeedbackForm(form.Form):\n\n fields = field.Fields(IAuthorFeedbackForm)\n ignoreContext = True\n\n @button.buttonAndHandler(_(u'label_send', default='Send'),\n name='send')\n def handle_send(self, action):\n self.portal_state = getMultiAdapter(\n (self.context, self.request),\n name=u'plone_portal_state'\n )\n\n self.portal = self.portal_state.portal()\n self.membership_tool = getToolByName(\n self.context, 'portal_membership'\n )\n\n self.feedback_template = self.context.restrictedTraverse(\n '@@author-feedback-template'\n )\n\n data, errors = self.extractData()\n if errors:\n IStatusMessage(self.request).addStatusMessage(\n self.formErrorsMessage,\n type=u'error'\n )\n\n return\n\n referer = data.get('referer', 'unknown referer')\n subject = data.get('subject', '')\n message = data.get('message', '')\n # Author is None means portal administrator\n author = data.get('author', None)\n\n sender = self.portal_state.member()\n registry = getUtility(IRegistry)\n mail_settings = registry.forInterface(IMailSchema, prefix='plone')\n envelope_from = mail_settings.email_from_address\n\n if author is None:\n send_to_address = mail_settings.email_from_address\n else:\n author_member = self.membership_tool.getMemberById(author)\n send_to_address = author_member.getProperty('email')\n\n send_from_address = sender.getProperty('email')\n\n if send_from_address == '':\n IStatusMessage(self.request).addStatusMessage(\n _(u'Could not find a valid email address'),\n type=u'error'\n )\n return\n\n sender_id = \"%s (%s), %s\" % (\n sender.getProperty('fullname'),\n sender.getId(),\n send_from_address\n )\n\n mail_host = getUtility(IMailHost)\n registry = getUtility(IRegistry)\n 
email_charset = registry.get('plone.email_charset', 'utf-8')\n\n try:\n message = self.feedback_template(\n self, send_from_address=send_from_address,\n sender_id=sender_id, url=referer, subject=subject,\n message=message, encoding=email_charset\n )\n\n message = message.encode(email_charset)\n\n mail_host.send(\n message, send_to_address, envelope_from,\n subject=subject, charset=email_charset\n )\n except ConflictError:\n raise\n except Exception as e:\n logger.info(\"Unable to send mail: \" + str(e))\n\n IStatusMessage(self.request).addStatusMessage(\n _(u'Unable to send mail.'),\n type=u'error'\n )\n\n return\n\n IStatusMessage(self.request).addStatusMessage(\n _(u'Mail sent.'),\n type=u'info'\n )\n self.request.response.redirect('%s/author/%s' % (\n self.portal.absolute_url(),\n author or ''))\n return\n\n\n@implementer(IPublishTraverse)\nclass AuthorView(BrowserView):\n\n def __init__(self, context, request):\n super(AuthorView, self).__init__(context, request)\n\n self.username = None\n\n def publishTraverse(self, request, name):\n request['TraversalRequestNameStack'] = []\n\n self.username = name\n return self\n\n @property\n def is_anonymous(self):\n return self.portal_state.anonymous()\n\n @property\n def is_owner(self):\n current_member = self.portal_state.member()\n return current_member.getId() == self.username\n\n @property\n def author(self):\n username = self.username\n\n if not username:\n return {}\n\n authorinfo = self.membership_tool.getMemberInfo(username)\n portrait = self.membership_tool.getPersonalPortrait(username)\n\n if not authorinfo or not portrait:\n return {}\n\n return {\n 'info': authorinfo,\n 'portrait': portrait\n }\n\n @property\n def member_info(self):\n current_member = self.portal_state.member()\n if not current_member or not current_member.getId():\n return {'url': None, 'email': None}\n\n return {\n 'url': quote_plus(current_member.getId()),\n 'email': current_member.getProperty('email')\n }\n\n @property\n def author_content(self):\n results = []\n\n plone_view = self.context.restrictedTraverse(\n '@@plone'\n )\n\n brains = self.portal_catalog.searchResults(\n Creator=self.username,\n sort_on='created',\n sort_order='reverse'\n )\n\n for brain in brains[:10]:\n results.append({\n 'title': pretty_title_or_id(\n self, brain\n ),\n 'date': plone_view.toLocalizedTime(\n brain.Date\n ),\n 'url': brain.getURL()\n })\n\n return results\n\n def home_folder(self, username):\n return self.membership_tool.getHomeFolder(id=username)\n\n def __call__(self):\n\n self.portal_properties = getUtility(\n IPropertiesTool\n )\n\n self.portal_catalog = getToolByName(\n self.context, 'portal_catalog'\n )\n\n # XXX: getUtility call does not work.\n self.membership_tool = getToolByName(\n self.context, 'portal_membership'\n )\n\n self.portal_state = getMultiAdapter(\n (self.context, self.request),\n name=u'plone_portal_state'\n )\n\n self.feedback_form = AuthorFeedbackForm(\n self.context, self.request\n )\n self.feedback_form.update()\n self.feedback_form.widgets[\"author\"].mode = HIDDEN_MODE\n self.feedback_form.widgets[\"referer\"].mode = HIDDEN_MODE\n self.feedback_form.widgets[\"author\"].value = self.username\n self.feedback_form.widgets[\"referer\"].value = self.request.get(\n 'referer',\n self.request.get('HTTP_REFERER', 'unknown url')\n )\n\n registry = getUtility(IRegistry)\n security_settings = registry.forInterface(\n ISecuritySchema, prefix='plone')\n allow_anonymous_view_about = security_settings.allow_anon_views_about\n\n mail_settings = 
registry.forInterface(IMailSchema, prefix='plone')\n self.email_from_address = mail_settings.email_from_address\n\n if self.is_anonymous and not allow_anonymous_view_about:\n raise Unauthorized()\n\n return self.index()\n", "path": "Products/CMFPlone/browser/author.py"}], "after_files": [{"content": "from AccessControl import Unauthorized\n\nfrom Products.CMFCore.interfaces import IPropertiesTool\nfrom Products.CMFPlone import PloneMessageFactory as _\nfrom Products.CMFPlone.interfaces import ISecuritySchema\nfrom Products.CMFPlone.interfaces.controlpanel import IMailSchema\nfrom Products.CMFPlone.utils import getToolByName\nfrom Products.CMFPlone.utils import pretty_title_or_id\nfrom Products.Five.browser import BrowserView\nfrom Products.MailHost.interfaces import IMailHost\nfrom Products.statusmessages.interfaces import IStatusMessage\n\nfrom ZODB.POSException import ConflictError\n\nfrom interfaces import IAuthorFeedbackForm\n\nfrom plone.registry.interfaces import IRegistry\n\nfrom urllib import quote_plus\n\nfrom z3c.form import button\nfrom z3c.form import field\nfrom z3c.form import form\nfrom z3c.form.interfaces import HIDDEN_MODE\n\nfrom zope.component import getMultiAdapter\nfrom zope.component import getUtility\nfrom zope.interface import implementer\nfrom zope.publisher.interfaces import IPublishTraverse\n\nimport logging\n\nlogger = logging.getLogger(\"Plone\")\n\n\nclass AuthorFeedbackForm(form.Form):\n\n fields = field.Fields(IAuthorFeedbackForm)\n ignoreContext = True\n\n @button.buttonAndHandler(_(u'label_send', default='Send'),\n name='send')\n def handle_send(self, action):\n self.portal_state = getMultiAdapter(\n (self.context, self.request),\n name=u'plone_portal_state'\n )\n\n self.portal = self.portal_state.portal()\n self.membership_tool = getToolByName(\n self.context, 'portal_membership'\n )\n\n self.feedback_template = self.context.restrictedTraverse(\n '@@author-feedback-template'\n )\n\n data, errors = self.extractData()\n if errors:\n IStatusMessage(self.request).addStatusMessage(\n self.formErrorsMessage,\n type=u'error'\n )\n\n return\n\n referer = data.get('referer', 'unknown referer')\n subject = data.get('subject', '')\n message = data.get('message', '')\n # Author is None means portal administrator\n author = data.get('author', None)\n\n sender = self.portal_state.member()\n registry = getUtility(IRegistry)\n mail_settings = registry.forInterface(IMailSchema, prefix='plone')\n envelope_from = mail_settings.email_from_address\n\n if author is None:\n send_to_address = mail_settings.email_from_address\n else:\n author_member = self.membership_tool.getMemberById(author)\n send_to_address = author_member.getProperty('email')\n\n send_from_address = sender.getProperty('email')\n\n if send_from_address == '':\n IStatusMessage(self.request).addStatusMessage(\n _(u'Could not find a valid email address'),\n type=u'error'\n )\n return\n\n sender_id = \"%s (%s), %s\" % (\n sender.getProperty('fullname'),\n sender.getId(),\n send_from_address\n )\n\n mail_host = getUtility(IMailHost)\n registry = getUtility(IRegistry)\n email_charset = registry.get('plone.email_charset', 'utf-8')\n\n try:\n message = self.feedback_template(\n self, send_from_address=send_from_address,\n sender_id=sender_id, url=referer, subject=subject,\n message=message, encoding=email_charset,\n email_from_name=mail_settings.email_from_name\n )\n\n message = message.encode(email_charset)\n\n mail_host.send(\n message, send_to_address, envelope_from,\n subject=subject, charset=email_charset\n )\n 
except ConflictError:\n raise\n except Exception as e:\n logger.info(\"Unable to send mail: \" + str(e))\n\n IStatusMessage(self.request).addStatusMessage(\n _(u'Unable to send mail.'),\n type=u'error'\n )\n\n return\n\n IStatusMessage(self.request).addStatusMessage(\n _(u'Mail sent.'),\n type=u'info'\n )\n self.request.response.redirect('%s/author/%s' % (\n self.portal.absolute_url(),\n author or ''))\n return\n\n\n@implementer(IPublishTraverse)\nclass AuthorView(BrowserView):\n\n def __init__(self, context, request):\n super(AuthorView, self).__init__(context, request)\n\n self.username = None\n\n def publishTraverse(self, request, name):\n request['TraversalRequestNameStack'] = []\n\n self.username = name\n return self\n\n @property\n def is_anonymous(self):\n return self.portal_state.anonymous()\n\n @property\n def is_owner(self):\n current_member = self.portal_state.member()\n return current_member.getId() == self.username\n\n @property\n def author(self):\n username = self.username\n\n if not username:\n return {}\n\n authorinfo = self.membership_tool.getMemberInfo(username)\n portrait = self.membership_tool.getPersonalPortrait(username)\n\n if not authorinfo or not portrait:\n return {}\n\n return {\n 'info': authorinfo,\n 'portrait': portrait\n }\n\n @property\n def member_info(self):\n current_member = self.portal_state.member()\n if not current_member or not current_member.getId():\n return {'url': None, 'email': None}\n\n return {\n 'url': quote_plus(current_member.getId()),\n 'email': current_member.getProperty('email')\n }\n\n @property\n def author_content(self):\n results = []\n\n plone_view = self.context.restrictedTraverse(\n '@@plone'\n )\n\n brains = self.portal_catalog.searchResults(\n Creator=self.username,\n sort_on='created',\n sort_order='reverse'\n )\n\n for brain in brains[:10]:\n results.append({\n 'title': pretty_title_or_id(\n self, brain\n ),\n 'date': plone_view.toLocalizedTime(\n brain.Date\n ),\n 'url': brain.getURL()\n })\n\n return results\n\n def home_folder(self, username):\n return self.membership_tool.getHomeFolder(id=username)\n\n def __call__(self):\n\n self.portal_properties = getUtility(\n IPropertiesTool\n )\n\n self.portal_catalog = getToolByName(\n self.context, 'portal_catalog'\n )\n\n # XXX: getUtility call does not work.\n self.membership_tool = getToolByName(\n self.context, 'portal_membership'\n )\n\n self.portal_state = getMultiAdapter(\n (self.context, self.request),\n name=u'plone_portal_state'\n )\n\n self.feedback_form = AuthorFeedbackForm(\n self.context, self.request\n )\n self.feedback_form.update()\n self.feedback_form.widgets[\"author\"].mode = HIDDEN_MODE\n self.feedback_form.widgets[\"referer\"].mode = HIDDEN_MODE\n self.feedback_form.widgets[\"author\"].value = self.username\n self.feedback_form.widgets[\"referer\"].value = self.request.get(\n 'referer',\n self.request.get('HTTP_REFERER', 'unknown url')\n )\n\n registry = getUtility(IRegistry)\n security_settings = registry.forInterface(\n ISecuritySchema, prefix='plone')\n allow_anonymous_view_about = security_settings.allow_anon_views_about\n\n mail_settings = registry.forInterface(IMailSchema, prefix='plone')\n self.email_from_address = mail_settings.email_from_address\n\n if self.is_anonymous and not allow_anonymous_view_about:\n raise Unauthorized()\n\n return self.index()\n", "path": "Products/CMFPlone/browser/author.py"}]} | 2,586 | 144 |
gh_patches_debug_6019 | rasdani/github-patches | git_diff | cupy__cupy-3335 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`around` result is different compared to numpy
```
>>> np.__version__
'1.16.4'
>>> np.around([2.5])
array([2.])
>>> cupy.__version__
'7.0.0a1'
>>> cupy.around([2.5])
array([3.])
```
NumPy seems to round to even.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cupy/math/rounding.py`
Content:
```
1 from cupy import core
2 from cupy.core import fusion
3 from cupy.math import ufunc
4
5
6 def around(a, decimals=0, out=None):
7 """Rounds to the given number of decimals.
8
9 Args:
10 a (cupy.ndarray): The source array.
11 decimals (int): umber of decimal places to round to (default: 0).
12 If decimals is negative, it specifies the number of positions to
13 the left of the decimal point.
14 out (cupy.ndarray): Output array.
15
16 Returns:
17 cupy.ndarray: Rounded array.
18
19 .. seealso:: :func:`numpy.around`
20
21 """
22 if fusion._is_fusing():
23 return fusion._call_ufunc(core.core._round_ufunc, a, decimals, out=out)
24 a = core.array(a, copy=False)
25 return a.round(decimals, out=out)
26
27
28 def round_(a, decimals=0, out=None):
29 return around(a, decimals, out=out)
30
31
32 rint = ufunc.create_math_ufunc(
33 'rint', 1, 'cupy_rint',
34 '''Rounds each element of an array to the nearest integer.
35
36 .. seealso:: :data:`numpy.rint`
37
38 ''')
39
40
41 floor = ufunc.create_math_ufunc(
42 'floor', 1, 'cupy_floor',
43 '''Rounds each element of an array to its floor integer.
44
45 .. seealso:: :data:`numpy.floor`
46
47 ''', support_complex=False)
48
49
50 ceil = ufunc.create_math_ufunc(
51 'ceil', 1, 'cupy_ceil',
52 '''Rounds each element of an array to its ceiling integer.
53
54 .. seealso:: :data:`numpy.ceil`
55
56 ''', support_complex=False)
57
58
59 trunc = ufunc.create_math_ufunc(
60 'trunc', 1, 'cupy_trunc',
61 '''Rounds each element of an array towards zero.
62
63 .. seealso:: :data:`numpy.trunc`
64
65 ''', support_complex=False)
66
67
68 fix = core.create_ufunc(
69 'cupy_fix', ('e->e', 'f->f', 'd->d'),
70 'out0 = (in0 >= 0.0) ? floor(in0): ceil(in0)',
71 doc='''If given value x is positive, it return floor(x).
72 Else, it return ceil(x).
73
74 .. seealso:: :func:`numpy.fix`
75
76 ''')
77
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cupy/math/rounding.py b/cupy/math/rounding.py
--- a/cupy/math/rounding.py
+++ b/cupy/math/rounding.py
@@ -8,7 +8,7 @@
Args:
a (cupy.ndarray): The source array.
- decimals (int): umber of decimal places to round to (default: 0).
+ decimals (int): Number of decimal places to round to (default: 0).
If decimals is negative, it specifies the number of positions to
the left of the decimal point.
out (cupy.ndarray): Output array.
| {"golden_diff": "diff --git a/cupy/math/rounding.py b/cupy/math/rounding.py\n--- a/cupy/math/rounding.py\n+++ b/cupy/math/rounding.py\n@@ -8,7 +8,7 @@\n \n Args:\n a (cupy.ndarray): The source array.\n- decimals (int): umber of decimal places to round to (default: 0).\n+ decimals (int): Number of decimal places to round to (default: 0).\n If decimals is negative, it specifies the number of positions to\n the left of the decimal point.\n out (cupy.ndarray): Output array.\n", "issue": "`around` result is different compared to numpy\n```\r\n>>> np.__version__\r\n'1.16.4'\r\n>>> np.around([2.5])\r\narray([2.])\r\n>>> cupy.__version__\r\n'7.0.0a1'\r\n>>> cupy.around([2.5])\r\narray([3.])\r\n```\r\nNumPy seems to round to even.\n", "before_files": [{"content": "from cupy import core\nfrom cupy.core import fusion\nfrom cupy.math import ufunc\n\n\ndef around(a, decimals=0, out=None):\n \"\"\"Rounds to the given number of decimals.\n\n Args:\n a (cupy.ndarray): The source array.\n decimals (int): umber of decimal places to round to (default: 0).\n If decimals is negative, it specifies the number of positions to\n the left of the decimal point.\n out (cupy.ndarray): Output array.\n\n Returns:\n cupy.ndarray: Rounded array.\n\n .. seealso:: :func:`numpy.around`\n\n \"\"\"\n if fusion._is_fusing():\n return fusion._call_ufunc(core.core._round_ufunc, a, decimals, out=out)\n a = core.array(a, copy=False)\n return a.round(decimals, out=out)\n\n\ndef round_(a, decimals=0, out=None):\n return around(a, decimals, out=out)\n\n\nrint = ufunc.create_math_ufunc(\n 'rint', 1, 'cupy_rint',\n '''Rounds each element of an array to the nearest integer.\n\n .. seealso:: :data:`numpy.rint`\n\n ''')\n\n\nfloor = ufunc.create_math_ufunc(\n 'floor', 1, 'cupy_floor',\n '''Rounds each element of an array to its floor integer.\n\n .. seealso:: :data:`numpy.floor`\n\n ''', support_complex=False)\n\n\nceil = ufunc.create_math_ufunc(\n 'ceil', 1, 'cupy_ceil',\n '''Rounds each element of an array to its ceiling integer.\n\n .. seealso:: :data:`numpy.ceil`\n\n ''', support_complex=False)\n\n\ntrunc = ufunc.create_math_ufunc(\n 'trunc', 1, 'cupy_trunc',\n '''Rounds each element of an array towards zero.\n\n .. seealso:: :data:`numpy.trunc`\n\n ''', support_complex=False)\n\n\nfix = core.create_ufunc(\n 'cupy_fix', ('e->e', 'f->f', 'd->d'),\n 'out0 = (in0 >= 0.0) ? floor(in0): ceil(in0)',\n doc='''If given value x is positive, it return floor(x).\n Else, it return ceil(x).\n\n .. seealso:: :func:`numpy.fix`\n\n ''')\n", "path": "cupy/math/rounding.py"}], "after_files": [{"content": "from cupy import core\nfrom cupy.core import fusion\nfrom cupy.math import ufunc\n\n\ndef around(a, decimals=0, out=None):\n \"\"\"Rounds to the given number of decimals.\n\n Args:\n a (cupy.ndarray): The source array.\n decimals (int): Number of decimal places to round to (default: 0).\n If decimals is negative, it specifies the number of positions to\n the left of the decimal point.\n out (cupy.ndarray): Output array.\n\n Returns:\n cupy.ndarray: Rounded array.\n\n .. seealso:: :func:`numpy.around`\n\n \"\"\"\n if fusion._is_fusing():\n return fusion._call_ufunc(core.core._round_ufunc, a, decimals, out=out)\n a = core.array(a, copy=False)\n return a.round(decimals, out=out)\n\n\ndef round_(a, decimals=0, out=None):\n return around(a, decimals, out=out)\n\n\nrint = ufunc.create_math_ufunc(\n 'rint', 1, 'cupy_rint',\n '''Rounds each element of an array to the nearest integer.\n\n .. 
seealso:: :data:`numpy.rint`\n\n ''')\n\n\nfloor = ufunc.create_math_ufunc(\n 'floor', 1, 'cupy_floor',\n '''Rounds each element of an array to its floor integer.\n\n .. seealso:: :data:`numpy.floor`\n\n ''', support_complex=False)\n\n\nceil = ufunc.create_math_ufunc(\n 'ceil', 1, 'cupy_ceil',\n '''Rounds each element of an array to its ceiling integer.\n\n .. seealso:: :data:`numpy.ceil`\n\n ''', support_complex=False)\n\n\ntrunc = ufunc.create_math_ufunc(\n 'trunc', 1, 'cupy_trunc',\n '''Rounds each element of an array towards zero.\n\n .. seealso:: :data:`numpy.trunc`\n\n ''', support_complex=False)\n\n\nfix = core.create_ufunc(\n 'cupy_fix', ('e->e', 'f->f', 'd->d'),\n 'out0 = (in0 >= 0.0) ? floor(in0): ceil(in0)',\n doc='''If given value x is positive, it return floor(x).\n Else, it return ceil(x).\n\n .. seealso:: :func:`numpy.fix`\n\n ''')\n", "path": "cupy/math/rounding.py"}]} | 1,026 | 136 |
gh_patches_debug_27011 | rasdani/github-patches | git_diff | dask__distributed-8347 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bokeh 3.3.0 uses absolute URLs and breaks proxied dashboards
I noticed when using `distributed==2023.10.1` with `bokeh==3.2.2` the URLs to the Bokeh JavaScript are relative.
```html
<script type="text/javascript" src="static/js/bokeh.min.js?v=3ca6425586de5036dc01992dd69aa61e9196dd02619557cfaeb1b3d8b77adf724be49401b1168483d165494ce57a6daa16e6f6d3660fef117d45028221f86357"></script>
```
But when upgrading to `bokeh==3.3.0` they become absolute.
```html
<script type="text/javascript" src="/static/js/bokeh.min.js?v=39ef57c3a83533e24f961e5c27f651a61045dbccefac4b5df86a7680b1edaff31886a7c0322250ffb0d758fa14ae156c9b640f60cca99f020096b050a4dbb571"></script>
```
This breaks dashboards that are being proxied at some sub-url.
Setting `dask scheduler --dashboard-prefix ""` doesn't fix it.
### Reproducer
```
$ pip install dask distributed bokeh==3.3.0
$ dask scheduler &
$ curl localhost:8787/status | grep bokeh.min.js
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `distributed/dashboard/core.py`
Content:
```
1 from __future__ import annotations
2
3 import functools
4 import warnings
5
6 from bokeh.application import Application
7 from bokeh.application.handlers.function import FunctionHandler
8 from bokeh.server.server import BokehTornado
9 from bokeh.server.util import create_hosts_allowlist
10
11 import dask
12
13 from distributed.dashboard.utils import BOKEH_VERSION
14 from distributed.versions import BOKEH_REQUIREMENT
15
16 # Set `prereleases=True` to allow for use with dev versions of `bokeh`
17 if not BOKEH_REQUIREMENT.specifier.contains(BOKEH_VERSION, prereleases=True):
18 warnings.warn(
19 f"\nDask needs {BOKEH_REQUIREMENT} for the dashboard."
20 f"\nYou have bokeh={BOKEH_VERSION}."
21 "\nContinuing without the dashboard."
22 )
23 raise ImportError(
24 f"Dask needs {BOKEH_REQUIREMENT} for the dashboard, not bokeh={BOKEH_VERSION}"
25 )
26
27
28 if BOKEH_VERSION.major < 3:
29 from bokeh.models import Panel as TabPanel # noqa: F401
30 else:
31 from bokeh.models import TabPanel # noqa: F401
32
33
34 def BokehApplication(applications, server, prefix="/", template_variables=None):
35 template_variables = template_variables or {}
36 prefix = "/" + prefix.strip("/") + "/" if prefix else "/"
37
38 extra = {"prefix": prefix, **template_variables}
39
40 funcs = {k: functools.partial(v, server, extra) for k, v in applications.items()}
41 apps = {k: Application(FunctionHandler(v)) for k, v in funcs.items()}
42
43 kwargs = dask.config.get("distributed.scheduler.dashboard.bokeh-application").copy()
44 extra_websocket_origins = create_hosts_allowlist(
45 kwargs.pop("allow_websocket_origin"), server.http_server.port
46 )
47
48 return BokehTornado(
49 apps,
50 prefix=prefix,
51 use_index=False,
52 extra_websocket_origins=extra_websocket_origins,
53 **kwargs,
54 )
55
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/distributed/dashboard/core.py b/distributed/dashboard/core.py
--- a/distributed/dashboard/core.py
+++ b/distributed/dashboard/core.py
@@ -5,6 +5,7 @@
from bokeh.application import Application
from bokeh.application.handlers.function import FunctionHandler
+from bokeh.resources import Resources
from bokeh.server.server import BokehTornado
from bokeh.server.util import create_hosts_allowlist
@@ -31,6 +32,11 @@
from bokeh.models import TabPanel # noqa: F401
+class DaskBokehTornado(BokehTornado):
+ def resources(self, absolute_url: str | bool | None = True) -> Resources:
+ return super().resources(absolute_url)
+
+
def BokehApplication(applications, server, prefix="/", template_variables=None):
template_variables = template_variables or {}
prefix = "/" + prefix.strip("/") + "/" if prefix else "/"
@@ -45,10 +51,11 @@
kwargs.pop("allow_websocket_origin"), server.http_server.port
)
- return BokehTornado(
+ return DaskBokehTornado(
apps,
prefix=prefix,
use_index=False,
extra_websocket_origins=extra_websocket_origins,
+ absolute_url="",
**kwargs,
)
| {"golden_diff": "diff --git a/distributed/dashboard/core.py b/distributed/dashboard/core.py\n--- a/distributed/dashboard/core.py\n+++ b/distributed/dashboard/core.py\n@@ -5,6 +5,7 @@\n \n from bokeh.application import Application\n from bokeh.application.handlers.function import FunctionHandler\n+from bokeh.resources import Resources\n from bokeh.server.server import BokehTornado\n from bokeh.server.util import create_hosts_allowlist\n \n@@ -31,6 +32,11 @@\n from bokeh.models import TabPanel # noqa: F401\n \n \n+class DaskBokehTornado(BokehTornado):\n+ def resources(self, absolute_url: str | bool | None = True) -> Resources:\n+ return super().resources(absolute_url)\n+\n+\n def BokehApplication(applications, server, prefix=\"/\", template_variables=None):\n template_variables = template_variables or {}\n prefix = \"/\" + prefix.strip(\"/\") + \"/\" if prefix else \"/\"\n@@ -45,10 +51,11 @@\n kwargs.pop(\"allow_websocket_origin\"), server.http_server.port\n )\n \n- return BokehTornado(\n+ return DaskBokehTornado(\n apps,\n prefix=prefix,\n use_index=False,\n extra_websocket_origins=extra_websocket_origins,\n+ absolute_url=\"\",\n **kwargs,\n )\n", "issue": "Bokeh 3.3.0 uses absolute URLs and breaks proxied dashboards\nI noticed when using `distributed==2023.10.1` with `bokeh==3.2.2` the URLs to the Bokeh JavaScript are relative.\r\n\r\n```html\r\n<script type=\"text/javascript\" src=\"static/js/bokeh.min.js?v=3ca6425586de5036dc01992dd69aa61e9196dd02619557cfaeb1b3d8b77adf724be49401b1168483d165494ce57a6daa16e6f6d3660fef117d45028221f86357\"></script>\r\n```\r\n\r\nBut when upgrading to `bokeh==3.3.0` they become absolute.\r\n\r\n```html\r\n<script type=\"text/javascript\" src=\"/static/js/bokeh.min.js?v=39ef57c3a83533e24f961e5c27f651a61045dbccefac4b5df86a7680b1edaff31886a7c0322250ffb0d758fa14ae156c9b640f60cca99f020096b050a4dbb571\"></script>\r\n```\r\n\r\nThis breaks dashboards that are being proxied at some sub-url.\r\n\r\nSetting `dask scheduler --dashboard-prefix \"\"` doesn't fix it.\r\n\r\n### Reproducer\r\n\r\n```\r\n$ pip install dask distributed bokeh==3.3.0\r\n$ dask scheduler &\r\n$ curl localhost:8787/status | grep bokeh.min.js\r\n```\n", "before_files": [{"content": "from __future__ import annotations\n\nimport functools\nimport warnings\n\nfrom bokeh.application import Application\nfrom bokeh.application.handlers.function import FunctionHandler\nfrom bokeh.server.server import BokehTornado\nfrom bokeh.server.util import create_hosts_allowlist\n\nimport dask\n\nfrom distributed.dashboard.utils import BOKEH_VERSION\nfrom distributed.versions import BOKEH_REQUIREMENT\n\n# Set `prereleases=True` to allow for use with dev versions of `bokeh`\nif not BOKEH_REQUIREMENT.specifier.contains(BOKEH_VERSION, prereleases=True):\n warnings.warn(\n f\"\\nDask needs {BOKEH_REQUIREMENT} for the dashboard.\"\n f\"\\nYou have bokeh={BOKEH_VERSION}.\"\n \"\\nContinuing without the dashboard.\"\n )\n raise ImportError(\n f\"Dask needs {BOKEH_REQUIREMENT} for the dashboard, not bokeh={BOKEH_VERSION}\"\n )\n\n\nif BOKEH_VERSION.major < 3:\n from bokeh.models import Panel as TabPanel # noqa: F401\nelse:\n from bokeh.models import TabPanel # noqa: F401\n\n\ndef BokehApplication(applications, server, prefix=\"/\", template_variables=None):\n template_variables = template_variables or {}\n prefix = \"/\" + prefix.strip(\"/\") + \"/\" if prefix else \"/\"\n\n extra = {\"prefix\": prefix, **template_variables}\n\n funcs = {k: functools.partial(v, server, extra) for k, v in applications.items()}\n apps = {k: 
Application(FunctionHandler(v)) for k, v in funcs.items()}\n\n kwargs = dask.config.get(\"distributed.scheduler.dashboard.bokeh-application\").copy()\n extra_websocket_origins = create_hosts_allowlist(\n kwargs.pop(\"allow_websocket_origin\"), server.http_server.port\n )\n\n return BokehTornado(\n apps,\n prefix=prefix,\n use_index=False,\n extra_websocket_origins=extra_websocket_origins,\n **kwargs,\n )\n", "path": "distributed/dashboard/core.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport functools\nimport warnings\n\nfrom bokeh.application import Application\nfrom bokeh.application.handlers.function import FunctionHandler\nfrom bokeh.resources import Resources\nfrom bokeh.server.server import BokehTornado\nfrom bokeh.server.util import create_hosts_allowlist\n\nimport dask\n\nfrom distributed.dashboard.utils import BOKEH_VERSION\nfrom distributed.versions import BOKEH_REQUIREMENT\n\n# Set `prereleases=True` to allow for use with dev versions of `bokeh`\nif not BOKEH_REQUIREMENT.specifier.contains(BOKEH_VERSION, prereleases=True):\n warnings.warn(\n f\"\\nDask needs {BOKEH_REQUIREMENT} for the dashboard.\"\n f\"\\nYou have bokeh={BOKEH_VERSION}.\"\n \"\\nContinuing without the dashboard.\"\n )\n raise ImportError(\n f\"Dask needs {BOKEH_REQUIREMENT} for the dashboard, not bokeh={BOKEH_VERSION}\"\n )\n\n\nif BOKEH_VERSION.major < 3:\n from bokeh.models import Panel as TabPanel # noqa: F401\nelse:\n from bokeh.models import TabPanel # noqa: F401\n\n\nclass DaskBokehTornado(BokehTornado):\n def resources(self, absolute_url: str | bool | None = True) -> Resources:\n return super().resources(absolute_url)\n\n\ndef BokehApplication(applications, server, prefix=\"/\", template_variables=None):\n template_variables = template_variables or {}\n prefix = \"/\" + prefix.strip(\"/\") + \"/\" if prefix else \"/\"\n\n extra = {\"prefix\": prefix, **template_variables}\n\n funcs = {k: functools.partial(v, server, extra) for k, v in applications.items()}\n apps = {k: Application(FunctionHandler(v)) for k, v in funcs.items()}\n\n kwargs = dask.config.get(\"distributed.scheduler.dashboard.bokeh-application\").copy()\n extra_websocket_origins = create_hosts_allowlist(\n kwargs.pop(\"allow_websocket_origin\"), server.http_server.port\n )\n\n return DaskBokehTornado(\n apps,\n prefix=prefix,\n use_index=False,\n extra_websocket_origins=extra_websocket_origins,\n absolute_url=\"\",\n **kwargs,\n )\n", "path": "distributed/dashboard/core.py"}]} | 1,214 | 292 |
gh_patches_debug_1824 | rasdani/github-patches | git_diff | ManageIQ__integration_tests-8533 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
is_displayed for catalog all page is not working as expected
Currently the is_display of catalog all page returns True even if the view is on Add catalog page.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cfme/services/catalogs/catalog.py`
Content:
```
1 import attr
2 from navmazing import NavigateToAttribute
3 from navmazing import NavigateToSibling
4 from widgetastic.utils import Parameter
5 from widgetastic.widget import Text
6 from widgetastic_patternfly import Button
7 from widgetastic_patternfly import CandidateNotFound
8 from widgetastic_patternfly import Input
9
10 from . import ServicesCatalogView
11 from cfme.common import Taggable
12 from cfme.modeling.base import BaseCollection
13 from cfme.modeling.base import BaseEntity
14 from cfme.utils.appliance.implementations.ui import CFMENavigateStep
15 from cfme.utils.appliance.implementations.ui import navigate_to
16 from cfme.utils.appliance.implementations.ui import navigator
17 from cfme.utils.pretty import Pretty
18 from cfme.utils.update import Updateable
19 from cfme.utils.wait import wait_for
20 from widgetastic_manageiq import MultiBoxSelect
21
22
23 class CatalogsMultiBoxSelect(MultiBoxSelect):
24 move_into_button = Button(title=Parameter("@move_into"))
25 move_from_button = Button(title=Parameter("@move_from"))
26
27
28 class CatalogForm(ServicesCatalogView):
29 title = Text('#explorer_title_text')
30
31 name = Input(name='name')
32 description = Input(name="description")
33 assign_catalog_items = CatalogsMultiBoxSelect(
34 move_into="Move Selected buttons right",
35 move_from="Move Selected buttons left",
36 available_items="available_fields",
37 chosen_items="selected_fields"
38 )
39
40 save_button = Button('Save')
41 cancel_button = Button('Cancel')
42
43
44 class CatalogsView(ServicesCatalogView):
45 title = Text("#explorer_title_text")
46
47 @property
48 def is_displayed(self):
49 return (
50 self.in_explorer and
51 self.catalogs.is_opened and
52 self.catalogs.tree.currently_selected == ["All Catalogs"])
53
54
55 class DetailsCatalogView(ServicesCatalogView):
56 title = Text("#explorer_title_text")
57
58 @property
59 def is_displayed(self):
60 return (
61 self.in_explorer and self.catalogs.is_opened and
62 self.title.text == 'Catalog "{}"'.format(self.context["object"].name)
63 )
64
65
66 class AddCatalogView(CatalogForm):
67
68 add_button = Button("Add")
69
70 @property
71 def is_displayed(self):
72 return (
73 self.in_explorer and self.catalogs.is_opened and
74 self.title.text == 'Adding a new Catalog'
75 )
76
77
78 class EditCatalogView(CatalogForm):
79
80 save_button = Button('Save')
81 reset_button = Button('Reset')
82
83 @property
84 def is_displayed(self):
85 return (
86 self.in_explorer and self.catalogs.is_opened and
87 self.title.text == 'Editing Catalog "{}"'.format(self.context["object"].name)
88 )
89
90
91 @attr.s
92 class Catalog(BaseEntity, Updateable, Pretty, Taggable):
93
94 name = attr.ib()
95 description = attr.ib()
96 items = attr.ib(default=None)
97
98 def update(self, updates):
99 view = navigate_to(self, 'Edit')
100 changed = view.fill(updates)
101 if changed:
102 view.save_button.click()
103 else:
104 view.cancel_button.click()
105 view = self.create_view(DetailsCatalogView, override=updates, wait='10s')
106 view.flash.assert_no_error()
107 if changed:
108 view.flash.assert_message(
109 'Catalog "{}" was saved'.format(updates.get('name', self.name)))
110 else:
111 view.flash.assert_message(
112 'Edit of Catalog "{}" was cancelled by the user'.format(self.name))
113
114 def delete(self):
115 view = navigate_to(self, "Details")
116 view.configuration.item_select('Remove Catalog', handle_alert=True)
117 view = self.create_view(CatalogsView, wait='10s')
118 view.flash.assert_no_error()
119 view.flash.assert_success_message(
120 'Catalog "{}": Delete successful'.format(self.description or self.name))
121
122 @property
123 def exists(self):
124 try:
125 navigate_to(self, 'Details')
126 return True
127 except (NameError, CandidateNotFound):
128 return False
129
130
131 @attr.s
132 class CatalogCollection(BaseCollection):
133 """A collection for the :py:class:`cfme.services.catalogs.catalog.Catalog`"""
134 ENTITY = Catalog
135
136 def create(self, name, description, items=None):
137 """Create a catalog.
138
139 Args:
140 name: The name of the catalog
141 description: The description of the catalog
142 items: Items in the catalog
143 """
144 view = navigate_to(self, 'Add')
145 view.fill({
146 'name': name,
147 'description': description,
148 'assign_catalog_items': items
149 })
150 view.add_button.click()
151 catalog = self.instantiate(name=name, description=description, items=items)
152 view = self.create_view(CatalogsView)
153 assert view.is_displayed
154 view.flash.assert_no_error()
155 return catalog
156
157
158 @navigator.register(CatalogCollection)
159 class All(CFMENavigateStep):
160 VIEW = CatalogsView
161 prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')
162
163 def step(self, *args, **kwargs):
164 self.prerequisite_view.navigation.select('Services', 'Catalogs')
165 self.view.catalogs.tree.click_path("All Catalogs")
166
167
168 @navigator.register(CatalogCollection)
169 class Add(CFMENavigateStep):
170 VIEW = AddCatalogView
171 prerequisite = NavigateToSibling('All')
172
173 def step(self, *args, **kwargs):
174 self.prerequisite_view.configuration.item_select('Add a New Catalog')
175
176
177 @navigator.register(Catalog)
178 class Details(CFMENavigateStep):
179 VIEW = DetailsCatalogView
180 prerequisite = NavigateToAttribute('parent', 'All')
181
182 def step(self, *args, **kwargs):
183 self.prerequisite_view.catalogs.tree.click_path("All Catalogs", self.obj.name)
184
185
186 @navigator.register(Catalog)
187 class Edit(CFMENavigateStep):
188 VIEW = EditCatalogView
189 prerequisite = NavigateToSibling('Details')
190
191 def step(self, *args, **kwargs):
192 self.prerequisite_view.configuration.item_select('Edit this Item')
193
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cfme/services/catalogs/catalog.py b/cfme/services/catalogs/catalog.py
--- a/cfme/services/catalogs/catalog.py
+++ b/cfme/services/catalogs/catalog.py
@@ -49,6 +49,7 @@
return (
self.in_explorer and
self.catalogs.is_opened and
+ self.title.text == "All Catalogs" and
self.catalogs.tree.currently_selected == ["All Catalogs"])
| {"golden_diff": "diff --git a/cfme/services/catalogs/catalog.py b/cfme/services/catalogs/catalog.py\n--- a/cfme/services/catalogs/catalog.py\n+++ b/cfme/services/catalogs/catalog.py\n@@ -49,6 +49,7 @@\n return (\n self.in_explorer and\n self.catalogs.is_opened and\n+ self.title.text == \"All Catalogs\" and\n self.catalogs.tree.currently_selected == [\"All Catalogs\"])\n", "issue": "is_displayed for catalog all page is not working as expected\nCurrently the is_display of catalog all page returns True even if the view is on Add catalog page.\n", "before_files": [{"content": "import attr\nfrom navmazing import NavigateToAttribute\nfrom navmazing import NavigateToSibling\nfrom widgetastic.utils import Parameter\nfrom widgetastic.widget import Text\nfrom widgetastic_patternfly import Button\nfrom widgetastic_patternfly import CandidateNotFound\nfrom widgetastic_patternfly import Input\n\nfrom . import ServicesCatalogView\nfrom cfme.common import Taggable\nfrom cfme.modeling.base import BaseCollection\nfrom cfme.modeling.base import BaseEntity\nfrom cfme.utils.appliance.implementations.ui import CFMENavigateStep\nfrom cfme.utils.appliance.implementations.ui import navigate_to\nfrom cfme.utils.appliance.implementations.ui import navigator\nfrom cfme.utils.pretty import Pretty\nfrom cfme.utils.update import Updateable\nfrom cfme.utils.wait import wait_for\nfrom widgetastic_manageiq import MultiBoxSelect\n\n\nclass CatalogsMultiBoxSelect(MultiBoxSelect):\n move_into_button = Button(title=Parameter(\"@move_into\"))\n move_from_button = Button(title=Parameter(\"@move_from\"))\n\n\nclass CatalogForm(ServicesCatalogView):\n title = Text('#explorer_title_text')\n\n name = Input(name='name')\n description = Input(name=\"description\")\n assign_catalog_items = CatalogsMultiBoxSelect(\n move_into=\"Move Selected buttons right\",\n move_from=\"Move Selected buttons left\",\n available_items=\"available_fields\",\n chosen_items=\"selected_fields\"\n )\n\n save_button = Button('Save')\n cancel_button = Button('Cancel')\n\n\nclass CatalogsView(ServicesCatalogView):\n title = Text(\"#explorer_title_text\")\n\n @property\n def is_displayed(self):\n return (\n self.in_explorer and\n self.catalogs.is_opened and\n self.catalogs.tree.currently_selected == [\"All Catalogs\"])\n\n\nclass DetailsCatalogView(ServicesCatalogView):\n title = Text(\"#explorer_title_text\")\n\n @property\n def is_displayed(self):\n return (\n self.in_explorer and self.catalogs.is_opened and\n self.title.text == 'Catalog \"{}\"'.format(self.context[\"object\"].name)\n )\n\n\nclass AddCatalogView(CatalogForm):\n\n add_button = Button(\"Add\")\n\n @property\n def is_displayed(self):\n return (\n self.in_explorer and self.catalogs.is_opened and\n self.title.text == 'Adding a new Catalog'\n )\n\n\nclass EditCatalogView(CatalogForm):\n\n save_button = Button('Save')\n reset_button = Button('Reset')\n\n @property\n def is_displayed(self):\n return (\n self.in_explorer and self.catalogs.is_opened and\n self.title.text == 'Editing Catalog \"{}\"'.format(self.context[\"object\"].name)\n )\n\n\[email protected]\nclass Catalog(BaseEntity, Updateable, Pretty, Taggable):\n\n name = attr.ib()\n description = attr.ib()\n items = attr.ib(default=None)\n\n def update(self, updates):\n view = navigate_to(self, 'Edit')\n changed = view.fill(updates)\n if changed:\n view.save_button.click()\n else:\n view.cancel_button.click()\n view = self.create_view(DetailsCatalogView, override=updates, wait='10s')\n view.flash.assert_no_error()\n if changed:\n 
view.flash.assert_message(\n 'Catalog \"{}\" was saved'.format(updates.get('name', self.name)))\n else:\n view.flash.assert_message(\n 'Edit of Catalog \"{}\" was cancelled by the user'.format(self.name))\n\n def delete(self):\n view = navigate_to(self, \"Details\")\n view.configuration.item_select('Remove Catalog', handle_alert=True)\n view = self.create_view(CatalogsView, wait='10s')\n view.flash.assert_no_error()\n view.flash.assert_success_message(\n 'Catalog \"{}\": Delete successful'.format(self.description or self.name))\n\n @property\n def exists(self):\n try:\n navigate_to(self, 'Details')\n return True\n except (NameError, CandidateNotFound):\n return False\n\n\[email protected]\nclass CatalogCollection(BaseCollection):\n \"\"\"A collection for the :py:class:`cfme.services.catalogs.catalog.Catalog`\"\"\"\n ENTITY = Catalog\n\n def create(self, name, description, items=None):\n \"\"\"Create a catalog.\n\n Args:\n name: The name of the catalog\n description: The description of the catalog\n items: Items in the catalog\n \"\"\"\n view = navigate_to(self, 'Add')\n view.fill({\n 'name': name,\n 'description': description,\n 'assign_catalog_items': items\n })\n view.add_button.click()\n catalog = self.instantiate(name=name, description=description, items=items)\n view = self.create_view(CatalogsView)\n assert view.is_displayed\n view.flash.assert_no_error()\n return catalog\n\n\[email protected](CatalogCollection)\nclass All(CFMENavigateStep):\n VIEW = CatalogsView\n prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')\n\n def step(self, *args, **kwargs):\n self.prerequisite_view.navigation.select('Services', 'Catalogs')\n self.view.catalogs.tree.click_path(\"All Catalogs\")\n\n\[email protected](CatalogCollection)\nclass Add(CFMENavigateStep):\n VIEW = AddCatalogView\n prerequisite = NavigateToSibling('All')\n\n def step(self, *args, **kwargs):\n self.prerequisite_view.configuration.item_select('Add a New Catalog')\n\n\[email protected](Catalog)\nclass Details(CFMENavigateStep):\n VIEW = DetailsCatalogView\n prerequisite = NavigateToAttribute('parent', 'All')\n\n def step(self, *args, **kwargs):\n self.prerequisite_view.catalogs.tree.click_path(\"All Catalogs\", self.obj.name)\n\n\[email protected](Catalog)\nclass Edit(CFMENavigateStep):\n VIEW = EditCatalogView\n prerequisite = NavigateToSibling('Details')\n\n def step(self, *args, **kwargs):\n self.prerequisite_view.configuration.item_select('Edit this Item')\n", "path": "cfme/services/catalogs/catalog.py"}], "after_files": [{"content": "import attr\nfrom navmazing import NavigateToAttribute\nfrom navmazing import NavigateToSibling\nfrom widgetastic.utils import Parameter\nfrom widgetastic.widget import Text\nfrom widgetastic_patternfly import Button\nfrom widgetastic_patternfly import CandidateNotFound\nfrom widgetastic_patternfly import Input\n\nfrom . 
import ServicesCatalogView\nfrom cfme.common import Taggable\nfrom cfme.modeling.base import BaseCollection\nfrom cfme.modeling.base import BaseEntity\nfrom cfme.utils.appliance.implementations.ui import CFMENavigateStep\nfrom cfme.utils.appliance.implementations.ui import navigate_to\nfrom cfme.utils.appliance.implementations.ui import navigator\nfrom cfme.utils.pretty import Pretty\nfrom cfme.utils.update import Updateable\nfrom cfme.utils.wait import wait_for\nfrom widgetastic_manageiq import MultiBoxSelect\n\n\nclass CatalogsMultiBoxSelect(MultiBoxSelect):\n move_into_button = Button(title=Parameter(\"@move_into\"))\n move_from_button = Button(title=Parameter(\"@move_from\"))\n\n\nclass CatalogForm(ServicesCatalogView):\n title = Text('#explorer_title_text')\n\n name = Input(name='name')\n description = Input(name=\"description\")\n assign_catalog_items = CatalogsMultiBoxSelect(\n move_into=\"Move Selected buttons right\",\n move_from=\"Move Selected buttons left\",\n available_items=\"available_fields\",\n chosen_items=\"selected_fields\"\n )\n\n save_button = Button('Save')\n cancel_button = Button('Cancel')\n\n\nclass CatalogsView(ServicesCatalogView):\n title = Text(\"#explorer_title_text\")\n\n @property\n def is_displayed(self):\n return (\n self.in_explorer and\n self.catalogs.is_opened and\n self.title.text == \"All Catalogs\" and\n self.catalogs.tree.currently_selected == [\"All Catalogs\"])\n\n\nclass DetailsCatalogView(ServicesCatalogView):\n title = Text(\"#explorer_title_text\")\n\n @property\n def is_displayed(self):\n return (\n self.in_explorer and self.catalogs.is_opened and\n self.title.text == 'Catalog \"{}\"'.format(self.context[\"object\"].name)\n )\n\n\nclass AddCatalogView(CatalogForm):\n\n add_button = Button(\"Add\")\n\n @property\n def is_displayed(self):\n return (\n self.in_explorer and self.catalogs.is_opened and\n self.title.text == 'Adding a new Catalog'\n )\n\n\nclass EditCatalogView(CatalogForm):\n\n save_button = Button('Save')\n reset_button = Button('Reset')\n\n @property\n def is_displayed(self):\n return (\n self.in_explorer and self.catalogs.is_opened and\n self.title.text == 'Editing Catalog \"{}\"'.format(self.context[\"object\"].name)\n )\n\n\[email protected]\nclass Catalog(BaseEntity, Updateable, Pretty, Taggable):\n\n name = attr.ib()\n description = attr.ib()\n items = attr.ib(default=None)\n\n def update(self, updates):\n view = navigate_to(self, 'Edit')\n changed = view.fill(updates)\n if changed:\n view.save_button.click()\n else:\n view.cancel_button.click()\n view = self.create_view(DetailsCatalogView, override=updates, wait='10s')\n view.flash.assert_no_error()\n if changed:\n view.flash.assert_message(\n 'Catalog \"{}\" was saved'.format(updates.get('name', self.name)))\n else:\n view.flash.assert_message(\n 'Edit of Catalog \"{}\" was cancelled by the user'.format(self.name))\n\n def delete(self):\n view = navigate_to(self, \"Details\")\n view.configuration.item_select('Remove Catalog', handle_alert=True)\n view = self.create_view(CatalogsView, wait='10s')\n view.flash.assert_no_error()\n view.flash.assert_success_message(\n 'Catalog \"{}\": Delete successful'.format(self.description or self.name))\n\n @property\n def exists(self):\n try:\n navigate_to(self, 'Details')\n return True\n except (NameError, CandidateNotFound):\n return False\n\n\[email protected]\nclass CatalogCollection(BaseCollection):\n \"\"\"A collection for the :py:class:`cfme.services.catalogs.catalog.Catalog`\"\"\"\n ENTITY = Catalog\n\n def create(self, name, 
description, items=None):\n \"\"\"Create a catalog.\n\n Args:\n name: The name of the catalog\n description: The description of the catalog\n items: Items in the catalog\n \"\"\"\n view = navigate_to(self, 'Add')\n view.fill({\n 'name': name,\n 'description': description,\n 'assign_catalog_items': items\n })\n view.add_button.click()\n catalog = self.instantiate(name=name, description=description, items=items)\n view = self.create_view(CatalogsView)\n assert view.is_displayed\n view.flash.assert_no_error()\n return catalog\n\n\[email protected](CatalogCollection)\nclass All(CFMENavigateStep):\n VIEW = CatalogsView\n prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')\n\n def step(self, *args, **kwargs):\n self.prerequisite_view.navigation.select('Services', 'Catalogs')\n self.view.catalogs.tree.click_path(\"All Catalogs\")\n\n\[email protected](CatalogCollection)\nclass Add(CFMENavigateStep):\n VIEW = AddCatalogView\n prerequisite = NavigateToSibling('All')\n\n def step(self, *args, **kwargs):\n self.prerequisite_view.configuration.item_select('Add a New Catalog')\n\n\[email protected](Catalog)\nclass Details(CFMENavigateStep):\n VIEW = DetailsCatalogView\n prerequisite = NavigateToAttribute('parent', 'All')\n\n def step(self, *args, **kwargs):\n self.prerequisite_view.catalogs.tree.click_path(\"All Catalogs\", self.obj.name)\n\n\[email protected](Catalog)\nclass Edit(CFMENavigateStep):\n VIEW = EditCatalogView\n prerequisite = NavigateToSibling('Details')\n\n def step(self, *args, **kwargs):\n self.prerequisite_view.configuration.item_select('Edit this Item')\n", "path": "cfme/services/catalogs/catalog.py"}]} | 2,067 | 102 |
gh_patches_debug_25072 | rasdani/github-patches | git_diff | mindsdb__mindsdb-1010 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Removing predictor through HTTP returns 500
If the mindsdb config has a connection to a DB with `publish: True`, but this DB can't be reached, then removing a predictor in Scout returns 'error 500'. That is because mindsdb tries to remove the table related to the predictor from that database. That error is not critical. We should probably return code 200 with 'warnings' in this case, or just code 200.
Request:
```
Request URL: http://localhost:5000/api/predictors/home_days_on_market
Request Method: DELETE
Status Code: 500 INTERNAL SERVER ERROR
response
{"message": "ProgrammingError: 1698 (28000): Access denied for user 'roote'@'localhost'"}
```
error:
```
[2020-12-14 10:40:17,942] ERROR in app: Exception on /api/predictors/home_initial [DELETE]
Traceback (most recent call last):
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/connection_cext.py", line 233, in _open_connection
self._cmysql.connect(**cnx_kwargs)
_mysql_connector.MySQLInterfaceError: Access denied for user 'roote'@'localhost'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask_restx/api.py", line 375, in wrapper
resp = resource(*args, **kwargs)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask/views.py", line 89, in view
return self.dispatch_request(*args, **kwargs)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask_restx/resource.py", line 44, in dispatch_request
resp = meth(*args, **kwargs)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/api/http/namespaces/predictor.py", line 116, in delete
ca.mindsdb_native.delete_model(name)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/interfaces/native/mindsdb.py", line 97, in delete_model
self.dbw.unregister_predictor(name)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/interfaces/database/database.py", line 59, in unregister_predictor
integration.unregister_predictor(name)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/integrations/mariadb/mariadb.py", line 131, in unregister_predictor
self._query(q)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/integrations/mariadb/mariadb.py", line 42, in _query
con = mysql.connector.connect(
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/__init__.py", line 270, in connect
return CMySQLConnection(*args, **kwargs)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/connection_cext.py", line 86, in __init__
self.connect(**kwargs)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/abstracts.py", line 985, in connect
self._open_connection()
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/connection_cext.py", line 235, in _open_connection
raise errors.get_mysql_exception(msg=exc.msg, errno=exc.errno,
mysql.connector.errors.ProgrammingError: 1698 (28000): Access denied for user 'roote'@'localhost'
ERROR:mindsdb.api.http.initialize:Exception on /api/predictors/home_initial [DELETE]
Traceback (most recent call last):
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/connection_cext.py", line 233, in _open_connection
self._cmysql.connect(**cnx_kwargs)
_mysql_connector.MySQLInterfaceError: Access denied for user 'roote'@'localhost'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask_restx/api.py", line 375, in wrapper
resp = resource(*args, **kwargs)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask/views.py", line 89, in view
return self.dispatch_request(*args, **kwargs)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask_restx/resource.py", line 44, in dispatch_request
resp = meth(*args, **kwargs)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/api/http/namespaces/predictor.py", line 116, in delete
ca.mindsdb_native.delete_model(name)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/interfaces/native/mindsdb.py", line 97, in delete_model
self.dbw.unregister_predictor(name)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/interfaces/database/database.py", line 59, in unregister_predictor
integration.unregister_predictor(name)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/integrations/mariadb/mariadb.py", line 131, in unregister_predictor
self._query(q)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/integrations/mariadb/mariadb.py", line 42, in _query
con = mysql.connector.connect(
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/__init__.py", line 270, in connect
return CMySQLConnection(*args, **kwargs)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/connection_cext.py", line 86, in __init__
self.connect(**kwargs)
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/abstracts.py", line 985, in connect
self._open_connection()
File "/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/connection_cext.py", line 235, in _open_connection
raise errors.get_mysql_exception(msg=exc.msg, errno=exc.errno,
mysql.connector.errors.ProgrammingError: 1698 (28000): Access denied for user 'roote'@'localhost'
```
--- END ISSUE ---
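For context, the failure above can be made non-fatal by touching only the integrations whose connection check succeeds. The sketch below is illustrative: the names `_get_integrations`, `check_connection`, `unregister_predictor`, and `integration.name` come from the `DatabaseWrapper` code shown further down, while the helper function itself and its logging are hypothetical.
```
import logging

logger = logging.getLogger(__name__)


def unregister_predictor_safely(wrapper, name):
    # Hypothetical guard: skip unreachable integrations so a dead database
    # produces a warning instead of an unhandled error (HTTP 500).
    for integration in wrapper._get_integrations():
        if integration.check_connection():
            integration.unregister_predictor(name)
        else:
            logger.warning(
                "No connection to %s; predictor %r was not unregistered there.",
                integration.name, name)
```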
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mindsdb/interfaces/database/database.py`
Content:
```
1 from mindsdb.integrations.clickhouse.clickhouse import Clickhouse
2 from mindsdb.integrations.postgres.postgres import PostgreSQL
3 from mindsdb.integrations.mariadb.mariadb import Mariadb
4 from mindsdb.integrations.mysql.mysql import MySQL
5 from mindsdb.integrations.mssql.mssql import MSSQL
6
7
8 class DatabaseWrapper():
9
10 def __init__(self, config):
11 self.config = config
12 self._get_integrations()
13
14 def _setup_integration(self, integration):
15 success = False
16 try:
17 integration.setup()
18 success = True
19 except Exception as e:
20 print('Failed to integrate with database ' + integration.name + f', error: {e}')
21 return success
22
23 def _get_integrations(self):
24 # @TODO Once we have a presistent state sorted out this should be simplified as to not refresh the existing integrations every single time
25 integration_arr = []
26 for db_alias in self.config['integrations']:
27 if self.config['integrations'][db_alias]['publish']:
28 db_type = self.config['integrations'][db_alias]['type']
29 if db_type == 'clickhouse':
30 integration_arr.append(Clickhouse(self.config, db_alias))
31 elif db_type == 'mariadb':
32 integration_arr.append(Mariadb(self.config, db_alias))
33 elif db_type == 'mysql':
34 integration_arr.append(MySQL(self.config, db_alias))
35 elif db_type == 'postgres':
36 integration_arr.append(PostgreSQL(self.config, db_alias))
37 elif db_type == 'mssql':
38 integration_arr.append(MSSQL(self.config, db_alias))
39 elif db_type == 'mongodb':
40 pass
41 else:
42 print(f'Uknown integration type: {db_type} for database called: {db_alias}')
43
44 return integration_arr
45
46 def register_predictors(self, model_data_arr, setup=True):
47 it = self._get_integrations()
48 for integration in it:
49 register = True
50 if setup:
51 register = self._setup_integration(integration)
52 if register:
53 integration.register_predictors(model_data_arr)
54
55 integration = [integration]
56
57 def unregister_predictor(self, name):
58 for integration in self._get_integrations():
59 integration.unregister_predictor(name)
60
61 def check_connections(self):
62 connections = {}
63 for integration in self._get_integrations():
64 connections[integration.name] = integration.check_connection()
65
66 return connections
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mindsdb/interfaces/database/database.py b/mindsdb/interfaces/database/database.py
--- a/mindsdb/interfaces/database/database.py
+++ b/mindsdb/interfaces/database/database.py
@@ -4,6 +4,8 @@
from mindsdb.integrations.mysql.mysql import MySQL
from mindsdb.integrations.mssql.mssql import MSSQL
+from mindsdb.utilities.log import log as logger
+
class DatabaseWrapper():
@@ -50,13 +52,19 @@
if setup:
register = self._setup_integration(integration)
if register:
- integration.register_predictors(model_data_arr)
+ if integration.check_connection():
+ integration.register_predictors(model_data_arr)
+ else:
+ logger.warning(f"There is no connection to {integration.name}. predictor wouldn't be registred.")
integration = [integration]
def unregister_predictor(self, name):
for integration in self._get_integrations():
- integration.unregister_predictor(name)
+ if integration.check_connection():
+ integration.unregister_predictor(name)
+ else:
+ logger.warning(f"There is no connection to {integration.name}. predictor wouldn't be unregistred")
def check_connections(self):
connections = {}
| {"golden_diff": "diff --git a/mindsdb/interfaces/database/database.py b/mindsdb/interfaces/database/database.py\n--- a/mindsdb/interfaces/database/database.py\n+++ b/mindsdb/interfaces/database/database.py\n@@ -4,6 +4,8 @@\n from mindsdb.integrations.mysql.mysql import MySQL\n from mindsdb.integrations.mssql.mssql import MSSQL\n \n+from mindsdb.utilities.log import log as logger\n+\n \n class DatabaseWrapper():\n \n@@ -50,13 +52,19 @@\n if setup:\n register = self._setup_integration(integration)\n if register:\n- integration.register_predictors(model_data_arr)\n+ if integration.check_connection():\n+ integration.register_predictors(model_data_arr)\n+ else:\n+ logger.warning(f\"There is no connection to {integration.name}. predictor wouldn't be registred.\")\n \n integration = [integration]\n \n def unregister_predictor(self, name):\n for integration in self._get_integrations():\n- integration.unregister_predictor(name)\n+ if integration.check_connection():\n+ integration.unregister_predictor(name)\n+ else:\n+ logger.warning(f\"There is no connection to {integration.name}. predictor wouldn't be unregistred\")\n \n def check_connections(self):\n connections = {}\n", "issue": "Removing predictor through HTTP returns 500 \nIf mindsdb config have connection to DB with `publish: True`, but this DB cant be reached, then if removing predictor in Scout will return 'error 500'. That because mindsdb try to remove table related to predictor from database. That error is not critical. Probably, we need return code 200 with 'warnings' in this case, or only code 200.\r\nRequest:\r\n```\r\nRequest URL: http://localhost:5000/api/predictors/home_days_on_market\r\nRequest Method: DELETE\r\nStatus Code: 500 INTERNAL SERVER ERROR\r\nresponse\r\n{\"message\": \"ProgrammingError: 1698 (28000): Access denied for user 'roote'@'localhost'\"}\r\n```\r\nerror:\r\n```\r\n[2020-12-14 10:40:17,942] ERROR in app: Exception on /api/predictors/home_initial [DELETE]\r\nTraceback (most recent call last):\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/connection_cext.py\", line 233, in _open_connection\r\n self._cmysql.connect(**cnx_kwargs)\r\n_mysql_connector.MySQLInterfaceError: Access denied for user 'roote'@'localhost'\r\nDuring handling of the above exception, another exception occurred:\r\nTraceback (most recent call last):\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask/app.py\", line 1950, in full_dispatch_request\r\n rv = self.dispatch_request()\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask/app.py\", line 1936, in dispatch_request\r\n return self.view_functions[rule.endpoint](**req.view_args)\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask_restx/api.py\", line 375, in wrapper\r\n resp = resource(*args, **kwargs)\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask/views.py\", line 89, in view\r\n return self.dispatch_request(*args, **kwargs)\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask_restx/resource.py\", line 44, in dispatch_request\r\n resp = meth(*args, **kwargs)\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/api/http/namespaces/predictor.py\", line 116, in delete\r\n ca.mindsdb_native.delete_model(name)\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/interfaces/native/mindsdb.py\", line 97, in delete_model\r\n 
self.dbw.unregister_predictor(name)\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/interfaces/database/database.py\", line 59, in unregister_predictor\r\n integration.unregister_predictor(name)\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/integrations/mariadb/mariadb.py\", line 131, in unregister_predictor\r\n self._query(q)\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/integrations/mariadb/mariadb.py\", line 42, in _query\r\n con = mysql.connector.connect(\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/__init__.py\", line 270, in connect\r\n return CMySQLConnection(*args, **kwargs)\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/connection_cext.py\", line 86, in __init__\r\n self.connect(**kwargs)\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/abstracts.py\", line 985, in connect\r\n self._open_connection()\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/connection_cext.py\", line 235, in _open_connection\r\n raise errors.get_mysql_exception(msg=exc.msg, errno=exc.errno,\r\nmysql.connector.errors.ProgrammingError: 1698 (28000): Access denied for user 'roote'@'localhost'\r\nERROR:mindsdb.api.http.initialize:Exception on /api/predictors/home_initial [DELETE]\r\nTraceback (most recent call last):\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/connection_cext.py\", line 233, in _open_connection\r\n self._cmysql.connect(**cnx_kwargs)\r\n_mysql_connector.MySQLInterfaceError: Access denied for user 'roote'@'localhost'\r\nDuring handling of the above exception, another exception occurred:\r\nTraceback (most recent call last):\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask/app.py\", line 1950, in full_dispatch_request\r\n rv = self.dispatch_request()\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask/app.py\", line 1936, in dispatch_request\r\n return self.view_functions[rule.endpoint](**req.view_args)\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask_restx/api.py\", line 375, in wrapper\r\n resp = resource(*args, **kwargs)\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask/views.py\", line 89, in view\r\n return self.dispatch_request(*args, **kwargs)\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/flask_restx/resource.py\", line 44, in dispatch_request\r\n resp = meth(*args, **kwargs)\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/api/http/namespaces/predictor.py\", line 116, in delete\r\n ca.mindsdb_native.delete_model(name)\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/interfaces/native/mindsdb.py\", line 97, in delete_model\r\n self.dbw.unregister_predictor(name)\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/interfaces/database/database.py\", line 59, in unregister_predictor\r\n integration.unregister_predictor(name)\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/integrations/mariadb/mariadb.py\", line 131, in unregister_predictor\r\n self._query(q)\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/mindsdb/mindsdb/integrations/mariadb/mariadb.py\", line 42, in _query\r\n con = 
mysql.connector.connect(\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/__init__.py\", line 270, in connect\r\n return CMySQLConnection(*args, **kwargs)\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/connection_cext.py\", line 86, in __init__\r\n self.connect(**kwargs)\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/abstracts.py\", line 985, in connect\r\n self._open_connection()\r\n File \"/Users/alejo/Documents/mindsdb/venv-mindsdb/lib/python3.8/site-packages/mysql/connector/connection_cext.py\", line 235, in _open_connection\r\n raise errors.get_mysql_exception(msg=exc.msg, errno=exc.errno,\r\nmysql.connector.errors.ProgrammingError: 1698 (28000): Access denied for user 'roote'@'localhost'\r\n```\n", "before_files": [{"content": "from mindsdb.integrations.clickhouse.clickhouse import Clickhouse\nfrom mindsdb.integrations.postgres.postgres import PostgreSQL\nfrom mindsdb.integrations.mariadb.mariadb import Mariadb\nfrom mindsdb.integrations.mysql.mysql import MySQL\nfrom mindsdb.integrations.mssql.mssql import MSSQL\n\n\nclass DatabaseWrapper():\n\n def __init__(self, config):\n self.config = config\n self._get_integrations()\n\n def _setup_integration(self, integration):\n success = False\n try:\n integration.setup()\n success = True\n except Exception as e:\n print('Failed to integrate with database ' + integration.name + f', error: {e}')\n return success\n\n def _get_integrations(self):\n # @TODO Once we have a presistent state sorted out this should be simplified as to not refresh the existing integrations every single time\n integration_arr = []\n for db_alias in self.config['integrations']:\n if self.config['integrations'][db_alias]['publish']:\n db_type = self.config['integrations'][db_alias]['type']\n if db_type == 'clickhouse':\n integration_arr.append(Clickhouse(self.config, db_alias))\n elif db_type == 'mariadb':\n integration_arr.append(Mariadb(self.config, db_alias))\n elif db_type == 'mysql':\n integration_arr.append(MySQL(self.config, db_alias))\n elif db_type == 'postgres':\n integration_arr.append(PostgreSQL(self.config, db_alias))\n elif db_type == 'mssql':\n integration_arr.append(MSSQL(self.config, db_alias))\n elif db_type == 'mongodb':\n pass\n else:\n print(f'Uknown integration type: {db_type} for database called: {db_alias}')\n\n return integration_arr\n\n def register_predictors(self, model_data_arr, setup=True):\n it = self._get_integrations()\n for integration in it:\n register = True\n if setup:\n register = self._setup_integration(integration)\n if register:\n integration.register_predictors(model_data_arr)\n\n integration = [integration]\n\n def unregister_predictor(self, name):\n for integration in self._get_integrations():\n integration.unregister_predictor(name)\n\n def check_connections(self):\n connections = {}\n for integration in self._get_integrations():\n connections[integration.name] = integration.check_connection()\n\n return connections\n", "path": "mindsdb/interfaces/database/database.py"}], "after_files": [{"content": "from mindsdb.integrations.clickhouse.clickhouse import Clickhouse\nfrom mindsdb.integrations.postgres.postgres import PostgreSQL\nfrom mindsdb.integrations.mariadb.mariadb import Mariadb\nfrom mindsdb.integrations.mysql.mysql import MySQL\nfrom mindsdb.integrations.mssql.mssql import MSSQL\n\nfrom mindsdb.utilities.log import log as logger\n\n\nclass DatabaseWrapper():\n\n def 
__init__(self, config):\n self.config = config\n self._get_integrations()\n\n def _setup_integration(self, integration):\n success = False\n try:\n integration.setup()\n success = True\n except Exception as e:\n print('Failed to integrate with database ' + integration.name + f', error: {e}')\n return success\n\n def _get_integrations(self):\n # @TODO Once we have a presistent state sorted out this should be simplified as to not refresh the existing integrations every single time\n integration_arr = []\n for db_alias in self.config['integrations']:\n if self.config['integrations'][db_alias]['publish']:\n db_type = self.config['integrations'][db_alias]['type']\n if db_type == 'clickhouse':\n integration_arr.append(Clickhouse(self.config, db_alias))\n elif db_type == 'mariadb':\n integration_arr.append(Mariadb(self.config, db_alias))\n elif db_type == 'mysql':\n integration_arr.append(MySQL(self.config, db_alias))\n elif db_type == 'postgres':\n integration_arr.append(PostgreSQL(self.config, db_alias))\n elif db_type == 'mssql':\n integration_arr.append(MSSQL(self.config, db_alias))\n elif db_type == 'mongodb':\n pass\n else:\n print(f'Uknown integration type: {db_type} for database called: {db_alias}')\n\n return integration_arr\n\n def register_predictors(self, model_data_arr, setup=True):\n it = self._get_integrations()\n for integration in it:\n register = True\n if setup:\n register = self._setup_integration(integration)\n if register:\n if integration.check_connection():\n integration.register_predictors(model_data_arr)\n else:\n logger.warning(f\"There is no connection to {integration.name}. predictor wouldn't be registred.\")\n\n integration = [integration]\n\n def unregister_predictor(self, name):\n for integration in self._get_integrations():\n if integration.check_connection():\n integration.unregister_predictor(name)\n else:\n logger.warning(f\"There is no connection to {integration.name}. predictor wouldn't be unregistred\")\n\n def check_connections(self):\n connections = {}\n for integration in self._get_integrations():\n connections[integration.name] = integration.check_connection()\n\n return connections\n", "path": "mindsdb/interfaces/database/database.py"}]} | 2,809 | 265 |
gh_patches_debug_29922 | rasdani/github-patches | git_diff | rasterio__rasterio-1058 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Return None when a dataset has an undefined CRS
Instead of `CRS()`. Returning `CRS()` means that application-level concerns about what an undefined CRS means become Rasterio's concerns. `None` is what we have for undefined things in Python, and using it will help us tackle the concerns at the proper level.
--- END ISSUE ---
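A minimal sketch of what the proposed behavior means for calling code: with an undefined CRS the dataset would expose `None`, so callers test for `None` (or truthiness) instead of comparing against an empty `CRS()`. The file path below is a placeholder.
```
import rasterio

with rasterio.open("example.tif") as src:  # placeholder path
    if src.crs is None:
        print("dataset has no CRS defined")
    else:
        print(src.crs.to_string())
```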
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rasterio/rio/edit_info.py`
Content:
```
1 """Fetch and edit raster dataset metadata from the command line."""
2
3
4 import json
5 import warnings
6
7 import click
8
9 import rasterio
10 import rasterio.crs
11 from rasterio.crs import CRS
12 from rasterio.errors import CRSError
13 from rasterio.rio import options
14 from rasterio.transform import guard_transform
15
16
17 # Handlers for info module options.
18
19 def all_handler(ctx, param, value):
20 """Get tags from a template file or command line."""
21 if ctx.obj and ctx.obj.get('like') and value is not None:
22 ctx.obj['all_like'] = value
23 value = ctx.obj.get('like')
24 return value
25
26
27 def crs_handler(ctx, param, value):
28 """Get crs value from a template file or command line."""
29 retval = options.from_like_context(ctx, param, value)
30 if retval is None and value:
31 try:
32 retval = json.loads(value)
33 except ValueError:
34 retval = value
35 try:
36 if isinstance(retval, dict):
37 retval = CRS(retval)
38 else:
39 retval = CRS.from_string(retval)
40 except CRSError:
41 raise click.BadParameter(
42 "'%s' is not a recognized CRS." % retval,
43 param=param, param_hint='crs')
44 return retval
45
46
47 def tags_handler(ctx, param, value):
48 """Get tags from a template file or command line."""
49 retval = options.from_like_context(ctx, param, value)
50 if retval is None and value:
51 try:
52 retval = dict(p.split('=') for p in value)
53 except:
54 raise click.BadParameter(
55 "'%s' contains a malformed tag." % value,
56 param=param, param_hint='transform')
57 return retval
58
59
60 def transform_handler(ctx, param, value):
61 """Get transform value from a template file or command line."""
62 retval = options.from_like_context(ctx, param, value)
63 if retval is None and value:
64 try:
65 value = json.loads(value)
66 except ValueError:
67 pass
68 try:
69 retval = guard_transform(value)
70 except:
71 raise click.BadParameter(
72 "'%s' is not recognized as an Affine array." % value,
73 param=param, param_hint='transform')
74 return retval
75
76
77 @click.command('edit-info', short_help="Edit dataset metadata.")
78 @options.file_in_arg
79 @options.bidx_opt
80 @options.edit_nodata_opt
81 @click.option('--unset-nodata', default=False, is_flag=True,
82 help="Unset the dataset's nodata value.")
83 @click.option('--crs', callback=crs_handler, default=None,
84 help="New coordinate reference system")
85 @click.option('--unset-crs', default=False, is_flag=True,
86 help="Unset the dataset's CRS value.")
87 @click.option('--transform', callback=transform_handler,
88 help="New affine transform matrix")
89 @click.option('--units', help="Edit units of a band (requires --bidx)")
90 @click.option('--description',
91 help="Edit description of a band (requires --bidx)")
92 @click.option('--tag', 'tags', callback=tags_handler, multiple=True,
93 metavar='KEY=VAL', help="New tag.")
94 @click.option('--all', 'allmd', callback=all_handler, flag_value='like',
95 is_eager=True, default=False,
96 help="Copy all metadata items from the template file.")
97 @options.like_opt
98 @click.pass_context
99 def edit(ctx, input, bidx, nodata, unset_nodata, crs, unset_crs, transform,
100 units, description, tags, allmd, like):
101 """Edit a dataset's metadata: coordinate reference system, affine
102 transformation matrix, nodata value, and tags.
103
104 The coordinate reference system may be either a PROJ.4 or EPSG:nnnn
105 string,
106
107 --crs 'EPSG:4326'
108
109 or a JSON text-encoded PROJ.4 object.
110
111 --crs '{"proj": "utm", "zone": 18, ...}'
112
113 Transforms are JSON-encoded Affine objects like:
114
115 --transform '[300.038, 0.0, 101985.0, 0.0, -300.042, 2826915.0]'
116
117 Prior to Rasterio 1.0 GDAL geotransforms were supported for --transform,
118 but are no longer supported.
119
120 Metadata items may also be read from an existing dataset using a
121 combination of the --like option with at least one of --all,
122 `--crs like`, `--nodata like`, and `--transform like`.
123
124 rio edit-info example.tif --like template.tif --all
125
126 To get just the transform from the template:
127
128 rio edit-info example.tif --like template.tif --transform like
129
130 """
131 import numpy as np
132
133 def in_dtype_range(value, dtype):
134 infos = {'c': np.finfo, 'f': np.finfo, 'i': np.iinfo,
135 'u': np.iinfo}
136 rng = infos[np.dtype(dtype).kind](dtype)
137 return rng.min <= value <= rng.max
138
139 with ctx.obj['env'], rasterio.open(input, 'r+') as dst:
140
141 if allmd:
142 nodata = allmd['nodata']
143 crs = allmd['crs']
144 transform = allmd['transform']
145 tags = allmd['tags']
146
147 if unset_nodata and nodata is not options.IgnoreOption:
148 raise click.BadParameter(
149 "--unset-nodata and --nodata cannot be used together.")
150
151 if unset_crs and crs:
152 raise click.BadParameter(
153 "--unset-crs and --crs cannot be used together.")
154
155 if unset_nodata:
156 # Setting nodata to None will raise NotImplementedError
157 # if GDALDeleteRasterNoDataValue() isn't present in the
158 # GDAL library.
159 try:
160 dst.nodata = None
161 except NotImplementedError as exc: # pragma: no cover
162 raise click.ClickException(str(exc))
163
164 elif nodata is not options.IgnoreOption:
165 dtype = dst.dtypes[0]
166 if nodata is not None and not in_dtype_range(nodata, dtype):
167 raise click.BadParameter(
168 "outside the range of the file's "
169 "data type (%s)." % dtype,
170 param=nodata, param_hint='nodata')
171 dst.nodata = nodata
172
173 if unset_crs:
174 dst.crs = CRS()
175 elif crs:
176 dst.crs = crs
177
178 if transform:
179 dst.transform = transform
180
181 if tags:
182 dst.update_tags(**tags)
183
184 if units:
185 dst.set_units(bidx, units)
186
187 if description:
188 dst.set_description(bidx, description)
189
190 # Post check - ensure that crs was unset properly
191 if unset_crs:
192 with ctx.obj['env'], rasterio.open(input, 'r') as src:
193 if dict(src.crs) != {}:
194 warnings.warn(
195 'CRS was not unset. Availability of his functionality '
196 'differs depending on GDAL version and driver')
197
```
Path: `rasterio/rio/info.py`
Content:
```
1 """Command access to dataset metadata, stats, and more."""
2
3
4 import json
5
6 import click
7
8 import rasterio
9 import rasterio.crs
10 from rasterio.rio import options
11
12
13 @click.command(short_help="Print information about a data file.")
14 @options.file_in_arg
15 @click.option('--meta', 'aspect', flag_value='meta', default=True,
16 help="Show data file structure (default).")
17 @click.option('--tags', 'aspect', flag_value='tags',
18 help="Show data file tags.")
19 @click.option('--namespace', help="Select a tag namespace.")
20 @click.option('--indent', default=None, type=int,
21 help="Indentation level for pretty printed output")
22 # Options to pick out a single metadata item and print it as
23 # a string.
24 @click.option('--count', 'meta_member', flag_value='count',
25 help="Print the count of bands.")
26 @click.option('-t', '--dtype', 'meta_member', flag_value='dtype',
27 help="Print the dtype name.")
28 @click.option('--nodata', 'meta_member', flag_value='nodata',
29 help="Print the nodata value.")
30 @click.option('-f', '--format', '--driver', 'meta_member', flag_value='driver',
31 help="Print the format driver.")
32 @click.option('--shape', 'meta_member', flag_value='shape',
33 help="Print the (height, width) shape.")
34 @click.option('--height', 'meta_member', flag_value='height',
35 help="Print the height (number of rows).")
36 @click.option('--width', 'meta_member', flag_value='width',
37 help="Print the width (number of columns).")
38 @click.option('--crs', 'meta_member', flag_value='crs',
39 help="Print the CRS as a PROJ.4 string.")
40 @click.option('--bounds', 'meta_member', flag_value='bounds',
41 help="Print the boundary coordinates "
42 "(left, bottom, right, top).")
43 @click.option('-r', '--res', 'meta_member', flag_value='res',
44 help="Print pixel width and height.")
45 @click.option('--lnglat', 'meta_member', flag_value='lnglat',
46 help="Print longitude and latitude at center.")
47 @click.option('--stats', 'meta_member', flag_value='stats',
48 help="Print statistics (min, max, mean) of a single band "
49 "(use --bidx).")
50 @click.option('--checksum', 'meta_member', flag_value='checksum',
51 help="Print integer checksum of a single band "
52 "(use --bidx).")
53 @click.option('-v', '--tell-me-more', '--verbose', is_flag=True,
54 help="Output extra information.")
55 @options.bidx_opt
56 @options.masked_opt
57 @click.pass_context
58 def info(ctx, input, aspect, indent, namespace, meta_member, verbose, bidx,
59 masked):
60 """Print metadata about the dataset as JSON.
61
62 Optionally print a single metadata item as a string.
63 """
64 try:
65 with ctx.obj['env'], rasterio.open(input) as src:
66
67 info = dict(src.profile)
68 info['shape'] = (info['height'], info['width'])
69 info['bounds'] = src.bounds
70 proj4 = src.crs.to_string()
71 if proj4.startswith('+init=epsg'):
72 proj4 = proj4.split('=')[1].upper()
73 info['crs'] = proj4
74 info['res'] = src.res
75 info['colorinterp'] = [src.colorinterp(i).name
76 for i in src.indexes]
77 info['units'] = [units or None for units in src.units]
78 info['descriptions'] = src.descriptions
79 info['indexes'] = src.indexes
80 info['mask_flags'] = [[
81 flag.name for flag in flags] for flags in src.mask_flag_enums]
82
83 if proj4 != '':
84 info['lnglat'] = src.lnglat()
85
86 if verbose:
87 stats = [{'min': float(b.min()),
88 'max': float(b.max()),
89 'mean': float(b.mean())
90 } for b in src.read(masked=masked)]
91 info['stats'] = stats
92
93 info['checksum'] = [src.checksum(i) for i in src.indexes]
94
95 gcps, crs = src.gcps
96 proj4 = crs.to_string()
97 if proj4.startswith('+init=epsg'):
98 proj4 = proj4.split('=')[1].upper()
99 if gcps:
100 info['gcps'] = {
101 'crs': proj4, 'points': [p.asdict() for p in gcps]}
102
103 if aspect == 'meta':
104 if meta_member == 'stats':
105 band = src.read(bidx, masked=masked)
106 click.echo('%f %f %f' % (
107 float(band.min()),
108 float(band.max()),
109 float(band.mean())))
110 elif meta_member == 'checksum':
111 click.echo(str(src.checksum(bidx)))
112 elif meta_member:
113 if isinstance(info[meta_member], (list, tuple)):
114 click.echo(" ".join(map(str, info[meta_member])))
115 else:
116 click.echo(info[meta_member])
117 else:
118 click.echo(json.dumps(info, sort_keys=True, indent=indent))
119
120 elif aspect == 'tags':
121 click.echo(
122 json.dumps(src.tags(ns=namespace), indent=indent))
123 except Exception:
124 raise click.Abort()
125
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rasterio/rio/edit_info.py b/rasterio/rio/edit_info.py
--- a/rasterio/rio/edit_info.py
+++ b/rasterio/rio/edit_info.py
@@ -171,7 +171,7 @@
dst.nodata = nodata
if unset_crs:
- dst.crs = CRS()
+ dst.crs = None # CRS()
elif crs:
dst.crs = crs
@@ -190,7 +190,7 @@
# Post check - ensure that crs was unset properly
if unset_crs:
with ctx.obj['env'], rasterio.open(input, 'r') as src:
- if dict(src.crs) != {}:
+ if src.crs:
warnings.warn(
'CRS was not unset. Availability of his functionality '
'differs depending on GDAL version and driver')
diff --git a/rasterio/rio/info.py b/rasterio/rio/info.py
--- a/rasterio/rio/info.py
+++ b/rasterio/rio/info.py
@@ -67,10 +67,15 @@
info = dict(src.profile)
info['shape'] = (info['height'], info['width'])
info['bounds'] = src.bounds
- proj4 = src.crs.to_string()
- if proj4.startswith('+init=epsg'):
- proj4 = proj4.split('=')[1].upper()
- info['crs'] = proj4
+
+ if src.crs:
+ proj4 = src.crs.to_string()
+ if proj4.startswith('+init=epsg'):
+ proj4 = proj4.split('=')[1].upper()
+ info['crs'] = proj4
+ else:
+ info['crs'] = None
+
info['res'] = src.res
info['colorinterp'] = [src.colorinterp(i).name
for i in src.indexes]
| {"golden_diff": "diff --git a/rasterio/rio/edit_info.py b/rasterio/rio/edit_info.py\n--- a/rasterio/rio/edit_info.py\n+++ b/rasterio/rio/edit_info.py\n@@ -171,7 +171,7 @@\n dst.nodata = nodata\n \n if unset_crs:\n- dst.crs = CRS()\n+ dst.crs = None # CRS()\n elif crs:\n dst.crs = crs\n \n@@ -190,7 +190,7 @@\n # Post check - ensure that crs was unset properly\n if unset_crs:\n with ctx.obj['env'], rasterio.open(input, 'r') as src:\n- if dict(src.crs) != {}:\n+ if src.crs:\n warnings.warn(\n 'CRS was not unset. Availability of his functionality '\n 'differs depending on GDAL version and driver')\ndiff --git a/rasterio/rio/info.py b/rasterio/rio/info.py\n--- a/rasterio/rio/info.py\n+++ b/rasterio/rio/info.py\n@@ -67,10 +67,15 @@\n info = dict(src.profile)\n info['shape'] = (info['height'], info['width'])\n info['bounds'] = src.bounds\n- proj4 = src.crs.to_string()\n- if proj4.startswith('+init=epsg'):\n- proj4 = proj4.split('=')[1].upper()\n- info['crs'] = proj4\n+\n+ if src.crs:\n+ proj4 = src.crs.to_string()\n+ if proj4.startswith('+init=epsg'):\n+ proj4 = proj4.split('=')[1].upper()\n+ info['crs'] = proj4\n+ else:\n+ info['crs'] = None\n+\n info['res'] = src.res\n info['colorinterp'] = [src.colorinterp(i).name\n for i in src.indexes]\n", "issue": "Return None when a dataset has an undefined CRS\nInstead of `CRS()`. Returning `CRS()` means that application-level concerns about what an undefined CRS means become Rasterio's concerns. `None` is what we have for undefined things in Python, and using it will help us tackle the concerns at the proper level.\n", "before_files": [{"content": "\"\"\"Fetch and edit raster dataset metadata from the command line.\"\"\"\n\n\nimport json\nimport warnings\n\nimport click\n\nimport rasterio\nimport rasterio.crs\nfrom rasterio.crs import CRS\nfrom rasterio.errors import CRSError\nfrom rasterio.rio import options\nfrom rasterio.transform import guard_transform\n\n\n# Handlers for info module options.\n\ndef all_handler(ctx, param, value):\n \"\"\"Get tags from a template file or command line.\"\"\"\n if ctx.obj and ctx.obj.get('like') and value is not None:\n ctx.obj['all_like'] = value\n value = ctx.obj.get('like')\n return value\n\n\ndef crs_handler(ctx, param, value):\n \"\"\"Get crs value from a template file or command line.\"\"\"\n retval = options.from_like_context(ctx, param, value)\n if retval is None and value:\n try:\n retval = json.loads(value)\n except ValueError:\n retval = value\n try:\n if isinstance(retval, dict):\n retval = CRS(retval)\n else:\n retval = CRS.from_string(retval)\n except CRSError:\n raise click.BadParameter(\n \"'%s' is not a recognized CRS.\" % retval,\n param=param, param_hint='crs')\n return retval\n\n\ndef tags_handler(ctx, param, value):\n \"\"\"Get tags from a template file or command line.\"\"\"\n retval = options.from_like_context(ctx, param, value)\n if retval is None and value:\n try:\n retval = dict(p.split('=') for p in value)\n except:\n raise click.BadParameter(\n \"'%s' contains a malformed tag.\" % value,\n param=param, param_hint='transform')\n return retval\n\n\ndef transform_handler(ctx, param, value):\n \"\"\"Get transform value from a template file or command line.\"\"\"\n retval = options.from_like_context(ctx, param, value)\n if retval is None and value:\n try:\n value = json.loads(value)\n except ValueError:\n pass\n try:\n retval = guard_transform(value)\n except:\n raise click.BadParameter(\n \"'%s' is not recognized as an Affine array.\" % value,\n param=param, param_hint='transform')\n return 
retval\n\n\[email protected]('edit-info', short_help=\"Edit dataset metadata.\")\[email protected]_in_arg\[email protected]_opt\[email protected]_nodata_opt\[email protected]('--unset-nodata', default=False, is_flag=True,\n help=\"Unset the dataset's nodata value.\")\[email protected]('--crs', callback=crs_handler, default=None,\n help=\"New coordinate reference system\")\[email protected]('--unset-crs', default=False, is_flag=True,\n help=\"Unset the dataset's CRS value.\")\[email protected]('--transform', callback=transform_handler,\n help=\"New affine transform matrix\")\[email protected]('--units', help=\"Edit units of a band (requires --bidx)\")\[email protected]('--description',\n help=\"Edit description of a band (requires --bidx)\")\[email protected]('--tag', 'tags', callback=tags_handler, multiple=True,\n metavar='KEY=VAL', help=\"New tag.\")\[email protected]('--all', 'allmd', callback=all_handler, flag_value='like',\n is_eager=True, default=False,\n help=\"Copy all metadata items from the template file.\")\[email protected]_opt\[email protected]_context\ndef edit(ctx, input, bidx, nodata, unset_nodata, crs, unset_crs, transform,\n units, description, tags, allmd, like):\n \"\"\"Edit a dataset's metadata: coordinate reference system, affine\n transformation matrix, nodata value, and tags.\n\n The coordinate reference system may be either a PROJ.4 or EPSG:nnnn\n string,\n\n --crs 'EPSG:4326'\n\n or a JSON text-encoded PROJ.4 object.\n\n --crs '{\"proj\": \"utm\", \"zone\": 18, ...}'\n\n Transforms are JSON-encoded Affine objects like:\n\n --transform '[300.038, 0.0, 101985.0, 0.0, -300.042, 2826915.0]'\n\n Prior to Rasterio 1.0 GDAL geotransforms were supported for --transform,\n but are no longer supported.\n\n Metadata items may also be read from an existing dataset using a\n combination of the --like option with at least one of --all,\n `--crs like`, `--nodata like`, and `--transform like`.\n\n rio edit-info example.tif --like template.tif --all\n\n To get just the transform from the template:\n\n rio edit-info example.tif --like template.tif --transform like\n\n \"\"\"\n import numpy as np\n\n def in_dtype_range(value, dtype):\n infos = {'c': np.finfo, 'f': np.finfo, 'i': np.iinfo,\n 'u': np.iinfo}\n rng = infos[np.dtype(dtype).kind](dtype)\n return rng.min <= value <= rng.max\n\n with ctx.obj['env'], rasterio.open(input, 'r+') as dst:\n\n if allmd:\n nodata = allmd['nodata']\n crs = allmd['crs']\n transform = allmd['transform']\n tags = allmd['tags']\n\n if unset_nodata and nodata is not options.IgnoreOption:\n raise click.BadParameter(\n \"--unset-nodata and --nodata cannot be used together.\")\n\n if unset_crs and crs:\n raise click.BadParameter(\n \"--unset-crs and --crs cannot be used together.\")\n\n if unset_nodata:\n # Setting nodata to None will raise NotImplementedError\n # if GDALDeleteRasterNoDataValue() isn't present in the\n # GDAL library.\n try:\n dst.nodata = None\n except NotImplementedError as exc: # pragma: no cover\n raise click.ClickException(str(exc))\n\n elif nodata is not options.IgnoreOption:\n dtype = dst.dtypes[0]\n if nodata is not None and not in_dtype_range(nodata, dtype):\n raise click.BadParameter(\n \"outside the range of the file's \"\n \"data type (%s).\" % dtype,\n param=nodata, param_hint='nodata')\n dst.nodata = nodata\n\n if unset_crs:\n dst.crs = CRS()\n elif crs:\n dst.crs = crs\n\n if transform:\n dst.transform = transform\n\n if tags:\n dst.update_tags(**tags)\n\n if units:\n dst.set_units(bidx, units)\n\n if description:\n 
dst.set_description(bidx, description)\n\n # Post check - ensure that crs was unset properly\n if unset_crs:\n with ctx.obj['env'], rasterio.open(input, 'r') as src:\n if dict(src.crs) != {}:\n warnings.warn(\n 'CRS was not unset. Availability of his functionality '\n 'differs depending on GDAL version and driver')\n", "path": "rasterio/rio/edit_info.py"}, {"content": "\"\"\"Command access to dataset metadata, stats, and more.\"\"\"\n\n\nimport json\n\nimport click\n\nimport rasterio\nimport rasterio.crs\nfrom rasterio.rio import options\n\n\[email protected](short_help=\"Print information about a data file.\")\[email protected]_in_arg\[email protected]('--meta', 'aspect', flag_value='meta', default=True,\n help=\"Show data file structure (default).\")\[email protected]('--tags', 'aspect', flag_value='tags',\n help=\"Show data file tags.\")\[email protected]('--namespace', help=\"Select a tag namespace.\")\[email protected]('--indent', default=None, type=int,\n help=\"Indentation level for pretty printed output\")\n# Options to pick out a single metadata item and print it as\n# a string.\[email protected]('--count', 'meta_member', flag_value='count',\n help=\"Print the count of bands.\")\[email protected]('-t', '--dtype', 'meta_member', flag_value='dtype',\n help=\"Print the dtype name.\")\[email protected]('--nodata', 'meta_member', flag_value='nodata',\n help=\"Print the nodata value.\")\[email protected]('-f', '--format', '--driver', 'meta_member', flag_value='driver',\n help=\"Print the format driver.\")\[email protected]('--shape', 'meta_member', flag_value='shape',\n help=\"Print the (height, width) shape.\")\[email protected]('--height', 'meta_member', flag_value='height',\n help=\"Print the height (number of rows).\")\[email protected]('--width', 'meta_member', flag_value='width',\n help=\"Print the width (number of columns).\")\[email protected]('--crs', 'meta_member', flag_value='crs',\n help=\"Print the CRS as a PROJ.4 string.\")\[email protected]('--bounds', 'meta_member', flag_value='bounds',\n help=\"Print the boundary coordinates \"\n \"(left, bottom, right, top).\")\[email protected]('-r', '--res', 'meta_member', flag_value='res',\n help=\"Print pixel width and height.\")\[email protected]('--lnglat', 'meta_member', flag_value='lnglat',\n help=\"Print longitude and latitude at center.\")\[email protected]('--stats', 'meta_member', flag_value='stats',\n help=\"Print statistics (min, max, mean) of a single band \"\n \"(use --bidx).\")\[email protected]('--checksum', 'meta_member', flag_value='checksum',\n help=\"Print integer checksum of a single band \"\n \"(use --bidx).\")\[email protected]('-v', '--tell-me-more', '--verbose', is_flag=True,\n help=\"Output extra information.\")\[email protected]_opt\[email protected]_opt\[email protected]_context\ndef info(ctx, input, aspect, indent, namespace, meta_member, verbose, bidx,\n masked):\n \"\"\"Print metadata about the dataset as JSON.\n\n Optionally print a single metadata item as a string.\n \"\"\"\n try:\n with ctx.obj['env'], rasterio.open(input) as src:\n\n info = dict(src.profile)\n info['shape'] = (info['height'], info['width'])\n info['bounds'] = src.bounds\n proj4 = src.crs.to_string()\n if proj4.startswith('+init=epsg'):\n proj4 = proj4.split('=')[1].upper()\n info['crs'] = proj4\n info['res'] = src.res\n info['colorinterp'] = [src.colorinterp(i).name\n for i in src.indexes]\n info['units'] = [units or None for units in src.units]\n info['descriptions'] = src.descriptions\n info['indexes'] = src.indexes\n 
info['mask_flags'] = [[\n flag.name for flag in flags] for flags in src.mask_flag_enums]\n\n if proj4 != '':\n info['lnglat'] = src.lnglat()\n\n if verbose:\n stats = [{'min': float(b.min()),\n 'max': float(b.max()),\n 'mean': float(b.mean())\n } for b in src.read(masked=masked)]\n info['stats'] = stats\n\n info['checksum'] = [src.checksum(i) for i in src.indexes]\n\n gcps, crs = src.gcps\n proj4 = crs.to_string()\n if proj4.startswith('+init=epsg'):\n proj4 = proj4.split('=')[1].upper()\n if gcps:\n info['gcps'] = {\n 'crs': proj4, 'points': [p.asdict() for p in gcps]}\n\n if aspect == 'meta':\n if meta_member == 'stats':\n band = src.read(bidx, masked=masked)\n click.echo('%f %f %f' % (\n float(band.min()),\n float(band.max()),\n float(band.mean())))\n elif meta_member == 'checksum':\n click.echo(str(src.checksum(bidx)))\n elif meta_member:\n if isinstance(info[meta_member], (list, tuple)):\n click.echo(\" \".join(map(str, info[meta_member])))\n else:\n click.echo(info[meta_member])\n else:\n click.echo(json.dumps(info, sort_keys=True, indent=indent))\n\n elif aspect == 'tags':\n click.echo(\n json.dumps(src.tags(ns=namespace), indent=indent))\n except Exception:\n raise click.Abort()\n", "path": "rasterio/rio/info.py"}], "after_files": [{"content": "\"\"\"Fetch and edit raster dataset metadata from the command line.\"\"\"\n\n\nimport json\nimport warnings\n\nimport click\n\nimport rasterio\nimport rasterio.crs\nfrom rasterio.crs import CRS\nfrom rasterio.errors import CRSError\nfrom rasterio.rio import options\nfrom rasterio.transform import guard_transform\n\n\n# Handlers for info module options.\n\ndef all_handler(ctx, param, value):\n \"\"\"Get tags from a template file or command line.\"\"\"\n if ctx.obj and ctx.obj.get('like') and value is not None:\n ctx.obj['all_like'] = value\n value = ctx.obj.get('like')\n return value\n\n\ndef crs_handler(ctx, param, value):\n \"\"\"Get crs value from a template file or command line.\"\"\"\n retval = options.from_like_context(ctx, param, value)\n if retval is None and value:\n try:\n retval = json.loads(value)\n except ValueError:\n retval = value\n try:\n if isinstance(retval, dict):\n retval = CRS(retval)\n else:\n retval = CRS.from_string(retval)\n except CRSError:\n raise click.BadParameter(\n \"'%s' is not a recognized CRS.\" % retval,\n param=param, param_hint='crs')\n return retval\n\n\ndef tags_handler(ctx, param, value):\n \"\"\"Get tags from a template file or command line.\"\"\"\n retval = options.from_like_context(ctx, param, value)\n if retval is None and value:\n try:\n retval = dict(p.split('=') for p in value)\n except:\n raise click.BadParameter(\n \"'%s' contains a malformed tag.\" % value,\n param=param, param_hint='transform')\n return retval\n\n\ndef transform_handler(ctx, param, value):\n \"\"\"Get transform value from a template file or command line.\"\"\"\n retval = options.from_like_context(ctx, param, value)\n if retval is None and value:\n try:\n value = json.loads(value)\n except ValueError:\n pass\n try:\n retval = guard_transform(value)\n except:\n raise click.BadParameter(\n \"'%s' is not recognized as an Affine array.\" % value,\n param=param, param_hint='transform')\n return retval\n\n\[email protected]('edit-info', short_help=\"Edit dataset metadata.\")\[email protected]_in_arg\[email protected]_opt\[email protected]_nodata_opt\[email protected]('--unset-nodata', default=False, is_flag=True,\n help=\"Unset the dataset's nodata value.\")\[email protected]('--crs', callback=crs_handler, default=None,\n 
help=\"New coordinate reference system\")\[email protected]('--unset-crs', default=False, is_flag=True,\n help=\"Unset the dataset's CRS value.\")\[email protected]('--transform', callback=transform_handler,\n help=\"New affine transform matrix\")\[email protected]('--units', help=\"Edit units of a band (requires --bidx)\")\[email protected]('--description',\n help=\"Edit description of a band (requires --bidx)\")\[email protected]('--tag', 'tags', callback=tags_handler, multiple=True,\n metavar='KEY=VAL', help=\"New tag.\")\[email protected]('--all', 'allmd', callback=all_handler, flag_value='like',\n is_eager=True, default=False,\n help=\"Copy all metadata items from the template file.\")\[email protected]_opt\[email protected]_context\ndef edit(ctx, input, bidx, nodata, unset_nodata, crs, unset_crs, transform,\n units, description, tags, allmd, like):\n \"\"\"Edit a dataset's metadata: coordinate reference system, affine\n transformation matrix, nodata value, and tags.\n\n The coordinate reference system may be either a PROJ.4 or EPSG:nnnn\n string,\n\n --crs 'EPSG:4326'\n\n or a JSON text-encoded PROJ.4 object.\n\n --crs '{\"proj\": \"utm\", \"zone\": 18, ...}'\n\n Transforms are JSON-encoded Affine objects like:\n\n --transform '[300.038, 0.0, 101985.0, 0.0, -300.042, 2826915.0]'\n\n Prior to Rasterio 1.0 GDAL geotransforms were supported for --transform,\n but are no longer supported.\n\n Metadata items may also be read from an existing dataset using a\n combination of the --like option with at least one of --all,\n `--crs like`, `--nodata like`, and `--transform like`.\n\n rio edit-info example.tif --like template.tif --all\n\n To get just the transform from the template:\n\n rio edit-info example.tif --like template.tif --transform like\n\n \"\"\"\n import numpy as np\n\n def in_dtype_range(value, dtype):\n infos = {'c': np.finfo, 'f': np.finfo, 'i': np.iinfo,\n 'u': np.iinfo}\n rng = infos[np.dtype(dtype).kind](dtype)\n return rng.min <= value <= rng.max\n\n with ctx.obj['env'], rasterio.open(input, 'r+') as dst:\n\n if allmd:\n nodata = allmd['nodata']\n crs = allmd['crs']\n transform = allmd['transform']\n tags = allmd['tags']\n\n if unset_nodata and nodata is not options.IgnoreOption:\n raise click.BadParameter(\n \"--unset-nodata and --nodata cannot be used together.\")\n\n if unset_crs and crs:\n raise click.BadParameter(\n \"--unset-crs and --crs cannot be used together.\")\n\n if unset_nodata:\n # Setting nodata to None will raise NotImplementedError\n # if GDALDeleteRasterNoDataValue() isn't present in the\n # GDAL library.\n try:\n dst.nodata = None\n except NotImplementedError as exc: # pragma: no cover\n raise click.ClickException(str(exc))\n\n elif nodata is not options.IgnoreOption:\n dtype = dst.dtypes[0]\n if nodata is not None and not in_dtype_range(nodata, dtype):\n raise click.BadParameter(\n \"outside the range of the file's \"\n \"data type (%s).\" % dtype,\n param=nodata, param_hint='nodata')\n dst.nodata = nodata\n\n if unset_crs:\n dst.crs = None # CRS()\n elif crs:\n dst.crs = crs\n\n if transform:\n dst.transform = transform\n\n if tags:\n dst.update_tags(**tags)\n\n if units:\n dst.set_units(bidx, units)\n\n if description:\n dst.set_description(bidx, description)\n\n # Post check - ensure that crs was unset properly\n if unset_crs:\n with ctx.obj['env'], rasterio.open(input, 'r') as src:\n if src.crs:\n warnings.warn(\n 'CRS was not unset. 
Availability of his functionality '\n 'differs depending on GDAL version and driver')\n", "path": "rasterio/rio/edit_info.py"}, {"content": "\"\"\"Command access to dataset metadata, stats, and more.\"\"\"\n\n\nimport json\n\nimport click\n\nimport rasterio\nimport rasterio.crs\nfrom rasterio.rio import options\n\n\[email protected](short_help=\"Print information about a data file.\")\[email protected]_in_arg\[email protected]('--meta', 'aspect', flag_value='meta', default=True,\n help=\"Show data file structure (default).\")\[email protected]('--tags', 'aspect', flag_value='tags',\n help=\"Show data file tags.\")\[email protected]('--namespace', help=\"Select a tag namespace.\")\[email protected]('--indent', default=None, type=int,\n help=\"Indentation level for pretty printed output\")\n# Options to pick out a single metadata item and print it as\n# a string.\[email protected]('--count', 'meta_member', flag_value='count',\n help=\"Print the count of bands.\")\[email protected]('-t', '--dtype', 'meta_member', flag_value='dtype',\n help=\"Print the dtype name.\")\[email protected]('--nodata', 'meta_member', flag_value='nodata',\n help=\"Print the nodata value.\")\[email protected]('-f', '--format', '--driver', 'meta_member', flag_value='driver',\n help=\"Print the format driver.\")\[email protected]('--shape', 'meta_member', flag_value='shape',\n help=\"Print the (height, width) shape.\")\[email protected]('--height', 'meta_member', flag_value='height',\n help=\"Print the height (number of rows).\")\[email protected]('--width', 'meta_member', flag_value='width',\n help=\"Print the width (number of columns).\")\[email protected]('--crs', 'meta_member', flag_value='crs',\n help=\"Print the CRS as a PROJ.4 string.\")\[email protected]('--bounds', 'meta_member', flag_value='bounds',\n help=\"Print the boundary coordinates \"\n \"(left, bottom, right, top).\")\[email protected]('-r', '--res', 'meta_member', flag_value='res',\n help=\"Print pixel width and height.\")\[email protected]('--lnglat', 'meta_member', flag_value='lnglat',\n help=\"Print longitude and latitude at center.\")\[email protected]('--stats', 'meta_member', flag_value='stats',\n help=\"Print statistics (min, max, mean) of a single band \"\n \"(use --bidx).\")\[email protected]('--checksum', 'meta_member', flag_value='checksum',\n help=\"Print integer checksum of a single band \"\n \"(use --bidx).\")\[email protected]('-v', '--tell-me-more', '--verbose', is_flag=True,\n help=\"Output extra information.\")\[email protected]_opt\[email protected]_opt\[email protected]_context\ndef info(ctx, input, aspect, indent, namespace, meta_member, verbose, bidx,\n masked):\n \"\"\"Print metadata about the dataset as JSON.\n\n Optionally print a single metadata item as a string.\n \"\"\"\n try:\n with ctx.obj['env'], rasterio.open(input) as src:\n\n info = dict(src.profile)\n info['shape'] = (info['height'], info['width'])\n info['bounds'] = src.bounds\n\n if src.crs:\n proj4 = src.crs.to_string()\n if proj4.startswith('+init=epsg'):\n proj4 = proj4.split('=')[1].upper()\n info['crs'] = proj4\n else:\n info['crs'] = None\n\n info['res'] = src.res\n info['colorinterp'] = [src.colorinterp(i).name\n for i in src.indexes]\n info['units'] = [units or None for units in src.units]\n info['descriptions'] = src.descriptions\n info['indexes'] = src.indexes\n info['mask_flags'] = [[\n flag.name for flag in flags] for flags in src.mask_flag_enums]\n\n if proj4 != '':\n info['lnglat'] = src.lnglat()\n\n if verbose:\n stats = [{'min': float(b.min()),\n 
'max': float(b.max()),\n 'mean': float(b.mean())\n } for b in src.read(masked=masked)]\n info['stats'] = stats\n\n info['checksum'] = [src.checksum(i) for i in src.indexes]\n\n gcps, crs = src.gcps\n proj4 = crs.to_string()\n if proj4.startswith('+init=epsg'):\n proj4 = proj4.split('=')[1].upper()\n if gcps:\n info['gcps'] = {\n 'crs': proj4, 'points': [p.asdict() for p in gcps]}\n\n if aspect == 'meta':\n if meta_member == 'stats':\n band = src.read(bidx, masked=masked)\n click.echo('%f %f %f' % (\n float(band.min()),\n float(band.max()),\n float(band.mean())))\n elif meta_member == 'checksum':\n click.echo(str(src.checksum(bidx)))\n elif meta_member:\n if isinstance(info[meta_member], (list, tuple)):\n click.echo(\" \".join(map(str, info[meta_member])))\n else:\n click.echo(info[meta_member])\n else:\n click.echo(json.dumps(info, sort_keys=True, indent=indent))\n\n elif aspect == 'tags':\n click.echo(\n json.dumps(src.tags(ns=namespace), indent=indent))\n except Exception:\n raise click.Abort()\n", "path": "rasterio/rio/info.py"}]} | 3,813 | 438 |
gh_patches_debug_64681 | rasdani/github-patches | git_diff | chainer__chainer-751 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`cupy.array_split` doesn't accept empty indices
```
>>> x=cupy.array([1])
>>> cupy.array_split(x, [])
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/unno/git/chainer/cupy/manipulation/split.py", line 32, in array_split
ret.append(ary[skip + (slice(index, size),)])
UnboundLocalError: local variable 'index' referenced before assignment
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cupy/manipulation/split.py`
Content:
```
1 import numpy
2 import six
3
4
5 def array_split(ary, indices_or_sections, axis=0):
6 """Splits an array into multiple sub arrays along a given axis.
7
8 This function is almost equivalent to :func:`cupy.split`. The only
9 difference is that this function allows an integer sections that does not
10 evenly divide the axis.
11
12 .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.array_split`
13
14 """
15 if ary.ndim <= axis:
16 raise IndexError('Axis exceeds ndim')
17 size = ary.shape[axis]
18
19 if numpy.isscalar(indices_or_sections):
20 each_size = (size - 1) // indices_or_sections + 1
21 indices = [i * each_size
22 for i in six.moves.range(1, indices_or_sections)]
23 else:
24 indices = indices_or_sections
25
26 skip = (slice(None),) * axis
27 ret = []
28 i = 0
29 for index in indices:
30 ret.append(ary[skip + (slice(i, index),)])
31 i = index
32 ret.append(ary[skip + (slice(index, size),)])
33
34 return ret
35
36
37 def dsplit(ary, indices_or_sections):
38 """Splits an array into multiple sub arrays along the third axis.
39
40 This is equivalent to ``split`` with ``axis=2``.
41
42 .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`
43
44 """
45 if ary.ndim <= 2:
46 raise ValueError('Cannot dsplit an array with less than 3 dimensions')
47 return split(ary, indices_or_sections, 2)
48
49
50 def hsplit(ary, indices_or_sections):
51 """Splits an array into multiple sub arrays horizontally.
52
53 This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one
54 dimension, and otherwise that with ``axis=1``.
55
56 .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.hsplit`
57
58 """
59 if ary.ndim == 0:
60 raise ValueError('Cannot hsplit a zero-dimensional array')
61 if ary.ndim == 1:
62 return split(ary, indices_or_sections, 0)
63 else:
64 return split(ary, indices_or_sections, 1)
65
66
67 def split(ary, indices_or_sections, axis=0):
68 """Splits an array into multiple sub arrays along a given axis.
69
70 Args:
71 ary (cupy.ndarray): Array to split.
72 indices_or_sections (int or sequence of ints): A value indicating how
73 to divide the axis. If it is an integer, then is treated as the
74 number of sections, and the axis is evenly divided. Otherwise,
75 the integers indicate indices to split at. Note that the sequence
76 on the device memory is not allowed.
77 axis (int): Axis along which the array is split.
78
79 Returns:
80 A list of sub arrays. Eacy array is a view of the corresponding input
81 array.
82
83 .. seealso:: :func:`numpy.split`
84
85 """
86 if ary.ndim <= axis:
87 raise IndexError('Axis exceeds ndim')
88 size = ary.shape[axis]
89
90 if numpy.isscalar(indices_or_sections):
91 if size % indices_or_sections != 0:
92 raise ValueError(
93 'indices_or_sections must divide the size along the axes.\n'
94 'If you want to split the array into non-equally-sized '
95 'arrays, use array_split instead.')
96 return array_split(ary, indices_or_sections, axis)
97
98
99 def vsplit(ary, indices_or_sections):
100 """Splits an array into multiple sub arrays along the first axis.
101
102 This is equivalent to ``split`` with ``axis=0``.
103
104 .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`
105
106 """
107 if ary.ndim <= 1:
108 raise ValueError('Cannot vsplit an array with less than 2 dimensions')
109 return split(ary, indices_or_sections, 0)
110
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cupy/manipulation/split.py b/cupy/manipulation/split.py
--- a/cupy/manipulation/split.py
+++ b/cupy/manipulation/split.py
@@ -23,6 +23,9 @@
else:
indices = indices_or_sections
+ if len(indices) == 0:
+ return [ary]
+
skip = (slice(None),) * axis
ret = []
i = 0
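For a quick sanity check of the patched behavior, the reproduction from the issue can be rerun; with an empty index list the whole array should come back as the single segment, matching `numpy.array_split`. This is a sketch that assumes CuPy and a CUDA-capable environment are available and that the patch above is applied:

```python
import numpy
import cupy  # assumes a CUDA-capable environment

x = cupy.array([1])
print(cupy.array_split(x, []))                   # [array([1])] after the patch, no UnboundLocalError
print(numpy.array_split(numpy.array([1]), []))   # [array([1])] for comparison
```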
| {"golden_diff": "diff --git a/cupy/manipulation/split.py b/cupy/manipulation/split.py\n--- a/cupy/manipulation/split.py\n+++ b/cupy/manipulation/split.py\n@@ -23,6 +23,9 @@\n else:\n indices = indices_or_sections\n \n+ if len(indices) == 0:\n+ return [ary]\n+\n skip = (slice(None),) * axis\n ret = []\n i = 0\n", "issue": "`cupy.array_split` doesn't accept empty indecies\n```\n>>> x=cupy.array([1])\n>>> cupy.array_split(x, [])\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n File \"/home/unno/git/chainer/cupy/manipulation/split.py\", line 32, in array_split\n ret.append(ary[skip + (slice(index, size),)])\nUnboundLocalError: local variable 'index' referenced before assignment\n```\n\n", "before_files": [{"content": "import numpy\nimport six\n\n\ndef array_split(ary, indices_or_sections, axis=0):\n \"\"\"Splits an array into multiple sub arrays along a given axis.\n\n This function is almost equivalent to :func:`cupy.split`. The only\n difference is that this function allows an integer sections that does not\n evenly divide the axis.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.array_split`\n\n \"\"\"\n if ary.ndim <= axis:\n raise IndexError('Axis exceeds ndim')\n size = ary.shape[axis]\n\n if numpy.isscalar(indices_or_sections):\n each_size = (size - 1) // indices_or_sections + 1\n indices = [i * each_size\n for i in six.moves.range(1, indices_or_sections)]\n else:\n indices = indices_or_sections\n\n skip = (slice(None),) * axis\n ret = []\n i = 0\n for index in indices:\n ret.append(ary[skip + (slice(i, index),)])\n i = index\n ret.append(ary[skip + (slice(index, size),)])\n\n return ret\n\n\ndef dsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays along the third axis.\n\n This is equivalent to ``split`` with ``axis=2``.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`\n\n \"\"\"\n if ary.ndim <= 2:\n raise ValueError('Cannot dsplit an array with less than 3 dimensions')\n return split(ary, indices_or_sections, 2)\n\n\ndef hsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays horizontally.\n\n This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one\n dimension, and otherwise that with ``axis=1``.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.hsplit`\n\n \"\"\"\n if ary.ndim == 0:\n raise ValueError('Cannot hsplit a zero-dimensional array')\n if ary.ndim == 1:\n return split(ary, indices_or_sections, 0)\n else:\n return split(ary, indices_or_sections, 1)\n\n\ndef split(ary, indices_or_sections, axis=0):\n \"\"\"Splits an array into multiple sub arrays along a given axis.\n\n Args:\n ary (cupy.ndarray): Array to split.\n indices_or_sections (int or sequence of ints): A value indicating how\n to divide the axis. If it is an integer, then is treated as the\n number of sections, and the axis is evenly divided. Otherwise,\n the integers indicate indices to split at. Note that the sequence\n on the device memory is not allowed.\n axis (int): Axis along which the array is split.\n\n Returns:\n A list of sub arrays. Eacy array is a view of the corresponding input\n array.\n\n .. 
seealso:: :func:`numpy.split`\n\n \"\"\"\n if ary.ndim <= axis:\n raise IndexError('Axis exceeds ndim')\n size = ary.shape[axis]\n\n if numpy.isscalar(indices_or_sections):\n if size % indices_or_sections != 0:\n raise ValueError(\n 'indices_or_sections must divide the size along the axes.\\n'\n 'If you want to split the array into non-equally-sized '\n 'arrays, use array_split instead.')\n return array_split(ary, indices_or_sections, axis)\n\n\ndef vsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays along the first axis.\n\n This is equivalent to ``split`` with ``axis=0``.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`\n\n \"\"\"\n if ary.ndim <= 1:\n raise ValueError('Cannot vsplit an array with less than 2 dimensions')\n return split(ary, indices_or_sections, 0)\n", "path": "cupy/manipulation/split.py"}], "after_files": [{"content": "import numpy\nimport six\n\n\ndef array_split(ary, indices_or_sections, axis=0):\n \"\"\"Splits an array into multiple sub arrays along a given axis.\n\n This function is almost equivalent to :func:`cupy.split`. The only\n difference is that this function allows an integer sections that does not\n evenly divide the axis.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.array_split`\n\n \"\"\"\n if ary.ndim <= axis:\n raise IndexError('Axis exceeds ndim')\n size = ary.shape[axis]\n\n if numpy.isscalar(indices_or_sections):\n each_size = (size - 1) // indices_or_sections + 1\n indices = [i * each_size\n for i in six.moves.range(1, indices_or_sections)]\n else:\n indices = indices_or_sections\n\n if len(indices) == 0:\n return [ary]\n\n skip = (slice(None),) * axis\n ret = []\n i = 0\n for index in indices:\n ret.append(ary[skip + (slice(i, index),)])\n i = index\n ret.append(ary[skip + (slice(index, size),)])\n\n return ret\n\n\ndef dsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays along the third axis.\n\n This is equivalent to ``split`` with ``axis=2``.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`\n\n \"\"\"\n if ary.ndim <= 2:\n raise ValueError('Cannot dsplit an array with less than 3 dimensions')\n return split(ary, indices_or_sections, 2)\n\n\ndef hsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays horizontally.\n\n This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one\n dimension, and otherwise that with ``axis=1``.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.hsplit`\n\n \"\"\"\n if ary.ndim == 0:\n raise ValueError('Cannot hsplit a zero-dimensional array')\n if ary.ndim == 1:\n return split(ary, indices_or_sections, 0)\n else:\n return split(ary, indices_or_sections, 1)\n\n\ndef split(ary, indices_or_sections, axis=0):\n \"\"\"Splits an array into multiple sub arrays along a given axis.\n\n Args:\n ary (cupy.ndarray): Array to split.\n indices_or_sections (int or sequence of ints): A value indicating how\n to divide the axis. If it is an integer, then is treated as the\n number of sections, and the axis is evenly divided. Otherwise,\n the integers indicate indices to split at. Note that the sequence\n on the device memory is not allowed.\n axis (int): Axis along which the array is split.\n\n Returns:\n A list of sub arrays. Eacy array is a view of the corresponding input\n array.\n\n .. 
seealso:: :func:`numpy.split`\n\n \"\"\"\n if ary.ndim <= axis:\n raise IndexError('Axis exceeds ndim')\n size = ary.shape[axis]\n\n if numpy.isscalar(indices_or_sections):\n if size % indices_or_sections != 0:\n raise ValueError(\n 'indices_or_sections must divide the size along the axes.\\n'\n 'If you want to split the array into non-equally-sized '\n 'arrays, use array_split instead.')\n return array_split(ary, indices_or_sections, axis)\n\n\ndef vsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays along the first axis.\n\n This is equivalent to ``split`` with ``axis=0``.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`\n\n \"\"\"\n if ary.ndim <= 1:\n raise ValueError('Cannot vsplit an array with less than 2 dimensions')\n return split(ary, indices_or_sections, 0)\n", "path": "cupy/manipulation/split.py"}]} | 1,476 | 104 |
gh_patches_debug_14270 | rasdani/github-patches | git_diff | streamlink__streamlink-562 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
0.3.2 Release
Closes #562
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/__init__.py`
Content:
```
1 # coding: utf8
2 """Streamlink extracts streams from various services.
3
4 The main compontent of Streamlink is a command-line utility that
5 launches the streams in a video player.
6
7 An API is also provided that allows direct access to stream data.
8
9 Full documentation is available at https://streamlink.github.io.
10
11 """
12
13
14 __title__ = "streamlink"
15 __version__ = "0.3.1"
16 __license__ = "Simplified BSD"
17 __author__ = "Streamlink"
18 __copyright__ = "Copyright 2016 Streamlink"
19 __credits__ = [
20 "Agustín Carrasco (@asermax)",
21 "Andrew Bashore (@bashtech)",
22 "Andy Mikhailenko (@neithere)",
23 "Athanasios Oikonomou (@athoik)",
24 "Brian Callahan (@ibara)",
25 "Che (@chhe)",
26 "Christopher Rosell (@streamlink)",
27 "Daniel Meißner (@meise)",
28 "Daniel Miranda (@danielkza)",
29 "Daniel Wallace (@gtmanfred)",
30 "David Arvelo (@darvelo)",
31 "Dominik Dabrowski (@doda)",
32 "Erik G (@tboss)",
33 "Eric J (@wormeyman)",
34 "Ethan Jones (@jonesz)",
35 "Gaspard Jankowiak (@gapato)",
36 "Jaime Marquínez Ferrándiz (@jaimeMF)",
37 "Jan Tore Morken (@jantore)",
38 "John Peterson (@john-peterson)",
39 "Jon Bergli Heier (@sn4kebite)",
40 "Joseph Glanville (@josephglanville)",
41 "Julian Richen (@FireDart)",
42 "Kacper (@kasper93)",
43 "Martin Panter (@vadmium)",
44 "Max Nordlund (@maxnordlund)",
45 "Michael Cheah (@cheah)",
46 "Moritz Blanke",
47 "Niall McAndrew (@niallm90)",
48 "Niels Kräupl (@Gamewalker)",
49 "Pascal Romahn (@skulblakka)",
50 "Sam Edwards (@dotsam)",
51 "Stefan Breunig (@breunigs)",
52 "Suhail Patel (@suhailpatel)",
53 "Sunaga Takahiro (@sunaga720)",
54 "Vitaly Evtushenko (@eltiren)",
55 "Warnar Boekkooi (@boekkooi)",
56 "@blxd",
57 "@btiom",
58 "@daslicious",
59 "@MasterofJOKers",
60 "@mammothb",
61 "@medina",
62 "@monkeyphysics",
63 "@nixxquality",
64 "@papplampe",
65 "@Raziel-23",
66 "@t0mm0",
67 "@ToadKing",
68 "@unintended",
69 "@wolftankk",
70 "@yeeeargh"
71 ]
72
73 from .api import streams
74 from .exceptions import (StreamlinkError, PluginError, NoStreamsError,
75 NoPluginError, StreamError)
76 from .session import Streamlink
77
```
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 from os import environ
4 from os.path import abspath, dirname, join
5 from setuptools import setup, find_packages
6 from sys import version_info, path as sys_path
7
8 deps = []
9
10 if version_info[0] == 2:
11 # Require backport of concurrent.futures on Python 2
12 deps.append("futures")
13
14 # Require backport of argparse on Python 2.6
15 if version_info[1] == 6:
16 deps.append("argparse")
17
18 # Require singledispatch on Python <3.4
19 if version_info[0] == 2 or (version_info[0] == 3 and version_info[1] < 4):
20 deps.append("singledispatch")
21
22 # requests 2.0 does not work correctly on Python <2.6.3
23 if (version_info[0] == 2 and version_info[1] == 6 and version_info[2] < 3):
24 deps.append("requests>=1.0,<2.0")
25 else:
26 deps.append("requests>=1.0,!=2.12.0,!=2.12.1,<3.0")
27
28 # this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6
29 deps.append("pycryptodome>=3.4.3,<4")
30
31 # shutil.get_terminal_size and which were added in Python 3.3
32 if version_info[0] == 2:
33 deps.append("backports.shutil_which")
34 deps.append("backports.shutil_get_terminal_size")
35
36 # for localization
37 deps.append("iso-639")
38 deps.append("iso3166")
39
40 # When we build an egg for the Win32 bootstrap we don't want dependency
41 # information built into it.
42 if environ.get("NO_DEPS"):
43 deps = []
44
45 srcdir = join(dirname(abspath(__file__)), "src/")
46 sys_path.insert(0, srcdir)
47
48 setup(name="streamlink",
49 version="0.3.1",
50 description="Streamlink is command-line utility that extracts streams "
51 "from various services and pipes them into a video player of "
52 "choice.",
53 url="https://github.com/streamlink/streamlink",
54 author="Streamlink",
55 author_email="[email protected]", # temp until we have a mailing list / global email
56 license="Simplified BSD",
57 packages=find_packages("src"),
58 package_dir={"": "src"},
59 entry_points={
60 "console_scripts": ["streamlink=streamlink_cli.main:main"]
61 },
62 install_requires=deps,
63 test_suite="tests",
64 classifiers=["Development Status :: 5 - Production/Stable",
65 "Environment :: Console",
66 "Operating System :: POSIX",
67 "Operating System :: Microsoft :: Windows",
68 "Programming Language :: Python :: 2.6",
69 "Programming Language :: Python :: 2.7",
70 "Programming Language :: Python :: 3.3",
71 "Programming Language :: Python :: 3.4",
72 "Topic :: Internet :: WWW/HTTP",
73 "Topic :: Multimedia :: Sound/Audio",
74 "Topic :: Multimedia :: Video",
75 "Topic :: Utilities"])
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -46,7 +46,7 @@
sys_path.insert(0, srcdir)
setup(name="streamlink",
- version="0.3.1",
+ version="0.3.2",
description="Streamlink is command-line utility that extracts streams "
"from various services and pipes them into a video player of "
"choice.",
diff --git a/src/streamlink/__init__.py b/src/streamlink/__init__.py
--- a/src/streamlink/__init__.py
+++ b/src/streamlink/__init__.py
@@ -12,7 +12,7 @@
__title__ = "streamlink"
-__version__ = "0.3.1"
+__version__ = "0.3.2"
__license__ = "Simplified BSD"
__author__ = "Streamlink"
__copyright__ = "Copyright 2016 Streamlink"
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -46,7 +46,7 @@\n sys_path.insert(0, srcdir)\n \n setup(name=\"streamlink\",\n- version=\"0.3.1\",\n+ version=\"0.3.2\",\n description=\"Streamlink is command-line utility that extracts streams \"\n \"from various services and pipes them into a video player of \"\n \"choice.\",\ndiff --git a/src/streamlink/__init__.py b/src/streamlink/__init__.py\n--- a/src/streamlink/__init__.py\n+++ b/src/streamlink/__init__.py\n@@ -12,7 +12,7 @@\n \n \n __title__ = \"streamlink\"\n-__version__ = \"0.3.1\"\n+__version__ = \"0.3.2\"\n __license__ = \"Simplified BSD\"\n __author__ = \"Streamlink\"\n __copyright__ = \"Copyright 2016 Streamlink\"\n", "issue": "0.3.2 Release\nCloses #562 \n", "before_files": [{"content": "# coding: utf8\n\"\"\"Streamlink extracts streams from various services.\n\nThe main compontent of Streamlink is a command-line utility that\nlaunches the streams in a video player.\n\nAn API is also provided that allows direct access to stream data.\n\nFull documentation is available at https://streamlink.github.io.\n\n\"\"\"\n\n\n__title__ = \"streamlink\"\n__version__ = \"0.3.1\"\n__license__ = \"Simplified BSD\"\n__author__ = \"Streamlink\"\n__copyright__ = \"Copyright 2016 Streamlink\"\n__credits__ = [\n \"Agust\u00edn Carrasco (@asermax)\",\n \"Andrew Bashore (@bashtech)\",\n \"Andy Mikhailenko (@neithere)\",\n \"Athanasios Oikonomou (@athoik)\",\n \"Brian Callahan (@ibara)\",\n \"Che (@chhe)\",\n \"Christopher Rosell (@streamlink)\",\n \"Daniel Mei\u00dfner (@meise)\",\n \"Daniel Miranda (@danielkza)\",\n \"Daniel Wallace (@gtmanfred)\",\n \"David Arvelo (@darvelo)\",\n \"Dominik Dabrowski (@doda)\",\n \"Erik G (@tboss)\",\n \"Eric J (@wormeyman)\",\n \"Ethan Jones (@jonesz)\",\n \"Gaspard Jankowiak (@gapato)\",\n \"Jaime Marqu\u00ednez Ferr\u00e1ndiz (@jaimeMF)\",\n \"Jan Tore Morken (@jantore)\",\n \"John Peterson (@john-peterson)\",\n \"Jon Bergli Heier (@sn4kebite)\",\n \"Joseph Glanville (@josephglanville)\",\n \"Julian Richen (@FireDart)\",\n \"Kacper (@kasper93)\",\n \"Martin Panter (@vadmium)\",\n \"Max Nordlund (@maxnordlund)\",\n \"Michael Cheah (@cheah)\",\n \"Moritz Blanke\",\n \"Niall McAndrew (@niallm90)\",\n \"Niels Kr\u00e4upl (@Gamewalker)\",\n \"Pascal Romahn (@skulblakka)\",\n \"Sam Edwards (@dotsam)\",\n \"Stefan Breunig (@breunigs)\",\n \"Suhail Patel (@suhailpatel)\",\n \"Sunaga Takahiro (@sunaga720)\",\n \"Vitaly Evtushenko (@eltiren)\",\n \"Warnar Boekkooi (@boekkooi)\",\n \"@blxd\",\n \"@btiom\",\n \"@daslicious\",\n \"@MasterofJOKers\",\n \"@mammothb\",\n \"@medina\",\n \"@monkeyphysics\",\n \"@nixxquality\",\n \"@papplampe\",\n \"@Raziel-23\",\n \"@t0mm0\",\n \"@ToadKing\",\n \"@unintended\",\n \"@wolftankk\",\n \"@yeeeargh\"\n]\n\nfrom .api import streams\nfrom .exceptions import (StreamlinkError, PluginError, NoStreamsError,\n NoPluginError, StreamError)\nfrom .session import Streamlink\n", "path": "src/streamlink/__init__.py"}, {"content": "#!/usr/bin/env python\n\nfrom os import environ\nfrom os.path import abspath, dirname, join\nfrom setuptools import setup, find_packages\nfrom sys import version_info, path as sys_path\n\ndeps = []\n\nif version_info[0] == 2:\n # Require backport of concurrent.futures on Python 2\n deps.append(\"futures\")\n\n # Require backport of argparse on Python 2.6\n if version_info[1] == 6:\n deps.append(\"argparse\")\n\n# Require singledispatch on Python <3.4\nif version_info[0] == 2 or (version_info[0] == 3 and version_info[1] < 4):\n 
deps.append(\"singledispatch\")\n\n# requests 2.0 does not work correctly on Python <2.6.3\nif (version_info[0] == 2 and version_info[1] == 6 and version_info[2] < 3):\n deps.append(\"requests>=1.0,<2.0\")\nelse:\n deps.append(\"requests>=1.0,!=2.12.0,!=2.12.1,<3.0\")\n\n# this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6\ndeps.append(\"pycryptodome>=3.4.3,<4\")\n\n# shutil.get_terminal_size and which were added in Python 3.3\nif version_info[0] == 2:\n deps.append(\"backports.shutil_which\")\n deps.append(\"backports.shutil_get_terminal_size\")\n\n# for localization\ndeps.append(\"iso-639\")\ndeps.append(\"iso3166\")\n\n# When we build an egg for the Win32 bootstrap we don't want dependency\n# information built into it.\nif environ.get(\"NO_DEPS\"):\n deps = []\n\nsrcdir = join(dirname(abspath(__file__)), \"src/\")\nsys_path.insert(0, srcdir)\n\nsetup(name=\"streamlink\",\n version=\"0.3.1\",\n description=\"Streamlink is command-line utility that extracts streams \"\n \"from various services and pipes them into a video player of \"\n \"choice.\",\n url=\"https://github.com/streamlink/streamlink\",\n author=\"Streamlink\",\n author_email=\"[email protected]\", # temp until we have a mailing list / global email\n license=\"Simplified BSD\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n entry_points={\n \"console_scripts\": [\"streamlink=streamlink_cli.main:main\"]\n },\n install_requires=deps,\n test_suite=\"tests\",\n classifiers=[\"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Multimedia :: Sound/Audio\",\n \"Topic :: Multimedia :: Video\",\n \"Topic :: Utilities\"])\n", "path": "setup.py"}], "after_files": [{"content": "# coding: utf8\n\"\"\"Streamlink extracts streams from various services.\n\nThe main compontent of Streamlink is a command-line utility that\nlaunches the streams in a video player.\n\nAn API is also provided that allows direct access to stream data.\n\nFull documentation is available at https://streamlink.github.io.\n\n\"\"\"\n\n\n__title__ = \"streamlink\"\n__version__ = \"0.3.2\"\n__license__ = \"Simplified BSD\"\n__author__ = \"Streamlink\"\n__copyright__ = \"Copyright 2016 Streamlink\"\n__credits__ = [\n \"Agust\u00edn Carrasco (@asermax)\",\n \"Andrew Bashore (@bashtech)\",\n \"Andy Mikhailenko (@neithere)\",\n \"Athanasios Oikonomou (@athoik)\",\n \"Brian Callahan (@ibara)\",\n \"Che (@chhe)\",\n \"Christopher Rosell (@streamlink)\",\n \"Daniel Mei\u00dfner (@meise)\",\n \"Daniel Miranda (@danielkza)\",\n \"Daniel Wallace (@gtmanfred)\",\n \"David Arvelo (@darvelo)\",\n \"Dominik Dabrowski (@doda)\",\n \"Erik G (@tboss)\",\n \"Eric J (@wormeyman)\",\n \"Ethan Jones (@jonesz)\",\n \"Gaspard Jankowiak (@gapato)\",\n \"Jaime Marqu\u00ednez Ferr\u00e1ndiz (@jaimeMF)\",\n \"Jan Tore Morken (@jantore)\",\n \"John Peterson (@john-peterson)\",\n \"Jon Bergli Heier (@sn4kebite)\",\n \"Joseph Glanville (@josephglanville)\",\n \"Julian Richen (@FireDart)\",\n \"Kacper (@kasper93)\",\n \"Martin Panter (@vadmium)\",\n \"Max Nordlund (@maxnordlund)\",\n \"Michael Cheah (@cheah)\",\n \"Moritz Blanke\",\n \"Niall McAndrew (@niallm90)\",\n \"Niels Kr\u00e4upl (@Gamewalker)\",\n 
\"Pascal Romahn (@skulblakka)\",\n \"Sam Edwards (@dotsam)\",\n \"Stefan Breunig (@breunigs)\",\n \"Suhail Patel (@suhailpatel)\",\n \"Sunaga Takahiro (@sunaga720)\",\n \"Vitaly Evtushenko (@eltiren)\",\n \"Warnar Boekkooi (@boekkooi)\",\n \"@blxd\",\n \"@btiom\",\n \"@daslicious\",\n \"@MasterofJOKers\",\n \"@mammothb\",\n \"@medina\",\n \"@monkeyphysics\",\n \"@nixxquality\",\n \"@papplampe\",\n \"@Raziel-23\",\n \"@t0mm0\",\n \"@ToadKing\",\n \"@unintended\",\n \"@wolftankk\",\n \"@yeeeargh\"\n]\n\nfrom .api import streams\nfrom .exceptions import (StreamlinkError, PluginError, NoStreamsError,\n NoPluginError, StreamError)\nfrom .session import Streamlink\n", "path": "src/streamlink/__init__.py"}, {"content": "#!/usr/bin/env python\n\nfrom os import environ\nfrom os.path import abspath, dirname, join\nfrom setuptools import setup, find_packages\nfrom sys import version_info, path as sys_path\n\ndeps = []\n\nif version_info[0] == 2:\n # Require backport of concurrent.futures on Python 2\n deps.append(\"futures\")\n\n # Require backport of argparse on Python 2.6\n if version_info[1] == 6:\n deps.append(\"argparse\")\n\n# Require singledispatch on Python <3.4\nif version_info[0] == 2 or (version_info[0] == 3 and version_info[1] < 4):\n deps.append(\"singledispatch\")\n\n# requests 2.0 does not work correctly on Python <2.6.3\nif (version_info[0] == 2 and version_info[1] == 6 and version_info[2] < 3):\n deps.append(\"requests>=1.0,<2.0\")\nelse:\n deps.append(\"requests>=1.0,!=2.12.0,!=2.12.1,<3.0\")\n\n# this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6\ndeps.append(\"pycryptodome>=3.4.3,<4\")\n\n# shutil.get_terminal_size and which were added in Python 3.3\nif version_info[0] == 2:\n deps.append(\"backports.shutil_which\")\n deps.append(\"backports.shutil_get_terminal_size\")\n\n# for localization\ndeps.append(\"iso-639\")\ndeps.append(\"iso3166\")\n\n# When we build an egg for the Win32 bootstrap we don't want dependency\n# information built into it.\nif environ.get(\"NO_DEPS\"):\n deps = []\n\nsrcdir = join(dirname(abspath(__file__)), \"src/\")\nsys_path.insert(0, srcdir)\n\nsetup(name=\"streamlink\",\n version=\"0.3.2\",\n description=\"Streamlink is command-line utility that extracts streams \"\n \"from various services and pipes them into a video player of \"\n \"choice.\",\n url=\"https://github.com/streamlink/streamlink\",\n author=\"Streamlink\",\n author_email=\"[email protected]\", # temp until we have a mailing list / global email\n license=\"Simplified BSD\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n entry_points={\n \"console_scripts\": [\"streamlink=streamlink_cli.main:main\"]\n },\n install_requires=deps,\n test_suite=\"tests\",\n classifiers=[\"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Multimedia :: Sound/Audio\",\n \"Topic :: Multimedia :: Video\",\n \"Topic :: Utilities\"])\n", "path": "setup.py"}]} | 1,949 | 215 |
gh_patches_debug_41086 | rasdani/github-patches | git_diff | chainer__chainer-6824 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Stack overflow when `to_cpu`-family is overridden
### Code
```py
import chainer
class MyNet(chainer.Link):
def to_cpu(self):
super(MyNet, self).to_cpu()
net = MyNet()
net.to_cpu()
```
### Result
```
:
File "/data2/work/w/repos/chainer/chainer/link.py", line 447, in device_resident_accept
super(Link, self).device_resident_accept(visitor)
File "/data2/work/w/repos/chainer/chainer/device_resident.py", line 31, in device_resident_accept
visitor.visit_device_resident(self)
File "/data2/work/w/repos/chainer/chainer/device_resident.py", line 240, in visit_device_resident
to_method(**kwargs)
File "/home/niboshi/t/a.py", line 6, in to_cpu
super(MyNet, self).to_cpu()
File "/data2/work/w/repos/chainer/chainer/device_resident.py", line 67, in to_cpu
self.__to_device(visitor)
File "/data2/work/w/repos/chainer/chainer/device_resident.py", line 139, in __to_device
self.device_resident_accept(to_device_visitor)
File "/data2/work/w/repos/chainer/chainer/link.py", line 447, in device_resident_accept
super(Link, self).device_resident_accept(visitor)
File "/data2/work/w/repos/chainer/chainer/device_resident.py", line 31, in device_resident_accept
visitor.visit_device_resident(self)
RecursionError: maximum recursion depth exceeded
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/device_resident.py`
Content:
```
1 import abc
2 import sys
3 import typing as tp # NOQA
4
5 import numpy
6
7 import chainer
8 from chainer import backend
9 from chainer.backends import _cpu
10 from chainer.backends import cuda
11 from chainer.backends import intel64
12 from chainer import types # NOQA
13 from chainer import utils
14 import chainerx
15
16
17 class DeviceResident(utils.enable_final(meta_base=abc.ABCMeta)):
18
19 """A base class of objects with multi-device hierarchy."""
20
21 _device = _cpu.CpuDevice()
22
23 def __init__(self):
24 # Store overridden to_device family method names.
25 self._overridden_to_methods = tuple([
26 m for m in ('to_cpu', 'to_gpu', 'to_intel64')
27 if _is_to_device_method_overridden(self, m)])
28
29 def device_resident_accept(self, visitor):
30 """Applies the visitor to all the device objects in this instance."""
31 visitor.visit_device_resident(self)
32
33 @property
34 def device(self):
35 """Returns the device"""
36 return self._device
37
38 @property
39 def xp(self):
40 # type: () -> types.Xp
41 """Array module for this link.
42
43 Depending on which of CPU/GPU this link is on, this property returns
44 :mod:`numpy` or :mod:`cupy`.
45
46 """
47 device = self.device
48 if device is None:
49 return None
50 return device.xp
51
52 def to_cpu(self):
53 # type: () -> 'DeviceResident'
54 """Copies parameter variables and persistent values to CPU.
55
56 This method does not handle non-registered attributes. If some of such
57 attributes must be copied to CPU, the link implementation must
58 override :meth:`Link.to_device` to do so.
59
60 Returns: self
61
62 """
63 visitor = _ToDeviceVisitor(
64 backend.CpuDevice(),
65 entry_method_info=('to_cpu', {}),
66 skip_between_cupy_devices=True)
67 self.__to_device(visitor)
68 return self
69
70 def to_gpu(
71 self,
72 device=None, # type: tp.Optional[types.CudaDeviceSpec]
73 ):
74 # type: (...) -> 'DeviceResident'
75 """Copies parameter variables and persistent values to GPU.
76
77 This method does not handle non-registered attributes. If some of such
78 attributes must be copied to GPU, the link implementation must
79 override :meth:`Link.to_device` to do so.
80
81 Args:
82 device: Target device specifier. If omitted, the current device is
83 used.
84
85 Returns: self
86
87 """
88 cuda.check_cuda_available()
89 cuda_device = cuda._get_device_or_current(device)
90 device = chainer.backends.cuda.GpuDevice(cuda_device)
91 visitor = _ToDeviceVisitor(
92 device,
93 entry_method_info=('to_gpu', {'device': device.device}),
94 skip_between_cupy_devices=True)
95 self.__to_device(visitor)
96 return self
97
98 def to_intel64(self):
99 # type: () -> 'DeviceResident'
100 """Copies parameter variables and persistent values to CPU."""
101 intel64.check_ideep_available()
102 visitor = _ToDeviceVisitor(
103 chainer.get_device(intel64.Intel64Device()),
104 entry_method_info=('to_intel64', {}))
105 self.__to_device(visitor)
106 return self
107
108 @utils.final
109 def to_chx(self):
110 """Converts parameter variables and persistent values to ChainerX \
111 without any copy.
112
113 This method does not handle non-registered attributes. If some of such
114 attributes must be copied to ChainerX, the link implementation must
115 override this method to do so.
116
117 Returns: self
118 """
119 if not chainerx.is_available():
120 raise RuntimeError('ChainerX is not available.')
121
122 if self.xp is chainerx:
123 return self
124
125 self.device_resident_accept(_ToChxVisitor())
126 return self
127
128 @utils.final
129 def from_chx(self):
130 """Converts parameter variables and persistent values from ChainerX \
131 to NumPy/CuPy devices without any copy."""
132 if isinstance(self._device, backend.ChainerxDevice):
133 self._device = self._device.fallback_device
134
135 self.device_resident_accept(_FromChxVisitor())
136 return self
137
138 def __to_device(self, to_device_visitor):
139 self.device_resident_accept(to_device_visitor)
140
141 @utils.final
142 def to_device(
143 self,
144 device # type: types.DeviceSpec
145 ):
146 # type: (...) -> 'DeviceResident'
147 """Copies parameter variables and persistent values to the specified \
148 device.
149
150 This method does not handle non-registered attributes. If some of such
151 attributes must be copied to the device, the link implementation must
152 override this method to do so.
153
154 Args:
155 device: Target device specifier. See
156 :func:`~chainer.get_device` for available values.
157
158 Returns: self
159
160 """
161 device = chainer.get_device(device)
162 self.__to_device(_ToDeviceVisitor(device))
163 return self
164
165
166 def _is_to_device_method_overridden(device_resident, method_name):
167 # Returns whether the specified to_device family method is overridden.
168 to_method = getattr(device_resident, method_name, None).__func__
169 to_method_orig = getattr(DeviceResident, method_name)
170 if sys.version_info < (3,):
171 to_method_orig = to_method_orig.__func__
172 if to_method is not to_method_orig:
173 return True # overridden
174 return False
175
176
177 class DeviceResidentsVisitor(object):
178
179 """Base class of visitors that visits device resident objects recursively.
180 """
181
182 def visit_device_resident(self, device_resident):
183 """Processes a :class:`DeviceResident` instance."""
184 raise NotImplementedError()
185
186 def visit_array(self, arr):
187 """Processes an array and returns a new one.
188
189 If the visitor does not create a new array, it can simply return the
190 original array.
191 """
192 raise NotImplementedError()
193
194 def visit_variable(self, param):
195 """Processes a variable or a parameter."""
196 raise NotImplementedError()
197
198
199 class _ToDeviceVisitor(DeviceResidentsVisitor):
200 # A visitor that implements recursive to_device().
201 # For backward compatibility, if any of to_cpu/to_gpu/to_intel64 are
202 # overridden on a device resident, this visitor calls it instead of
203 # `visit_device_resident`. That's true even if `to_device` was originally
204 # called.
205
206 def __init__(
207 self, device, entry_method_info=None,
208 skip_between_cupy_devices=False):
209
210 assert isinstance(device, chainer.backend.Device)
211
212 # `entry_method_info` is for backward compatibility workaround for
213 # overridden methods.
214 # It indicates which method originally causes this visitor.
215 # If it is any of the to_??? method names, descendant resident's
216 # respective method will be called if it's overridden
217 # (instead of `device_resident_accept`).
218 if entry_method_info is not None:
219 assert len(entry_method_info) == 2
220 assert entry_method_info[0] in ('to_cpu', 'to_gpu', 'to_intel64')
221
222 self._device = device
223 self._entry_method_info = entry_method_info
224 self._skip_between_cupy_devices = skip_between_cupy_devices
225
226 def visit_device_resident(self, device_resident):
227 device_resident._device = self._device
228
229 # Backward compatibility workaround for overridden methods
230 if device_resident._overridden_to_methods:
231 if self._entry_method_info is not None:
232 # Deprecated method is being called: e.g. to_cpu and to_gpu.
233 method_name, kwargs = self._entry_method_info
234 else:
235 # to_device is being called
236 method_name, kwargs = (
237 self._device_to_method_name_and_kwargs(self._device))
238 if method_name in device_resident._overridden_to_methods:
239 to_method = getattr(device_resident, method_name)
240 to_method(**kwargs)
241 return
242
243 def _device_to_method_name_and_kwargs(self, device):
244 # Converts a device instance to the corresponding combination of
245 # to_??? method name and kwargs.
246
247 # chainerx
248 if device.xp is chainerx:
249 return None, {}
250 # cupy
251 if device.xp is cuda.cupy:
252 return 'to_gpu', {'device': device.device.id}
253 # numpy
254 assert device.xp is numpy
255 if isinstance(device, _cpu.CpuDevice):
256 return 'to_cpu', {}
257 # intel64
258 assert isinstance(device, intel64.Intel64Device)
259 return 'to_intel64', {}
260
261 def visit_array(self, arr):
262 assert isinstance(arr, chainer.get_array_types())
263 if not (self._skip_between_cupy_devices
264 and self._device.xp is cuda.cupy
265 and isinstance(arr, cuda.ndarray)):
266 return self._device.send(arr)
267 return arr
268
269 def visit_variable(self, param):
270 assert isinstance(param, chainer.Variable)
271 if not (self._skip_between_cupy_devices
272 and self._device.xp is cuda.cupy
273 and param.device.xp is cuda.cupy):
274 param.to_device(self._device)
275
276
277 class _ToChxVisitor(DeviceResidentsVisitor):
278 # A visitor that recursively calls to_chx().
279
280 def visit_device_resident(self, device_resident):
281 device_resident._device = backend.ChainerxDevice.from_fallback_device(
282 device_resident._device)
283
284 def visit_array(self, arr):
285 assert isinstance(arr, chainer.get_array_types())
286 return backend.to_chx(arr)
287
288 def visit_variable(self, param):
289 assert isinstance(param, chainer.Variable)
290 param.to_chx()
291
292
293 class _FromChxVisitor(DeviceResidentsVisitor):
294 # A visitor that recursively calls from_chx().
295
296 def visit_device_resident(self, device_resident):
297 if isinstance(device_resident._device, backend.ChainerxDevice):
298 device_resident._device = device_resident._device.fallback_device
299
300 def visit_array(self, arr):
301 assert isinstance(arr, chainer.get_array_types())
302 return backend.from_chx(arr)
303
304 def visit_variable(self, param):
305 assert isinstance(param, chainer.Variable)
306 param.from_chx()
307
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chainer/device_resident.py b/chainer/device_resident.py
--- a/chainer/device_resident.py
+++ b/chainer/device_resident.py
@@ -63,7 +63,8 @@
visitor = _ToDeviceVisitor(
backend.CpuDevice(),
entry_method_info=('to_cpu', {}),
- skip_between_cupy_devices=True)
+ skip_between_cupy_devices=True,
+ starting_device_resident=self)
self.__to_device(visitor)
return self
@@ -91,7 +92,8 @@
visitor = _ToDeviceVisitor(
device,
entry_method_info=('to_gpu', {'device': device.device}),
- skip_between_cupy_devices=True)
+ skip_between_cupy_devices=True,
+ starting_device_resident=self)
self.__to_device(visitor)
return self
@@ -101,7 +103,8 @@
intel64.check_ideep_available()
visitor = _ToDeviceVisitor(
chainer.get_device(intel64.Intel64Device()),
- entry_method_info=('to_intel64', {}))
+ entry_method_info=('to_intel64', {}),
+ starting_device_resident=self)
self.__to_device(visitor)
return self
@@ -205,7 +208,8 @@
def __init__(
self, device, entry_method_info=None,
- skip_between_cupy_devices=False):
+ skip_between_cupy_devices=False,
+ starting_device_resident=None):
assert isinstance(device, chainer.backend.Device)
@@ -219,15 +223,29 @@
assert len(entry_method_info) == 2
assert entry_method_info[0] in ('to_cpu', 'to_gpu', 'to_intel64')
+ # starting_device_resident is also for backward compatibility
+ # workaround for overridden methods.
+ # It is a DeviceResident if to_xxx methods were initially called
+ # on this visitor. This is used to avoid infinite accept-visit loop
+ # that would occur by calling to_xxx methods.
+ assert (starting_device_resident is None
+ or isinstance(starting_device_resident, DeviceResident))
+
self._device = device
self._entry_method_info = entry_method_info
self._skip_between_cupy_devices = skip_between_cupy_devices
+ self._starting_device_resident = starting_device_resident
def visit_device_resident(self, device_resident):
device_resident._device = self._device
# Backward compatibility workaround for overridden methods
if device_resident._overridden_to_methods:
+ # Skip this device resident, if the visitor was initially triggered
+ # from it.
+ if device_resident is self._starting_device_resident:
+ return
+
if self._entry_method_info is not None:
# Deprecated method is being called: e.g. to_cpu and to_gpu.
method_name, kwargs = self._entry_method_info
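With `starting_device_resident` recorded, the visitor skips the resident whose own overridden `to_cpu`/`to_gpu`/`to_intel64` call created it, so the accept–visit cycle shown in the traceback terminates. A minimal check reusing the reproduction from the issue (a sketch; assumes the patch above is applied):

```python
import chainer

class MyNet(chainer.Link):
    def to_cpu(self):
        # Overridden method that delegates to the base implementation;
        # before the patch this re-entered the visitor and recursed forever.
        super(MyNet, self).to_cpu()

net = MyNet()
net.to_cpu()        # completes without RecursionError after the patch
print(net.device)   # expected to report the CPU device
```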
| {"golden_diff": "diff --git a/chainer/device_resident.py b/chainer/device_resident.py\n--- a/chainer/device_resident.py\n+++ b/chainer/device_resident.py\n@@ -63,7 +63,8 @@\n visitor = _ToDeviceVisitor(\n backend.CpuDevice(),\n entry_method_info=('to_cpu', {}),\n- skip_between_cupy_devices=True)\n+ skip_between_cupy_devices=True,\n+ starting_device_resident=self)\n self.__to_device(visitor)\n return self\n \n@@ -91,7 +92,8 @@\n visitor = _ToDeviceVisitor(\n device,\n entry_method_info=('to_gpu', {'device': device.device}),\n- skip_between_cupy_devices=True)\n+ skip_between_cupy_devices=True,\n+ starting_device_resident=self)\n self.__to_device(visitor)\n return self\n \n@@ -101,7 +103,8 @@\n intel64.check_ideep_available()\n visitor = _ToDeviceVisitor(\n chainer.get_device(intel64.Intel64Device()),\n- entry_method_info=('to_intel64', {}))\n+ entry_method_info=('to_intel64', {}),\n+ starting_device_resident=self)\n self.__to_device(visitor)\n return self\n \n@@ -205,7 +208,8 @@\n \n def __init__(\n self, device, entry_method_info=None,\n- skip_between_cupy_devices=False):\n+ skip_between_cupy_devices=False,\n+ starting_device_resident=None):\n \n assert isinstance(device, chainer.backend.Device)\n \n@@ -219,15 +223,29 @@\n assert len(entry_method_info) == 2\n assert entry_method_info[0] in ('to_cpu', 'to_gpu', 'to_intel64')\n \n+ # starting_device_resident is also for backward compatibility\n+ # workaround for overridden methods.\n+ # It is a DeviceResident if to_xxx methods were initially called\n+ # on this visitor. This is used to avoid infinite accept-visit loop\n+ # that would occur by calling to_xxx methods.\n+ assert (starting_device_resident is None\n+ or isinstance(starting_device_resident, DeviceResident))\n+\n self._device = device\n self._entry_method_info = entry_method_info\n self._skip_between_cupy_devices = skip_between_cupy_devices\n+ self._starting_device_resident = starting_device_resident\n \n def visit_device_resident(self, device_resident):\n device_resident._device = self._device\n \n # Backward compatibility workaround for overridden methods\n if device_resident._overridden_to_methods:\n+ # Skip this device resident, if the visitor was initially triggered\n+ # from it.\n+ if device_resident is self._starting_device_resident:\n+ return\n+\n if self._entry_method_info is not None:\n # Deprecated method is being called: e.g. 
to_cpu and to_gpu.\n method_name, kwargs = self._entry_method_info\n", "issue": "Stack overflow when `to_cpu`-family is overridden\n### Code\r\n```py\r\nimport chainer\r\n\r\n\r\nclass MyNet(chainer.Link):\r\n def to_cpu(self):\r\n super(MyNet, self).to_cpu()\r\n\r\n\r\nnet = MyNet()\r\nnet.to_cpu()\r\n```\r\n\r\n### Result\r\n```\r\n:\r\n File \"/data2/work/w/repos/chainer/chainer/link.py\", line 447, in device_resident_accept\r\n super(Link, self).device_resident_accept(visitor)\r\n File \"/data2/work/w/repos/chainer/chainer/device_resident.py\", line 31, in device_resident_accept\r\n visitor.visit_device_resident(self)\r\n File \"/data2/work/w/repos/chainer/chainer/device_resident.py\", line 240, in visit_device_resident\r\n to_method(**kwargs)\r\n File \"/home/niboshi/t/a.py\", line 6, in to_cpu\r\n super(MyNet, self).to_cpu()\r\n File \"/data2/work/w/repos/chainer/chainer/device_resident.py\", line 67, in to_cpu\r\n self.__to_device(visitor)\r\n File \"/data2/work/w/repos/chainer/chainer/device_resident.py\", line 139, in __to_device\r\n self.device_resident_accept(to_device_visitor)\r\n File \"/data2/work/w/repos/chainer/chainer/link.py\", line 447, in device_resident_accept\r\n super(Link, self).device_resident_accept(visitor)\r\n File \"/data2/work/w/repos/chainer/chainer/device_resident.py\", line 31, in device_resident_accept\r\n visitor.visit_device_resident(self)\r\nRecursionError: maximum recursion depth exceeded\r\n```\n", "before_files": [{"content": "import abc\nimport sys\nimport typing as tp # NOQA\n\nimport numpy\n\nimport chainer\nfrom chainer import backend\nfrom chainer.backends import _cpu\nfrom chainer.backends import cuda\nfrom chainer.backends import intel64\nfrom chainer import types # NOQA\nfrom chainer import utils\nimport chainerx\n\n\nclass DeviceResident(utils.enable_final(meta_base=abc.ABCMeta)):\n\n \"\"\"A base class of objects with multi-device hierarchy.\"\"\"\n\n _device = _cpu.CpuDevice()\n\n def __init__(self):\n # Store overridden to_device family method names.\n self._overridden_to_methods = tuple([\n m for m in ('to_cpu', 'to_gpu', 'to_intel64')\n if _is_to_device_method_overridden(self, m)])\n\n def device_resident_accept(self, visitor):\n \"\"\"Applies the visitor to all the device objects in this instance.\"\"\"\n visitor.visit_device_resident(self)\n\n @property\n def device(self):\n \"\"\"Returns the device\"\"\"\n return self._device\n\n @property\n def xp(self):\n # type: () -> types.Xp\n \"\"\"Array module for this link.\n\n Depending on which of CPU/GPU this link is on, this property returns\n :mod:`numpy` or :mod:`cupy`.\n\n \"\"\"\n device = self.device\n if device is None:\n return None\n return device.xp\n\n def to_cpu(self):\n # type: () -> 'DeviceResident'\n \"\"\"Copies parameter variables and persistent values to CPU.\n\n This method does not handle non-registered attributes. If some of such\n attributes must be copied to CPU, the link implementation must\n override :meth:`Link.to_device` to do so.\n\n Returns: self\n\n \"\"\"\n visitor = _ToDeviceVisitor(\n backend.CpuDevice(),\n entry_method_info=('to_cpu', {}),\n skip_between_cupy_devices=True)\n self.__to_device(visitor)\n return self\n\n def to_gpu(\n self,\n device=None, # type: tp.Optional[types.CudaDeviceSpec]\n ):\n # type: (...) -> 'DeviceResident'\n \"\"\"Copies parameter variables and persistent values to GPU.\n\n This method does not handle non-registered attributes. 
If some of such\n attributes must be copied to GPU, the link implementation must\n override :meth:`Link.to_device` to do so.\n\n Args:\n device: Target device specifier. If omitted, the current device is\n used.\n\n Returns: self\n\n \"\"\"\n cuda.check_cuda_available()\n cuda_device = cuda._get_device_or_current(device)\n device = chainer.backends.cuda.GpuDevice(cuda_device)\n visitor = _ToDeviceVisitor(\n device,\n entry_method_info=('to_gpu', {'device': device.device}),\n skip_between_cupy_devices=True)\n self.__to_device(visitor)\n return self\n\n def to_intel64(self):\n # type: () -> 'DeviceResident'\n \"\"\"Copies parameter variables and persistent values to CPU.\"\"\"\n intel64.check_ideep_available()\n visitor = _ToDeviceVisitor(\n chainer.get_device(intel64.Intel64Device()),\n entry_method_info=('to_intel64', {}))\n self.__to_device(visitor)\n return self\n\n @utils.final\n def to_chx(self):\n \"\"\"Converts parameter variables and persistent values to ChainerX \\\nwithout any copy.\n\n This method does not handle non-registered attributes. If some of such\n attributes must be copied to ChainerX, the link implementation must\n override this method to do so.\n\n Returns: self\n \"\"\"\n if not chainerx.is_available():\n raise RuntimeError('ChainerX is not available.')\n\n if self.xp is chainerx:\n return self\n\n self.device_resident_accept(_ToChxVisitor())\n return self\n\n @utils.final\n def from_chx(self):\n \"\"\"Converts parameter variables and persistent values from ChainerX \\\nto NumPy/CuPy devices without any copy.\"\"\"\n if isinstance(self._device, backend.ChainerxDevice):\n self._device = self._device.fallback_device\n\n self.device_resident_accept(_FromChxVisitor())\n return self\n\n def __to_device(self, to_device_visitor):\n self.device_resident_accept(to_device_visitor)\n\n @utils.final\n def to_device(\n self,\n device # type: types.DeviceSpec\n ):\n # type: (...) -> 'DeviceResident'\n \"\"\"Copies parameter variables and persistent values to the specified \\\ndevice.\n\n This method does not handle non-registered attributes. If some of such\n attributes must be copied to the device, the link implementation must\n override this method to do so.\n\n Args:\n device: Target device specifier. 
See\n :func:`~chainer.get_device` for available values.\n\n Returns: self\n\n \"\"\"\n device = chainer.get_device(device)\n self.__to_device(_ToDeviceVisitor(device))\n return self\n\n\ndef _is_to_device_method_overridden(device_resident, method_name):\n # Returns whether the specified to_device family method is overridden.\n to_method = getattr(device_resident, method_name, None).__func__\n to_method_orig = getattr(DeviceResident, method_name)\n if sys.version_info < (3,):\n to_method_orig = to_method_orig.__func__\n if to_method is not to_method_orig:\n return True # overridden\n return False\n\n\nclass DeviceResidentsVisitor(object):\n\n \"\"\"Base class of visitors that visits device resident objects recursively.\n \"\"\"\n\n def visit_device_resident(self, device_resident):\n \"\"\"Processes a :class:`DeviceResident` instance.\"\"\"\n raise NotImplementedError()\n\n def visit_array(self, arr):\n \"\"\"Processes an array and returns a new one.\n\n If the visitor does not create a new array, it can simply return the\n original array.\n \"\"\"\n raise NotImplementedError()\n\n def visit_variable(self, param):\n \"\"\"Processes a variable or a parameter.\"\"\"\n raise NotImplementedError()\n\n\nclass _ToDeviceVisitor(DeviceResidentsVisitor):\n # A visitor that implements recursive to_device().\n # For backward compatibility, if any of to_cpu/to_gpu/to_intel64 are\n # overridden on a device resident, this visitor calls it instead of\n # `visit_device_resident`. That's true even if `to_device` was originally\n # called.\n\n def __init__(\n self, device, entry_method_info=None,\n skip_between_cupy_devices=False):\n\n assert isinstance(device, chainer.backend.Device)\n\n # `entry_method_info` is for backward compatibility workaround for\n # overridden methods.\n # It indicates which method originally causes this visitor.\n # If it is any of the to_??? method names, descendant resident's\n # respective method will be called if it's overridden\n # (instead of `device_resident_accept`).\n if entry_method_info is not None:\n assert len(entry_method_info) == 2\n assert entry_method_info[0] in ('to_cpu', 'to_gpu', 'to_intel64')\n\n self._device = device\n self._entry_method_info = entry_method_info\n self._skip_between_cupy_devices = skip_between_cupy_devices\n\n def visit_device_resident(self, device_resident):\n device_resident._device = self._device\n\n # Backward compatibility workaround for overridden methods\n if device_resident._overridden_to_methods:\n if self._entry_method_info is not None:\n # Deprecated method is being called: e.g. to_cpu and to_gpu.\n method_name, kwargs = self._entry_method_info\n else:\n # to_device is being called\n method_name, kwargs = (\n self._device_to_method_name_and_kwargs(self._device))\n if method_name in device_resident._overridden_to_methods:\n to_method = getattr(device_resident, method_name)\n to_method(**kwargs)\n return\n\n def _device_to_method_name_and_kwargs(self, device):\n # Converts a device instance to the corresponding combination of\n # to_??? 
method name and kwargs.\n\n # chainerx\n if device.xp is chainerx:\n return None, {}\n # cupy\n if device.xp is cuda.cupy:\n return 'to_gpu', {'device': device.device.id}\n # numpy\n assert device.xp is numpy\n if isinstance(device, _cpu.CpuDevice):\n return 'to_cpu', {}\n # intel64\n assert isinstance(device, intel64.Intel64Device)\n return 'to_intel64', {}\n\n def visit_array(self, arr):\n assert isinstance(arr, chainer.get_array_types())\n if not (self._skip_between_cupy_devices\n and self._device.xp is cuda.cupy\n and isinstance(arr, cuda.ndarray)):\n return self._device.send(arr)\n return arr\n\n def visit_variable(self, param):\n assert isinstance(param, chainer.Variable)\n if not (self._skip_between_cupy_devices\n and self._device.xp is cuda.cupy\n and param.device.xp is cuda.cupy):\n param.to_device(self._device)\n\n\nclass _ToChxVisitor(DeviceResidentsVisitor):\n # A visitor that recursively calls to_chx().\n\n def visit_device_resident(self, device_resident):\n device_resident._device = backend.ChainerxDevice.from_fallback_device(\n device_resident._device)\n\n def visit_array(self, arr):\n assert isinstance(arr, chainer.get_array_types())\n return backend.to_chx(arr)\n\n def visit_variable(self, param):\n assert isinstance(param, chainer.Variable)\n param.to_chx()\n\n\nclass _FromChxVisitor(DeviceResidentsVisitor):\n # A visitor that recursively calls from_chx().\n\n def visit_device_resident(self, device_resident):\n if isinstance(device_resident._device, backend.ChainerxDevice):\n device_resident._device = device_resident._device.fallback_device\n\n def visit_array(self, arr):\n assert isinstance(arr, chainer.get_array_types())\n return backend.from_chx(arr)\n\n def visit_variable(self, param):\n assert isinstance(param, chainer.Variable)\n param.from_chx()\n", "path": "chainer/device_resident.py"}], "after_files": [{"content": "import abc\nimport sys\nimport typing as tp # NOQA\n\nimport numpy\n\nimport chainer\nfrom chainer import backend\nfrom chainer.backends import _cpu\nfrom chainer.backends import cuda\nfrom chainer.backends import intel64\nfrom chainer import types # NOQA\nfrom chainer import utils\nimport chainerx\n\n\nclass DeviceResident(utils.enable_final(meta_base=abc.ABCMeta)):\n\n \"\"\"A base class of objects with multi-device hierarchy.\"\"\"\n\n _device = _cpu.CpuDevice()\n\n def __init__(self):\n # Store overridden to_device family method names.\n self._overridden_to_methods = tuple([\n m for m in ('to_cpu', 'to_gpu', 'to_intel64')\n if _is_to_device_method_overridden(self, m)])\n\n def device_resident_accept(self, visitor):\n \"\"\"Applies the visitor to all the device objects in this instance.\"\"\"\n visitor.visit_device_resident(self)\n\n @property\n def device(self):\n \"\"\"Returns the device\"\"\"\n return self._device\n\n @property\n def xp(self):\n # type: () -> types.Xp\n \"\"\"Array module for this link.\n\n Depending on which of CPU/GPU this link is on, this property returns\n :mod:`numpy` or :mod:`cupy`.\n\n \"\"\"\n device = self.device\n if device is None:\n return None\n return device.xp\n\n def to_cpu(self):\n # type: () -> 'DeviceResident'\n \"\"\"Copies parameter variables and persistent values to CPU.\n\n This method does not handle non-registered attributes. 
If some of such\n attributes must be copied to CPU, the link implementation must\n override :meth:`Link.to_device` to do so.\n\n Returns: self\n\n \"\"\"\n visitor = _ToDeviceVisitor(\n backend.CpuDevice(),\n entry_method_info=('to_cpu', {}),\n skip_between_cupy_devices=True,\n starting_device_resident=self)\n self.__to_device(visitor)\n return self\n\n def to_gpu(\n self,\n device=None, # type: tp.Optional[types.CudaDeviceSpec]\n ):\n # type: (...) -> 'DeviceResident'\n \"\"\"Copies parameter variables and persistent values to GPU.\n\n This method does not handle non-registered attributes. If some of such\n attributes must be copied to GPU, the link implementation must\n override :meth:`Link.to_device` to do so.\n\n Args:\n device: Target device specifier. If omitted, the current device is\n used.\n\n Returns: self\n\n \"\"\"\n cuda.check_cuda_available()\n cuda_device = cuda._get_device_or_current(device)\n device = chainer.backends.cuda.GpuDevice(cuda_device)\n visitor = _ToDeviceVisitor(\n device,\n entry_method_info=('to_gpu', {'device': device.device}),\n skip_between_cupy_devices=True,\n starting_device_resident=self)\n self.__to_device(visitor)\n return self\n\n def to_intel64(self):\n # type: () -> 'DeviceResident'\n \"\"\"Copies parameter variables and persistent values to CPU.\"\"\"\n intel64.check_ideep_available()\n visitor = _ToDeviceVisitor(\n chainer.get_device(intel64.Intel64Device()),\n entry_method_info=('to_intel64', {}),\n starting_device_resident=self)\n self.__to_device(visitor)\n return self\n\n @utils.final\n def to_chx(self):\n \"\"\"Converts parameter variables and persistent values to ChainerX \\\nwithout any copy.\n\n This method does not handle non-registered attributes. If some of such\n attributes must be copied to ChainerX, the link implementation must\n override this method to do so.\n\n Returns: self\n \"\"\"\n if not chainerx.is_available():\n raise RuntimeError('ChainerX is not available.')\n\n if self.xp is chainerx:\n return self\n\n self.device_resident_accept(_ToChxVisitor())\n return self\n\n @utils.final\n def from_chx(self):\n \"\"\"Converts parameter variables and persistent values from ChainerX \\\nto NumPy/CuPy devices without any copy.\"\"\"\n if isinstance(self._device, backend.ChainerxDevice):\n self._device = self._device.fallback_device\n\n self.device_resident_accept(_FromChxVisitor())\n return self\n\n def __to_device(self, to_device_visitor):\n self.device_resident_accept(to_device_visitor)\n\n @utils.final\n def to_device(\n self,\n device # type: types.DeviceSpec\n ):\n # type: (...) -> 'DeviceResident'\n \"\"\"Copies parameter variables and persistent values to the specified \\\ndevice.\n\n This method does not handle non-registered attributes. If some of such\n attributes must be copied to the device, the link implementation must\n override this method to do so.\n\n Args:\n device: Target device specifier. 
See\n :func:`~chainer.get_device` for available values.\n\n Returns: self\n\n \"\"\"\n device = chainer.get_device(device)\n self.__to_device(_ToDeviceVisitor(device))\n return self\n\n\ndef _is_to_device_method_overridden(device_resident, method_name):\n # Returns whether the specified to_device family method is overridden.\n to_method = getattr(device_resident, method_name, None).__func__\n to_method_orig = getattr(DeviceResident, method_name)\n if sys.version_info < (3,):\n to_method_orig = to_method_orig.__func__\n if to_method is not to_method_orig:\n return True # overridden\n return False\n\n\nclass DeviceResidentsVisitor(object):\n\n \"\"\"Base class of visitors that visits device resident objects recursively.\n \"\"\"\n\n def visit_device_resident(self, device_resident):\n \"\"\"Processes a :class:`DeviceResident` instance.\"\"\"\n raise NotImplementedError()\n\n def visit_array(self, arr):\n \"\"\"Processes an array and returns a new one.\n\n If the visitor does not create a new array, it can simply return the\n original array.\n \"\"\"\n raise NotImplementedError()\n\n def visit_variable(self, param):\n \"\"\"Processes a variable or a parameter.\"\"\"\n raise NotImplementedError()\n\n\nclass _ToDeviceVisitor(DeviceResidentsVisitor):\n # A visitor that implements recursive to_device().\n # For backward compatibility, if any of to_cpu/to_gpu/to_intel64 are\n # overridden on a device resident, this visitor calls it instead of\n # `visit_device_resident`. That's true even if `to_device` was originally\n # called.\n\n def __init__(\n self, device, entry_method_info=None,\n skip_between_cupy_devices=False,\n starting_device_resident=None):\n\n assert isinstance(device, chainer.backend.Device)\n\n # `entry_method_info` is for backward compatibility workaround for\n # overridden methods.\n # It indicates which method originally causes this visitor.\n # If it is any of the to_??? method names, descendant resident's\n # respective method will be called if it's overridden\n # (instead of `device_resident_accept`).\n if entry_method_info is not None:\n assert len(entry_method_info) == 2\n assert entry_method_info[0] in ('to_cpu', 'to_gpu', 'to_intel64')\n\n # starting_device_resident is also for backward compatibility\n # workaround for overridden methods.\n # It is a DeviceResident if to_xxx methods were initially called\n # on this visitor. This is used to avoid infinite accept-visit loop\n # that would occur by calling to_xxx methods.\n assert (starting_device_resident is None\n or isinstance(starting_device_resident, DeviceResident))\n\n self._device = device\n self._entry_method_info = entry_method_info\n self._skip_between_cupy_devices = skip_between_cupy_devices\n self._starting_device_resident = starting_device_resident\n\n def visit_device_resident(self, device_resident):\n device_resident._device = self._device\n\n # Backward compatibility workaround for overridden methods\n if device_resident._overridden_to_methods:\n # Skip this device resident, if the visitor was initially triggered\n # from it.\n if device_resident is self._starting_device_resident:\n return\n\n if self._entry_method_info is not None:\n # Deprecated method is being called: e.g. 
to_cpu and to_gpu.\n method_name, kwargs = self._entry_method_info\n else:\n # to_device is being called\n method_name, kwargs = (\n self._device_to_method_name_and_kwargs(self._device))\n if method_name in device_resident._overridden_to_methods:\n to_method = getattr(device_resident, method_name)\n to_method(**kwargs)\n return\n\n def _device_to_method_name_and_kwargs(self, device):\n # Converts a device instance to the corresponding combination of\n # to_??? method name and kwargs.\n\n # chainerx\n if device.xp is chainerx:\n return None, {}\n # cupy\n if device.xp is cuda.cupy:\n return 'to_gpu', {'device': device.device.id}\n # numpy\n assert device.xp is numpy\n if isinstance(device, _cpu.CpuDevice):\n return 'to_cpu', {}\n # intel64\n assert isinstance(device, intel64.Intel64Device)\n return 'to_intel64', {}\n\n def visit_array(self, arr):\n assert isinstance(arr, chainer.get_array_types())\n if not (self._skip_between_cupy_devices\n and self._device.xp is cuda.cupy\n and isinstance(arr, cuda.ndarray)):\n return self._device.send(arr)\n return arr\n\n def visit_variable(self, param):\n assert isinstance(param, chainer.Variable)\n if not (self._skip_between_cupy_devices\n and self._device.xp is cuda.cupy\n and param.device.xp is cuda.cupy):\n param.to_device(self._device)\n\n\nclass _ToChxVisitor(DeviceResidentsVisitor):\n # A visitor that recursively calls to_chx().\n\n def visit_device_resident(self, device_resident):\n device_resident._device = backend.ChainerxDevice.from_fallback_device(\n device_resident._device)\n\n def visit_array(self, arr):\n assert isinstance(arr, chainer.get_array_types())\n return backend.to_chx(arr)\n\n def visit_variable(self, param):\n assert isinstance(param, chainer.Variable)\n param.to_chx()\n\n\nclass _FromChxVisitor(DeviceResidentsVisitor):\n # A visitor that recursively calls from_chx().\n\n def visit_device_resident(self, device_resident):\n if isinstance(device_resident._device, backend.ChainerxDevice):\n device_resident._device = device_resident._device.fallback_device\n\n def visit_array(self, arr):\n assert isinstance(arr, chainer.get_array_types())\n return backend.from_chx(arr)\n\n def visit_variable(self, param):\n assert isinstance(param, chainer.Variable)\n param.from_chx()\n", "path": "chainer/device_resident.py"}]} | 3,685 | 659 |
gh_patches_debug_303 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-2347 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
gi._gobject.option is not part of pygobject
The [GObject hook](https://github.com/pyinstaller/pyinstaller/blob/develop/PyInstaller/hooks/hook-gi.repository.GObject.py) adds a `hiddenimport` for `gi._gobject.option` however `gi/_gobject/option.py` is not part of pygobject.
This leads to the following warning when packaging a Gtk application:
```
4813 INFO: Loading module hook "hook-gi.py"...
4818 INFO: Loading module hook "hook-gi.repository.GObject.py"...
4926 INFO: Processing pre-safe import module hook gi.repository.GLib
4963 WARNING: Hidden import "gi._gobject.option" not found!
```
Browsing through the [pygobject git history](https://git.gnome.org/browse/pygobject/), I find commit [8afd7e8](https://git.gnome.org/browse/pygobject/commit/gi/_option.py?id=8afd7e880a72a44e6ea46c763bab82146fd75c96), which moved `gi/_glib/option.py` into `gi/_option.py`.
Replacing the `hiddenimport` with `hiddenimports += ['gi._option', 'gi._gobject']` silences the issue. However, I do not yet understand enough about pygobject and pyinstaller to know whether this is the right thing to do.
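For illustration only, the hook tail with that workaround applied would look roughly like the sketch below; whether `gi._option` really belongs in the list is exactly the open question above.
```
# Sketch of PyInstaller/hooks/hook-gi.repository.GObject.py with the reporter's workaround
from PyInstaller.utils.hooks import get_gi_typelibs

binaries, datas, hiddenimports = get_gi_typelibs('GObject', '2.0')

# 'gi._gobject.option' no longer exists; option.py moved to gi/_option.py in pygobject 8afd7e8
hiddenimports += ['gi._option', 'gi._gobject']
```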
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PyInstaller/hooks/hook-gi.repository.GObject.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2005-2016, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License with exception
5 # for distributing bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #-----------------------------------------------------------------------------
9 """
10 Import hook for GObject https://developer.gnome.org/gobject/stable/ from the GLib
11 library https://wiki.gnome.org/Projects/GLib introspected through PyGobject https://wiki.gnome.org/PyGObject
12 via the GObject Introspection middleware layer https://wiki.gnome.org/Projects/GObjectIntrospection
13
14 Tested with GLib 2.44.1, PyGObject 3.16.2, and GObject Introspection 1.44.0 on Mac OS X 10.10 and
15 GLib 2.42.2, PyGObject 3.14.0, and GObject Introspection 1.42 on Windows 7
16 """
17
18 from PyInstaller.utils.hooks import get_gi_typelibs
19
20 binaries, datas, hiddenimports = get_gi_typelibs('GObject', '2.0')
21
22 hiddenimports += ['gi._gobject.option', 'gi._gobject']
23
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/PyInstaller/hooks/hook-gi.repository.GObject.py b/PyInstaller/hooks/hook-gi.repository.GObject.py
--- a/PyInstaller/hooks/hook-gi.repository.GObject.py
+++ b/PyInstaller/hooks/hook-gi.repository.GObject.py
@@ -19,4 +19,4 @@
binaries, datas, hiddenimports = get_gi_typelibs('GObject', '2.0')
-hiddenimports += ['gi._gobject.option', 'gi._gobject']
+hiddenimports += ['gi._gobject']
| {"golden_diff": "diff --git a/PyInstaller/hooks/hook-gi.repository.GObject.py b/PyInstaller/hooks/hook-gi.repository.GObject.py\n--- a/PyInstaller/hooks/hook-gi.repository.GObject.py\n+++ b/PyInstaller/hooks/hook-gi.repository.GObject.py\n@@ -19,4 +19,4 @@\n \n binaries, datas, hiddenimports = get_gi_typelibs('GObject', '2.0')\n \n-hiddenimports += ['gi._gobject.option', 'gi._gobject']\n+hiddenimports += ['gi._gobject']\n", "issue": "gi._gobject.option is not part of pygobject\nThe [GObject hook](https://github.com/pyinstaller/pyinstaller/blob/develop/PyInstaller/hooks/hook-gi.repository.GObject.py) adds a `hiddenimport` for `gi._gobject.option` however `gi/_gobject/option.py` is not part of pygobject.\r\n\r\nThis leads to the following warning when packaging a Gtk application:\r\n```\r\n4813 INFO: Loading module hook \"hook-gi.py\"...\r\n4818 INFO: Loading module hook \"hook-gi.repository.GObject.py\"...\r\n4926 INFO: Processing pre-safe import module hook gi.repository.GLib\r\n4963 WARNING: Hidden import \"gi._gobject.option\" not found!\r\n```\r\n\r\nBrowsing through the [pygobject git history](https://git.gnome.org/browse/pygobject/), I find commit [8afd7e8](https://git.gnome.org/browse/pygobject/commit/gi/_option.py?id=8afd7e880a72a44e6ea46c763bab82146fd75c96) which moved `gi/_glib/option.py` into `gi/_option.py`\r\n\r\nReplacing the `hiddenimport` to `hiddenimports += ['gi._option', 'gi._gobject']` silences the issue. However, I do not yet understand enough about pygobject and pyinstaller to know if this is the right thing to do.\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2016, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\"\"\"\nImport hook for GObject https://developer.gnome.org/gobject/stable/ from the GLib\nlibrary https://wiki.gnome.org/Projects/GLib introspected through PyGobject https://wiki.gnome.org/PyGObject\nvia the GObject Introspection middleware layer https://wiki.gnome.org/Projects/GObjectIntrospection\n\nTested with GLib 2.44.1, PyGObject 3.16.2, and GObject Introspection 1.44.0 on Mac OS X 10.10 and\nGLib 2.42.2, PyGObject 3.14.0, and GObject Introspection 1.42 on Windows 7\n\"\"\"\n\nfrom PyInstaller.utils.hooks import get_gi_typelibs\n\nbinaries, datas, hiddenimports = get_gi_typelibs('GObject', '2.0')\n\nhiddenimports += ['gi._gobject.option', 'gi._gobject']\n", "path": "PyInstaller/hooks/hook-gi.repository.GObject.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2016, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\"\"\"\nImport hook for GObject https://developer.gnome.org/gobject/stable/ from the GLib\nlibrary https://wiki.gnome.org/Projects/GLib introspected through PyGobject https://wiki.gnome.org/PyGObject\nvia the GObject Introspection middleware layer https://wiki.gnome.org/Projects/GObjectIntrospection\n\nTested with GLib 2.44.1, PyGObject 3.16.2, and GObject 
Introspection 1.44.0 on Mac OS X 10.10 and\nGLib 2.42.2, PyGObject 3.14.0, and GObject Introspection 1.42 on Windows 7\n\"\"\"\n\nfrom PyInstaller.utils.hooks import get_gi_typelibs\n\nbinaries, datas, hiddenimports = get_gi_typelibs('GObject', '2.0')\n\nhiddenimports += ['gi._gobject']\n", "path": "PyInstaller/hooks/hook-gi.repository.GObject.py"}]} | 899 | 122 |
gh_patches_debug_45143 | rasdani/github-patches | git_diff | NVIDIA__NVFlare-353 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
stop_fl.sh does not terminate client side worker process
The stop_fl.sh script just does `touch shutdown.fl`, waits 5 seconds, then kills the process whose pid is recorded in pid.fl.
The client's worker process is not killed or terminated.
Steps to reproduce:
1. Launch server (start.sh)
2. Launch client (start.sh)
3. Using Admin: Upload app
4. Using Admin: Deploy app
5. Using Admin: Start app
6. Use stop_fl.sh
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nvflare/private/fed/app/client/worker_process.py`
Content:
```
1 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Provides a command line interface for a federated client trainer."""
16
17 import argparse
18 import os
19 import sys
20 import traceback
21
22 from nvflare.apis.fl_constant import FLContextKey
23 from nvflare.apis.workspace import Workspace
24 from nvflare.fuel.sec.security_content_service import SecurityContentService
25 from nvflare.fuel.utils.argument_utils import parse_vars
26 from nvflare.private.defs import EngineConstant
27 from nvflare.private.fed.app.fl_conf import FLClientStarterConfiger
28 from nvflare.private.fed.client.client_json_config import ClientJsonConfigurator
29 from nvflare.private.fed.client.client_run_manager import ClientRunManager
30 from nvflare.private.fed.client.client_runner import ClientRunner
31 from nvflare.private.fed.client.client_status import ClientStatus
32 from nvflare.private.fed.client.command_agent import CommandAgent
33
34
35 def main():
36 """Worker_process start program."""
37 parser = argparse.ArgumentParser()
38 parser.add_argument("--workspace", "-m", type=str, help="WORKSPACE folder", required=True)
39 parser.add_argument("--startup", "-w", type=str, help="startup folder", required=True)
40
41 parser.add_argument(
42 "--fed_client", "-s", type=str, help="an aggregation server specification json file", required=True
43 )
44
45 parser.add_argument("--set", metavar="KEY=VALUE", nargs="*")
46
47 parser.add_argument("--local_rank", type=int, default=0)
48
49 args = parser.parse_args()
50 kv_list = parse_vars(args.set)
51
52 args.train_config = os.path.join("config", "config_train.json")
53 config_folder = kv_list.get("config_folder", "")
54 secure_train = kv_list.get("secure_train", True)
55 if config_folder == "":
56 args.client_config = "config_fed_client.json"
57 else:
58 args.client_config = os.path.join(config_folder, "config_fed_client.json")
59 args.config_folder = config_folder
60 args.env = os.path.join("config", "environment.json")
61
62 try:
63 remove_restart_file(args)
64 except BaseException:
65 print("Could not remove the restart.fl / shutdown.fl file. Please check your system before starting FL.")
66 sys.exit(-1)
67
68 restart_file = os.path.join(args.workspace, "restart.fl")
69 if os.path.exists(restart_file):
70 os.remove(restart_file)
71
72 print("starting the client .....")
73
74 deployer = None
75 command_agent = None
76
77 startup = os.path.join(args.workspace, "startup")
78 SecurityContentService.initialize(content_folder=startup)
79
80 try:
81 token_file = os.path.join(args.workspace, EngineConstant.CLIENT_TOKEN_FILE)
82 with open(token_file, "r") as f:
83 token = f.readline().strip()
84 run_number = f.readline().strip()
85 client_name = f.readline().strip()
86 listen_port = f.readline().strip()
87 print(
88 "token is: {} run_number is: {} client_name: {} listen_port: {}".format(
89 token, run_number, client_name, listen_port
90 )
91 )
92
93 startup = args.startup
94 app_root = os.path.join(args.workspace, "run_" + str(run_number), "app_" + client_name)
95
96 app_log_config = os.path.join(app_root, config_folder, "log.config")
97 if os.path.exists(app_log_config):
98 args.log_config = app_log_config
99 else:
100 args.log_config = os.path.join(startup, "log.config")
101
102 conf = FLClientStarterConfiger(
103 app_root=startup,
104 client_config_file_name=args.fed_client,
105 log_config_file_name=args.log_config,
106 kv_list=args.set,
107 )
108 conf.configure()
109
110 deployer = conf.base_deployer
111 federated_client = deployer.create_fed_client()
112 federated_client.status = ClientStatus.STARTING
113
114 federated_client.token = token
115 federated_client.client_name = client_name
116 federated_client.fl_ctx.set_prop(FLContextKey.CLIENT_NAME, client_name, private=False)
117 federated_client.fl_ctx.set_prop(EngineConstant.FL_TOKEN, token, private=False)
118 federated_client.fl_ctx.set_prop(FLContextKey.WORKSPACE_ROOT, args.workspace, private=True)
119
120 client_config_file_name = os.path.join(app_root, args.client_config)
121 conf = ClientJsonConfigurator(
122 config_file_name=client_config_file_name,
123 )
124 conf.configure()
125
126 workspace = Workspace(args.workspace, client_name, config_folder)
127 run_manager = ClientRunManager(
128 client_name=client_name,
129 run_num=int(run_number),
130 workspace=workspace,
131 client=federated_client,
132 components=conf.runner_config.components,
133 handlers=conf.runner_config.handlers,
134 conf=conf,
135 )
136 federated_client.run_manager = run_manager
137
138 with run_manager.new_context() as fl_ctx:
139 fl_ctx.set_prop(FLContextKey.CLIENT_NAME, client_name, private=False)
140 fl_ctx.set_prop(EngineConstant.FL_TOKEN, token, private=False)
141 fl_ctx.set_prop(FLContextKey.WORKSPACE_ROOT, args.workspace, private=True)
142 fl_ctx.set_prop(FLContextKey.ARGS, args, sticky=True)
143 fl_ctx.set_prop(FLContextKey.APP_ROOT, app_root, private=True, sticky=True)
144 fl_ctx.set_prop(FLContextKey.WORKSPACE_OBJECT, workspace, private=True)
145 fl_ctx.set_prop(FLContextKey.SECURE_MODE, secure_train, private=True, sticky=True)
146
147 client_runner = ClientRunner(config=conf.runner_config, run_num=int(run_number), engine=run_manager)
148 run_manager.add_handler(client_runner)
149 fl_ctx.set_prop(FLContextKey.RUNNER, client_runner, private=True)
150
151 # # Start the thread for responding the inquire
152 # federated_client.stop_listen = False
153 # thread = threading.Thread(target=listen_command, args=[federated_client, int(listen_port), client_runner])
154 # thread.start()
155 # Start the command agent
156 command_agent = CommandAgent(federated_client, int(listen_port), client_runner)
157 command_agent.start(fl_ctx)
158
159 federated_client.status = ClientStatus.STARTED
160 client_runner.run(app_root, args)
161
162 except BaseException as e:
163 traceback.print_exc()
164 print("FL client execution exception: " + str(e))
165 finally:
166 # if federated_client:
167 # federated_client.stop_listen = True
168 # thread.join()
169 if command_agent:
170 command_agent.shutdown()
171 if deployer:
172 deployer.close()
173 # address = ('localhost', 6000)
174 # conn_client = Client(address, authkey='client process secret password'.encode())
175 # conn_client.send('bye')
176
177
178 def remove_restart_file(args):
179 """To remove the restart.fl file.
180
181 Args:
182 args: command args
183
184 """
185 restart_file = os.path.join(args.workspace, "restart.fl")
186 if os.path.exists(restart_file):
187 os.remove(restart_file)
188 restart_file = os.path.join(args.workspace, "shutdown.fl")
189 if os.path.exists(restart_file):
190 os.remove(restart_file)
191
192
193 if __name__ == "__main__":
194 """
195 This is the program when starting the child process for running the NVIDIA FLARE executor.
196 """
197
198 main()
199
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nvflare/private/fed/app/client/worker_process.py b/nvflare/private/fed/app/client/worker_process.py
--- a/nvflare/private/fed/app/client/worker_process.py
+++ b/nvflare/private/fed/app/client/worker_process.py
@@ -17,8 +17,12 @@
import argparse
import os
import sys
+import threading
+import time
import traceback
+import psutil
+
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.workspace import Workspace
from nvflare.fuel.sec.security_content_service import SecurityContentService
@@ -32,23 +36,33 @@
from nvflare.private.fed.client.command_agent import CommandAgent
+def check_parent_alive(parent_pid, stop_event: threading.Event):
+ while True:
+ if stop_event.is_set():
+ break
+ if not psutil.pid_exists(parent_pid):
+ # if parent is not alive, kill its worker process
+ os.killpg(os.getpgid(os.getpid()), 9)
+ break
+ time.sleep(1)
+
+
def main():
- """Worker_process start program."""
+ """Worker process start program."""
parser = argparse.ArgumentParser()
parser.add_argument("--workspace", "-m", type=str, help="WORKSPACE folder", required=True)
parser.add_argument("--startup", "-w", type=str, help="startup folder", required=True)
-
parser.add_argument(
"--fed_client", "-s", type=str, help="an aggregation server specification json file", required=True
)
-
parser.add_argument("--set", metavar="KEY=VALUE", nargs="*")
-
parser.add_argument("--local_rank", type=int, default=0)
-
args = parser.parse_args()
kv_list = parse_vars(args.set)
+ # get parent process id
+ parent_pid = os.getppid()
+
args.train_config = os.path.join("config", "config_train.json")
config_folder = kv_list.get("config_folder", "")
secure_train = kv_list.get("secure_train", True)
@@ -68,7 +82,6 @@
restart_file = os.path.join(args.workspace, "restart.fl")
if os.path.exists(restart_file):
os.remove(restart_file)
-
print("starting the client .....")
deployer = None
@@ -77,7 +90,13 @@
startup = os.path.join(args.workspace, "startup")
SecurityContentService.initialize(content_folder=startup)
+ federated_client = None
+ thread = None
+ stop_event = threading.Event()
try:
+ # start parent process checking thread
+ thread = threading.Thread(target=check_parent_alive, args=(parent_pid, stop_event))
+ thread.start()
token_file = os.path.join(args.workspace, EngineConstant.CLIENT_TOKEN_FILE)
with open(token_file, "r") as f:
token = f.readline().strip()
@@ -158,7 +177,6 @@
federated_client.status = ClientStatus.STARTED
client_runner.run(app_root, args)
-
except BaseException as e:
traceback.print_exc()
print("FL client execution exception: " + str(e))
@@ -166,10 +184,16 @@
# if federated_client:
# federated_client.stop_listen = True
# thread.join()
+ stop_event.set()
if command_agent:
command_agent.shutdown()
if deployer:
deployer.close()
+ if federated_client:
+ federated_client.close()
+ if thread and thread.is_alive():
+ thread.join()
+
# address = ('localhost', 6000)
# conn_client = Client(address, authkey='client process secret password'.encode())
# conn_client.send('bye')
@@ -177,10 +201,8 @@
def remove_restart_file(args):
"""To remove the restart.fl file.
-
Args:
args: command args
-
"""
restart_file = os.path.join(args.workspace, "restart.fl")
if os.path.exists(restart_file):
| {"golden_diff": "diff --git a/nvflare/private/fed/app/client/worker_process.py b/nvflare/private/fed/app/client/worker_process.py\n--- a/nvflare/private/fed/app/client/worker_process.py\n+++ b/nvflare/private/fed/app/client/worker_process.py\n@@ -17,8 +17,12 @@\n import argparse\n import os\n import sys\n+import threading\n+import time\n import traceback\n \n+import psutil\n+\n from nvflare.apis.fl_constant import FLContextKey\n from nvflare.apis.workspace import Workspace\n from nvflare.fuel.sec.security_content_service import SecurityContentService\n@@ -32,23 +36,33 @@\n from nvflare.private.fed.client.command_agent import CommandAgent\n \n \n+def check_parent_alive(parent_pid, stop_event: threading.Event):\n+ while True:\n+ if stop_event.is_set():\n+ break\n+ if not psutil.pid_exists(parent_pid):\n+ # if parent is not alive, kill its worker process\n+ os.killpg(os.getpgid(os.getpid()), 9)\n+ break\n+ time.sleep(1)\n+\n+\n def main():\n- \"\"\"Worker_process start program.\"\"\"\n+ \"\"\"Worker process start program.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--workspace\", \"-m\", type=str, help=\"WORKSPACE folder\", required=True)\n parser.add_argument(\"--startup\", \"-w\", type=str, help=\"startup folder\", required=True)\n-\n parser.add_argument(\n \"--fed_client\", \"-s\", type=str, help=\"an aggregation server specification json file\", required=True\n )\n-\n parser.add_argument(\"--set\", metavar=\"KEY=VALUE\", nargs=\"*\")\n-\n parser.add_argument(\"--local_rank\", type=int, default=0)\n-\n args = parser.parse_args()\n kv_list = parse_vars(args.set)\n \n+ # get parent process id\n+ parent_pid = os.getppid()\n+\n args.train_config = os.path.join(\"config\", \"config_train.json\")\n config_folder = kv_list.get(\"config_folder\", \"\")\n secure_train = kv_list.get(\"secure_train\", True)\n@@ -68,7 +82,6 @@\n restart_file = os.path.join(args.workspace, \"restart.fl\")\n if os.path.exists(restart_file):\n os.remove(restart_file)\n-\n print(\"starting the client .....\")\n \n deployer = None\n@@ -77,7 +90,13 @@\n startup = os.path.join(args.workspace, \"startup\")\n SecurityContentService.initialize(content_folder=startup)\n \n+ federated_client = None\n+ thread = None\n+ stop_event = threading.Event()\n try:\n+ # start parent process checking thread\n+ thread = threading.Thread(target=check_parent_alive, args=(parent_pid, stop_event))\n+ thread.start()\n token_file = os.path.join(args.workspace, EngineConstant.CLIENT_TOKEN_FILE)\n with open(token_file, \"r\") as f:\n token = f.readline().strip()\n@@ -158,7 +177,6 @@\n \n federated_client.status = ClientStatus.STARTED\n client_runner.run(app_root, args)\n-\n except BaseException as e:\n traceback.print_exc()\n print(\"FL client execution exception: \" + str(e))\n@@ -166,10 +184,16 @@\n # if federated_client:\n # federated_client.stop_listen = True\n # thread.join()\n+ stop_event.set()\n if command_agent:\n command_agent.shutdown()\n if deployer:\n deployer.close()\n+ if federated_client:\n+ federated_client.close()\n+ if thread and thread.is_alive():\n+ thread.join()\n+\n # address = ('localhost', 6000)\n # conn_client = Client(address, authkey='client process secret password'.encode())\n # conn_client.send('bye')\n@@ -177,10 +201,8 @@\n \n def remove_restart_file(args):\n \"\"\"To remove the restart.fl file.\n-\n Args:\n args: command args\n-\n \"\"\"\n restart_file = os.path.join(args.workspace, \"restart.fl\")\n if os.path.exists(restart_file):\n", "issue": "stop_fl.sh does not terminate client side worker 
process\nThe stop_fl.sh just does `touch shutdown.fl` , wait 5 seconds, then kill the process of pid.fl.\r\n\r\n\r\nThe worker process of client is not killed / terminated.\r\n\r\n\r\nSteps to reproduce:\r\n1. Launch server (start.sh)\r\n2. Launch client (start.sh)\r\n3. Using Admin: Upload app\r\n4. Using Admin: Deploy app\r\n5. Using Admin: Start app\r\n6. Use stop_fl.sh\n", "before_files": [{"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Provides a command line interface for a federated client trainer.\"\"\"\n\nimport argparse\nimport os\nimport sys\nimport traceback\n\nfrom nvflare.apis.fl_constant import FLContextKey\nfrom nvflare.apis.workspace import Workspace\nfrom nvflare.fuel.sec.security_content_service import SecurityContentService\nfrom nvflare.fuel.utils.argument_utils import parse_vars\nfrom nvflare.private.defs import EngineConstant\nfrom nvflare.private.fed.app.fl_conf import FLClientStarterConfiger\nfrom nvflare.private.fed.client.client_json_config import ClientJsonConfigurator\nfrom nvflare.private.fed.client.client_run_manager import ClientRunManager\nfrom nvflare.private.fed.client.client_runner import ClientRunner\nfrom nvflare.private.fed.client.client_status import ClientStatus\nfrom nvflare.private.fed.client.command_agent import CommandAgent\n\n\ndef main():\n \"\"\"Worker_process start program.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--workspace\", \"-m\", type=str, help=\"WORKSPACE folder\", required=True)\n parser.add_argument(\"--startup\", \"-w\", type=str, help=\"startup folder\", required=True)\n\n parser.add_argument(\n \"--fed_client\", \"-s\", type=str, help=\"an aggregation server specification json file\", required=True\n )\n\n parser.add_argument(\"--set\", metavar=\"KEY=VALUE\", nargs=\"*\")\n\n parser.add_argument(\"--local_rank\", type=int, default=0)\n\n args = parser.parse_args()\n kv_list = parse_vars(args.set)\n\n args.train_config = os.path.join(\"config\", \"config_train.json\")\n config_folder = kv_list.get(\"config_folder\", \"\")\n secure_train = kv_list.get(\"secure_train\", True)\n if config_folder == \"\":\n args.client_config = \"config_fed_client.json\"\n else:\n args.client_config = os.path.join(config_folder, \"config_fed_client.json\")\n args.config_folder = config_folder\n args.env = os.path.join(\"config\", \"environment.json\")\n\n try:\n remove_restart_file(args)\n except BaseException:\n print(\"Could not remove the restart.fl / shutdown.fl file. 
Please check your system before starting FL.\")\n sys.exit(-1)\n\n restart_file = os.path.join(args.workspace, \"restart.fl\")\n if os.path.exists(restart_file):\n os.remove(restart_file)\n\n print(\"starting the client .....\")\n\n deployer = None\n command_agent = None\n\n startup = os.path.join(args.workspace, \"startup\")\n SecurityContentService.initialize(content_folder=startup)\n\n try:\n token_file = os.path.join(args.workspace, EngineConstant.CLIENT_TOKEN_FILE)\n with open(token_file, \"r\") as f:\n token = f.readline().strip()\n run_number = f.readline().strip()\n client_name = f.readline().strip()\n listen_port = f.readline().strip()\n print(\n \"token is: {} run_number is: {} client_name: {} listen_port: {}\".format(\n token, run_number, client_name, listen_port\n )\n )\n\n startup = args.startup\n app_root = os.path.join(args.workspace, \"run_\" + str(run_number), \"app_\" + client_name)\n\n app_log_config = os.path.join(app_root, config_folder, \"log.config\")\n if os.path.exists(app_log_config):\n args.log_config = app_log_config\n else:\n args.log_config = os.path.join(startup, \"log.config\")\n\n conf = FLClientStarterConfiger(\n app_root=startup,\n client_config_file_name=args.fed_client,\n log_config_file_name=args.log_config,\n kv_list=args.set,\n )\n conf.configure()\n\n deployer = conf.base_deployer\n federated_client = deployer.create_fed_client()\n federated_client.status = ClientStatus.STARTING\n\n federated_client.token = token\n federated_client.client_name = client_name\n federated_client.fl_ctx.set_prop(FLContextKey.CLIENT_NAME, client_name, private=False)\n federated_client.fl_ctx.set_prop(EngineConstant.FL_TOKEN, token, private=False)\n federated_client.fl_ctx.set_prop(FLContextKey.WORKSPACE_ROOT, args.workspace, private=True)\n\n client_config_file_name = os.path.join(app_root, args.client_config)\n conf = ClientJsonConfigurator(\n config_file_name=client_config_file_name,\n )\n conf.configure()\n\n workspace = Workspace(args.workspace, client_name, config_folder)\n run_manager = ClientRunManager(\n client_name=client_name,\n run_num=int(run_number),\n workspace=workspace,\n client=federated_client,\n components=conf.runner_config.components,\n handlers=conf.runner_config.handlers,\n conf=conf,\n )\n federated_client.run_manager = run_manager\n\n with run_manager.new_context() as fl_ctx:\n fl_ctx.set_prop(FLContextKey.CLIENT_NAME, client_name, private=False)\n fl_ctx.set_prop(EngineConstant.FL_TOKEN, token, private=False)\n fl_ctx.set_prop(FLContextKey.WORKSPACE_ROOT, args.workspace, private=True)\n fl_ctx.set_prop(FLContextKey.ARGS, args, sticky=True)\n fl_ctx.set_prop(FLContextKey.APP_ROOT, app_root, private=True, sticky=True)\n fl_ctx.set_prop(FLContextKey.WORKSPACE_OBJECT, workspace, private=True)\n fl_ctx.set_prop(FLContextKey.SECURE_MODE, secure_train, private=True, sticky=True)\n\n client_runner = ClientRunner(config=conf.runner_config, run_num=int(run_number), engine=run_manager)\n run_manager.add_handler(client_runner)\n fl_ctx.set_prop(FLContextKey.RUNNER, client_runner, private=True)\n\n # # Start the thread for responding the inquire\n # federated_client.stop_listen = False\n # thread = threading.Thread(target=listen_command, args=[federated_client, int(listen_port), client_runner])\n # thread.start()\n # Start the command agent\n command_agent = CommandAgent(federated_client, int(listen_port), client_runner)\n command_agent.start(fl_ctx)\n\n federated_client.status = ClientStatus.STARTED\n client_runner.run(app_root, args)\n\n except 
BaseException as e:\n traceback.print_exc()\n print(\"FL client execution exception: \" + str(e))\n finally:\n # if federated_client:\n # federated_client.stop_listen = True\n # thread.join()\n if command_agent:\n command_agent.shutdown()\n if deployer:\n deployer.close()\n # address = ('localhost', 6000)\n # conn_client = Client(address, authkey='client process secret password'.encode())\n # conn_client.send('bye')\n\n\ndef remove_restart_file(args):\n \"\"\"To remove the restart.fl file.\n\n Args:\n args: command args\n\n \"\"\"\n restart_file = os.path.join(args.workspace, \"restart.fl\")\n if os.path.exists(restart_file):\n os.remove(restart_file)\n restart_file = os.path.join(args.workspace, \"shutdown.fl\")\n if os.path.exists(restart_file):\n os.remove(restart_file)\n\n\nif __name__ == \"__main__\":\n \"\"\"\n This is the program when starting the child process for running the NVIDIA FLARE executor.\n \"\"\"\n\n main()\n", "path": "nvflare/private/fed/app/client/worker_process.py"}], "after_files": [{"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Provides a command line interface for a federated client trainer.\"\"\"\n\nimport argparse\nimport os\nimport sys\nimport threading\nimport time\nimport traceback\n\nimport psutil\n\nfrom nvflare.apis.fl_constant import FLContextKey\nfrom nvflare.apis.workspace import Workspace\nfrom nvflare.fuel.sec.security_content_service import SecurityContentService\nfrom nvflare.fuel.utils.argument_utils import parse_vars\nfrom nvflare.private.defs import EngineConstant\nfrom nvflare.private.fed.app.fl_conf import FLClientStarterConfiger\nfrom nvflare.private.fed.client.client_json_config import ClientJsonConfigurator\nfrom nvflare.private.fed.client.client_run_manager import ClientRunManager\nfrom nvflare.private.fed.client.client_runner import ClientRunner\nfrom nvflare.private.fed.client.client_status import ClientStatus\nfrom nvflare.private.fed.client.command_agent import CommandAgent\n\n\ndef check_parent_alive(parent_pid, stop_event: threading.Event):\n while True:\n if stop_event.is_set():\n break\n if not psutil.pid_exists(parent_pid):\n # if parent is not alive, kill its worker process\n os.killpg(os.getpgid(os.getpid()), 9)\n break\n time.sleep(1)\n\n\ndef main():\n \"\"\"Worker process start program.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--workspace\", \"-m\", type=str, help=\"WORKSPACE folder\", required=True)\n parser.add_argument(\"--startup\", \"-w\", type=str, help=\"startup folder\", required=True)\n parser.add_argument(\n \"--fed_client\", \"-s\", type=str, help=\"an aggregation server specification json file\", required=True\n )\n parser.add_argument(\"--set\", metavar=\"KEY=VALUE\", nargs=\"*\")\n parser.add_argument(\"--local_rank\", type=int, default=0)\n args = parser.parse_args()\n kv_list = parse_vars(args.set)\n\n # get parent process id\n parent_pid = os.getppid()\n\n args.train_config = 
os.path.join(\"config\", \"config_train.json\")\n config_folder = kv_list.get(\"config_folder\", \"\")\n secure_train = kv_list.get(\"secure_train\", True)\n if config_folder == \"\":\n args.client_config = \"config_fed_client.json\"\n else:\n args.client_config = os.path.join(config_folder, \"config_fed_client.json\")\n args.config_folder = config_folder\n args.env = os.path.join(\"config\", \"environment.json\")\n\n try:\n remove_restart_file(args)\n except BaseException:\n print(\"Could not remove the restart.fl / shutdown.fl file. Please check your system before starting FL.\")\n sys.exit(-1)\n\n restart_file = os.path.join(args.workspace, \"restart.fl\")\n if os.path.exists(restart_file):\n os.remove(restart_file)\n print(\"starting the client .....\")\n\n deployer = None\n command_agent = None\n\n startup = os.path.join(args.workspace, \"startup\")\n SecurityContentService.initialize(content_folder=startup)\n\n federated_client = None\n thread = None\n stop_event = threading.Event()\n try:\n # start parent process checking thread\n thread = threading.Thread(target=check_parent_alive, args=(parent_pid, stop_event))\n thread.start()\n token_file = os.path.join(args.workspace, EngineConstant.CLIENT_TOKEN_FILE)\n with open(token_file, \"r\") as f:\n token = f.readline().strip()\n run_number = f.readline().strip()\n client_name = f.readline().strip()\n listen_port = f.readline().strip()\n print(\n \"token is: {} run_number is: {} client_name: {} listen_port: {}\".format(\n token, run_number, client_name, listen_port\n )\n )\n\n startup = args.startup\n app_root = os.path.join(args.workspace, \"run_\" + str(run_number), \"app_\" + client_name)\n\n app_log_config = os.path.join(app_root, config_folder, \"log.config\")\n if os.path.exists(app_log_config):\n args.log_config = app_log_config\n else:\n args.log_config = os.path.join(startup, \"log.config\")\n\n conf = FLClientStarterConfiger(\n app_root=startup,\n client_config_file_name=args.fed_client,\n log_config_file_name=args.log_config,\n kv_list=args.set,\n )\n conf.configure()\n\n deployer = conf.base_deployer\n federated_client = deployer.create_fed_client()\n federated_client.status = ClientStatus.STARTING\n\n federated_client.token = token\n federated_client.client_name = client_name\n federated_client.fl_ctx.set_prop(FLContextKey.CLIENT_NAME, client_name, private=False)\n federated_client.fl_ctx.set_prop(EngineConstant.FL_TOKEN, token, private=False)\n federated_client.fl_ctx.set_prop(FLContextKey.WORKSPACE_ROOT, args.workspace, private=True)\n\n client_config_file_name = os.path.join(app_root, args.client_config)\n conf = ClientJsonConfigurator(\n config_file_name=client_config_file_name,\n )\n conf.configure()\n\n workspace = Workspace(args.workspace, client_name, config_folder)\n run_manager = ClientRunManager(\n client_name=client_name,\n run_num=int(run_number),\n workspace=workspace,\n client=federated_client,\n components=conf.runner_config.components,\n handlers=conf.runner_config.handlers,\n conf=conf,\n )\n federated_client.run_manager = run_manager\n\n with run_manager.new_context() as fl_ctx:\n fl_ctx.set_prop(FLContextKey.CLIENT_NAME, client_name, private=False)\n fl_ctx.set_prop(EngineConstant.FL_TOKEN, token, private=False)\n fl_ctx.set_prop(FLContextKey.WORKSPACE_ROOT, args.workspace, private=True)\n fl_ctx.set_prop(FLContextKey.ARGS, args, sticky=True)\n fl_ctx.set_prop(FLContextKey.APP_ROOT, app_root, private=True, sticky=True)\n fl_ctx.set_prop(FLContextKey.WORKSPACE_OBJECT, workspace, private=True)\n 
fl_ctx.set_prop(FLContextKey.SECURE_MODE, secure_train, private=True, sticky=True)\n\n client_runner = ClientRunner(config=conf.runner_config, run_num=int(run_number), engine=run_manager)\n run_manager.add_handler(client_runner)\n fl_ctx.set_prop(FLContextKey.RUNNER, client_runner, private=True)\n\n # # Start the thread for responding the inquire\n # federated_client.stop_listen = False\n # thread = threading.Thread(target=listen_command, args=[federated_client, int(listen_port), client_runner])\n # thread.start()\n # Start the command agent\n command_agent = CommandAgent(federated_client, int(listen_port), client_runner)\n command_agent.start(fl_ctx)\n\n federated_client.status = ClientStatus.STARTED\n client_runner.run(app_root, args)\n except BaseException as e:\n traceback.print_exc()\n print(\"FL client execution exception: \" + str(e))\n finally:\n # if federated_client:\n # federated_client.stop_listen = True\n # thread.join()\n stop_event.set()\n if command_agent:\n command_agent.shutdown()\n if deployer:\n deployer.close()\n if federated_client:\n federated_client.close()\n if thread and thread.is_alive():\n thread.join()\n\n # address = ('localhost', 6000)\n # conn_client = Client(address, authkey='client process secret password'.encode())\n # conn_client.send('bye')\n\n\ndef remove_restart_file(args):\n \"\"\"To remove the restart.fl file.\n Args:\n args: command args\n \"\"\"\n restart_file = os.path.join(args.workspace, \"restart.fl\")\n if os.path.exists(restart_file):\n os.remove(restart_file)\n restart_file = os.path.join(args.workspace, \"shutdown.fl\")\n if os.path.exists(restart_file):\n os.remove(restart_file)\n\n\nif __name__ == \"__main__\":\n \"\"\"\n This is the program when starting the child process for running the NVIDIA FLARE executor.\n \"\"\"\n\n main()\n", "path": "nvflare/private/fed/app/client/worker_process.py"}]} | 2,559 | 909 |
gh_patches_debug_3782 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-2314 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Normalize stored ISNI
The `ISNI` field should always be in the same format (without spaces), but right now sometimes the field is stored with spaces between the digits. There should be validation when the author is saved that cleans this value, similar to how ISBNs are validated.
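As a minimal sketch of the requested cleanup (an illustration only; the helper name is made up, not the project's actual code), the value could be stripped of whitespace before it is persisted, for example from `Author.save()`:
```
import re


def normalize_isni(value):
    """Collapse an ISNI such as '0000 0001 2103 4567' into '0000000121034567'."""
    if not value:
        return value
    return re.sub(r"\s", "", value)
```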
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/models/author.py`
Content:
```
1 """ database schema for info about authors """
2 import re
3 from django.contrib.postgres.indexes import GinIndex
4 from django.core.cache import cache
5 from django.core.cache.utils import make_template_fragment_key
6 from django.db import models
7
8 from bookwyrm import activitypub
9 from bookwyrm.settings import DOMAIN
10
11 from .book import BookDataModel
12 from . import fields
13
14
15 class Author(BookDataModel):
16 """basic biographic info"""
17
18 wikipedia_link = fields.CharField(
19 max_length=255, blank=True, null=True, deduplication_field=True
20 )
21 isni = fields.CharField(
22 max_length=255, blank=True, null=True, deduplication_field=True
23 )
24 gutenberg_id = fields.CharField(
25 max_length=255, blank=True, null=True, deduplication_field=True
26 )
27 # idk probably other keys would be useful here?
28 born = fields.DateTimeField(blank=True, null=True)
29 died = fields.DateTimeField(blank=True, null=True)
30 name = fields.CharField(max_length=255)
31 aliases = fields.ArrayField(
32 models.CharField(max_length=255), blank=True, default=list
33 )
34 bio = fields.HtmlField(null=True, blank=True)
35
36 def save(self, *args, **kwargs):
37 """clear related template caches"""
38 # clear template caches
39 if self.id:
40 cache_keys = [
41 make_template_fragment_key("titleby", [book])
42 for book in self.book_set.values_list("id", flat=True)
43 ]
44 cache.delete_many(cache_keys)
45 return super().save(*args, **kwargs)
46
47 @property
48 def isni_link(self):
49 """generate the url from the isni id"""
50 clean_isni = re.sub(r"\s", "", self.isni)
51 return f"https://isni.org/isni/{clean_isni}"
52
53 @property
54 def openlibrary_link(self):
55 """generate the url from the openlibrary id"""
56 return f"https://openlibrary.org/authors/{self.openlibrary_key}"
57
58 def get_remote_id(self):
59 """editions and works both use "book" instead of model_name"""
60 return f"https://{DOMAIN}/author/{self.id}"
61
62 activity_serializer = activitypub.Author
63
64 class Meta:
65 """sets up postgres GIN index field"""
66
67 indexes = (GinIndex(fields=["search_vector"]),)
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bookwyrm/models/author.py b/bookwyrm/models/author.py
--- a/bookwyrm/models/author.py
+++ b/bookwyrm/models/author.py
@@ -42,6 +42,11 @@
for book in self.book_set.values_list("id", flat=True)
]
cache.delete_many(cache_keys)
+
+ # normalize isni format
+ if self.isni:
+ self.isni = re.sub(r"\s", "", self.isni)
+
return super().save(*args, **kwargs)
@property
| {"golden_diff": "diff --git a/bookwyrm/models/author.py b/bookwyrm/models/author.py\n--- a/bookwyrm/models/author.py\n+++ b/bookwyrm/models/author.py\n@@ -42,6 +42,11 @@\n for book in self.book_set.values_list(\"id\", flat=True)\n ]\n cache.delete_many(cache_keys)\n+\n+ # normalize isni format\n+ if self.isni:\n+ self.isni = re.sub(r\"\\s\", \"\", self.isni)\n+\n return super().save(*args, **kwargs)\n \n @property\n", "issue": "Normalize stored ISNI\nThe `ISNI` field should always be in the same format (without spaces), but right now sometimes the field is stored with spaces between the digits. There should be validation when the author is saved that cleans this value, similar to how ISBNs are validated\n", "before_files": [{"content": "\"\"\" database schema for info about authors \"\"\"\nimport re\nfrom django.contrib.postgres.indexes import GinIndex\nfrom django.core.cache import cache\nfrom django.core.cache.utils import make_template_fragment_key\nfrom django.db import models\n\nfrom bookwyrm import activitypub\nfrom bookwyrm.settings import DOMAIN\n\nfrom .book import BookDataModel\nfrom . import fields\n\n\nclass Author(BookDataModel):\n \"\"\"basic biographic info\"\"\"\n\n wikipedia_link = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True\n )\n isni = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True\n )\n gutenberg_id = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True\n )\n # idk probably other keys would be useful here?\n born = fields.DateTimeField(blank=True, null=True)\n died = fields.DateTimeField(blank=True, null=True)\n name = fields.CharField(max_length=255)\n aliases = fields.ArrayField(\n models.CharField(max_length=255), blank=True, default=list\n )\n bio = fields.HtmlField(null=True, blank=True)\n\n def save(self, *args, **kwargs):\n \"\"\"clear related template caches\"\"\"\n # clear template caches\n if self.id:\n cache_keys = [\n make_template_fragment_key(\"titleby\", [book])\n for book in self.book_set.values_list(\"id\", flat=True)\n ]\n cache.delete_many(cache_keys)\n return super().save(*args, **kwargs)\n\n @property\n def isni_link(self):\n \"\"\"generate the url from the isni id\"\"\"\n clean_isni = re.sub(r\"\\s\", \"\", self.isni)\n return f\"https://isni.org/isni/{clean_isni}\"\n\n @property\n def openlibrary_link(self):\n \"\"\"generate the url from the openlibrary id\"\"\"\n return f\"https://openlibrary.org/authors/{self.openlibrary_key}\"\n\n def get_remote_id(self):\n \"\"\"editions and works both use \"book\" instead of model_name\"\"\"\n return f\"https://{DOMAIN}/author/{self.id}\"\n\n activity_serializer = activitypub.Author\n\n class Meta:\n \"\"\"sets up postgres GIN index field\"\"\"\n\n indexes = (GinIndex(fields=[\"search_vector\"]),)\n", "path": "bookwyrm/models/author.py"}], "after_files": [{"content": "\"\"\" database schema for info about authors \"\"\"\nimport re\nfrom django.contrib.postgres.indexes import GinIndex\nfrom django.core.cache import cache\nfrom django.core.cache.utils import make_template_fragment_key\nfrom django.db import models\n\nfrom bookwyrm import activitypub\nfrom bookwyrm.settings import DOMAIN\n\nfrom .book import BookDataModel\nfrom . 
import fields\n\n\nclass Author(BookDataModel):\n \"\"\"basic biographic info\"\"\"\n\n wikipedia_link = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True\n )\n isni = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True\n )\n gutenberg_id = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True\n )\n # idk probably other keys would be useful here?\n born = fields.DateTimeField(blank=True, null=True)\n died = fields.DateTimeField(blank=True, null=True)\n name = fields.CharField(max_length=255)\n aliases = fields.ArrayField(\n models.CharField(max_length=255), blank=True, default=list\n )\n bio = fields.HtmlField(null=True, blank=True)\n\n def save(self, *args, **kwargs):\n \"\"\"clear related template caches\"\"\"\n # clear template caches\n if self.id:\n cache_keys = [\n make_template_fragment_key(\"titleby\", [book])\n for book in self.book_set.values_list(\"id\", flat=True)\n ]\n cache.delete_many(cache_keys)\n\n # normalize isni format\n if self.isni:\n self.isni = re.sub(r\"\\s\", \"\", self.isni)\n\n return super().save(*args, **kwargs)\n\n @property\n def isni_link(self):\n \"\"\"generate the url from the isni id\"\"\"\n clean_isni = re.sub(r\"\\s\", \"\", self.isni)\n return f\"https://isni.org/isni/{clean_isni}\"\n\n @property\n def openlibrary_link(self):\n \"\"\"generate the url from the openlibrary id\"\"\"\n return f\"https://openlibrary.org/authors/{self.openlibrary_key}\"\n\n def get_remote_id(self):\n \"\"\"editions and works both use \"book\" instead of model_name\"\"\"\n return f\"https://{DOMAIN}/author/{self.id}\"\n\n activity_serializer = activitypub.Author\n\n class Meta:\n \"\"\"sets up postgres GIN index field\"\"\"\n\n indexes = (GinIndex(fields=[\"search_vector\"]),)\n", "path": "bookwyrm/models/author.py"}]} | 959 | 127 |
gh_patches_debug_35845 | rasdani/github-patches | git_diff | dask__distributed-210 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pandas is now a mandatory dependency for dscheduler
If pandas is not installed I get an infinite loop with the following logs:
```
tornado.httpclient.HTTPError: HTTP 500: Internal Server Error
bokeh.util.tornado - ERROR - Error thrown from periodic callback: HTTPError(500, 'Internal Server Error', HTTPResponse(_body=None,buffer=<_io.BytesIO object at 0x7f17202a1e08>,code=500,effective_url='http://localhost:9786/tasks.json',error=HTTPError(...),headers=<tornado.httputil.HTTPHeaders object at 0x7f17204ffe48>,reason='Internal Server Error',request=<tornado.httpclient.HTTPRequest object at 0x7f1720578da0>,request_time=0.005105733871459961,time_info={}))
distributed.utils - ERROR - No module named 'pandas'
Traceback (most recent call last):
File "/volatile/ogrisel/code/distributed/distributed/utils.py", line 229, in log_errors
yield
File "/volatile/ogrisel/code/distributed/distributed/http/scheduler.py", line 79, in get
from ..diagnostics.scheduler import tasks
File "/volatile/ogrisel/code/distributed/distributed/diagnostics/scheduler.py", line 6, in <module>
import pandas as pd
ImportError: No module named 'pandas'
```
I think this is not intended? The diagnostics callback should probably be disabled when pandas is not available, no?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `distributed/formats/csv.py`
Content:
```
1 from __future__ import print_function, division, absolute_import
2
3 from io import BytesIO
4
5 from dask import do
6 from dask.dataframe import from_imperative
7 import pandas as pd
8
9 from .compression import compressors, decompressors
10
11 from ..executor import default_executor, ensure_default_get
12 from ..utils import ensure_bytes, log_errors
13
14
15 def bytes_read_csv(b, header, kwargs):
16 """ Convert a block of bytes to a Pandas DataFrame
17
18 Parameters
19 ----------
20 b: bytestring
21 The content to be parsed with pandas.read_csv
22 header: bytestring
23 An optional header to prepend to b
24 kwargs: dict
25 A dictionary of keyword arguments to be passed to pandas.read_csv
26
27 See Also:
28 distributed.formats.csv.read_csv
29 """
30 with log_errors():
31 compression = kwargs.pop('compression', None)
32 b2 = decompressors[compression](b)
33 bio = BytesIO()
34 if header:
35 if not header.endswith(b'\n') and not header.endswith(b'\r'):
36 header = header + ensure_bytes(kwargs.get('lineterminator', b'\n'))
37 bio.write(header)
38 bio.write(b2)
39 bio.seek(0)
40 return pd.read_csv(bio, **kwargs)
41
42
43 def read_csv(block_lists, header, head, kwargs, lazy=True, collection=True,
44 executor=None):
45 """ Convert blocks of bytes to a dask.dataframe or other high-level object
46
47 This accepts a list of lists of futures/values of bytes where each list
48 corresponds to one file, and the futures/values of bytes concatenate to
49 comprise the entire file, in order.
50
51 Parameters
52 ----------
53 block_lists: list of lists of futures of bytes
54 The lists of bytestrings with each list corresponding to one logical file
55 header: bytestring
56 The header, found at the front of the first file, to be prepended to
57 all blocks
58 head: pd.DataFrame
59 An example Pandas DataFrame to be used for metadata
60 kwargs: dict
61 Keyword arguments to pass down to ``pd.read_csv``
62 lazy: boolean, optional (defaults to True)
63 collection: boolean, optional (defaults to True)
64
65 Returns
66 -------
67 A dask.dataframe, or list of futures or values, depending on the value of
68 lazy and collection.
69 """
70 executor = default_executor(executor)
71
72 dfs1 = [[do(bytes_read_csv)(blocks[0], '', kwargs)] +
73 [do(bytes_read_csv)(b, header, kwargs)
74 for b in blocks[1:]]
75 for blocks in block_lists]
76 dfs2 = sum(dfs1, [])
77
78 ensure_default_get(executor)
79
80 if collection:
81 result = from_imperative(dfs2, head)
82 else:
83 result = dfs2
84
85 if not lazy:
86 if collection:
87 result = executor.persist(result)
88 else:
89 result = executor.compute(result)
90
91 return result
92
```
Path: `distributed/diagnostics/scheduler.py`
Content:
```
1 from __future__ import print_function, division, absolute_import
2
3 from datetime import datetime
4 import os
5
6 import pandas as pd
7 from toolz import countby, concat, dissoc
8
9 from ..utils import key_split
10
11
12 def tasks(s):
13 """ Task and worker status of scheduler """
14 processing = sum(map(len, s.processing.values()))
15
16 return {'processing': processing,
17 'total': len(s.tasks),
18 'in-memory': len(s.who_has),
19 'ready': len(s.ready)
20 + sum(map(len, s.stacks.values())),
21 'waiting': len(s.waiting),
22 'failed': len(s.exceptions_blame)}
23
24
25 def workers(s):
26 """ Information about workers
27
28 Examples
29 --------
30 >>> workers(my_scheduler) # doctest: +SKIP
31 {'127.0.0.1': {'cores': 3,
32 'cpu': 0.0,
33 'last-seen': 0.003068,
34 'latency': 0.01584628690034151,
35 'ports': ['54871', '50943'],
36 'processing': {'inc': 2, 'add': 1},
37 'disk-read': 1234,
38 'disk-write': 1234,
39 'network-send': 1234,
40 'network-recv': 1234,
41 'memory': 16701911040,
42 'memory-percent': 85}}
43 """
44 hosts = {host: ['%s:%s' % (host, port) for port in d['ports']]
45 for host, d in s.host_info.items()}
46
47 processing = {host: countby(key_split, concat(s.processing[w] for w in addrs))
48 for host, addrs in hosts.items()}
49
50 now = datetime.now()
51
52 result = {}
53 for host, info in s.host_info.items():
54 info = dissoc(info, 'heartbeat', 'heartbeat-port')
55 info['processing'] = processing[host]
56 result[host] = info
57 info['ports'] = list(info['ports'])
58 if 'last-seen' in info:
59 info['last-seen'] = (now - info['last-seen']).total_seconds()
60
61 return result
62
63
64 def scheduler_progress_df(d):
65 """ Convert status response to DataFrame of total progress
66
67 Consumes dictionary from status.json route
68
69 Examples
70 --------
71 >>> d = {"ready": 5, "in-memory": 30, "waiting": 20,
72 ... "tasks": 70, "failed": 9,
73 ... "processing": 6,
74 ... "other-keys-are-fine-too": ''}
75
76 >>> scheduler_progress_df(d) # doctest: +SKIP
77 Count Progress
78 Tasks
79 waiting 20 +++++++++++
80 ready 5 ++
81 failed 9 +++++
82 processing 6 +++
83 in-memory 30 +++++++++++++++++
84 total 70 ++++++++++++++++++++++++++++++++++++++++
85 """
86 d = d.copy()
87 d['total'] = d.pop('tasks')
88 names = ['waiting', 'ready', 'failed', 'processing', 'in-memory', 'total']
89 df = pd.DataFrame(pd.Series({k: d[k] for k in names},
90 index=names, name='Count'))
91 if d['total']:
92 barlength = (40 * df.Count / d['total']).astype(int)
93 df['Progress'] = barlength.apply(lambda n: ('%-40s' % (n * '+').rstrip(' ')))
94 else:
95 df['Progress'] = 0
96
97 df.index.name = 'Tasks'
98
99 return df
100
101
102 def worker_status_df(d):
103 """ Status of workers as a Pandas DataFrame
104
105 Consumes data from status.json route.
106
107 Examples
108 --------
109 >>> d = {"other-keys-are-fine-too": '',
110 ... "ncores": {"192.168.1.107": 4,
111 ... "192.168.1.108": 4},
112 ... "processing": {"192.168.1.108": {'inc': 3, 'add': 1},
113 ... "192.168.1.107": {'inc': 2}},
114 ... "bytes": {"192.168.1.108": 1000,
115 ... "192.168.1.107": 2000}}
116
117 >>> worker_status_df(d)
118 Ncores Bytes Processing
119 Workers
120 192.168.1.107 4 2000 [inc]
121 192.168.1.108 4 1000 [add, inc]
122 """
123 names = ['ncores', 'bytes', 'processing']
124 df = pd.DataFrame({k: d[k] for k in names}, columns=names)
125 df['processing'] = df['processing'].apply(sorted)
126 df.columns = df.columns.map(str.title)
127 df.index.name = 'Workers'
128 df = df.sort_index()
129 return df
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/distributed/diagnostics/scheduler.py b/distributed/diagnostics/scheduler.py
--- a/distributed/diagnostics/scheduler.py
+++ b/distributed/diagnostics/scheduler.py
@@ -3,7 +3,6 @@
from datetime import datetime
import os
-import pandas as pd
from toolz import countby, concat, dissoc
from ..utils import key_split
@@ -83,6 +82,7 @@
in-memory 30 +++++++++++++++++
total 70 ++++++++++++++++++++++++++++++++++++++++
"""
+ import pandas as pd
d = d.copy()
d['total'] = d.pop('tasks')
names = ['waiting', 'ready', 'failed', 'processing', 'in-memory', 'total']
@@ -120,6 +120,7 @@
192.168.1.107 4 2000 [inc]
192.168.1.108 4 1000 [add, inc]
"""
+ import pandas as pd
names = ['ncores', 'bytes', 'processing']
df = pd.DataFrame({k: d[k] for k in names}, columns=names)
df['processing'] = df['processing'].apply(sorted)
diff --git a/distributed/formats/csv.py b/distributed/formats/csv.py
--- a/distributed/formats/csv.py
+++ b/distributed/formats/csv.py
@@ -3,8 +3,6 @@
from io import BytesIO
from dask import do
-from dask.dataframe import from_imperative
-import pandas as pd
from .compression import compressors, decompressors
@@ -27,6 +25,7 @@
See Also:
distributed.formats.csv.read_csv
"""
+ import pandas as pd
with log_errors():
compression = kwargs.pop('compression', None)
b2 = decompressors[compression](b)
@@ -67,6 +66,7 @@
A dask.dataframe, or list of futures or values, depending on the value of
lazy and collection.
"""
+ from dask.dataframe import from_imperative
executor = default_executor(executor)
dfs1 = [[do(bytes_read_csv)(blocks[0], '', kwargs)] +
| {"golden_diff": "diff --git a/distributed/diagnostics/scheduler.py b/distributed/diagnostics/scheduler.py\n--- a/distributed/diagnostics/scheduler.py\n+++ b/distributed/diagnostics/scheduler.py\n@@ -3,7 +3,6 @@\n from datetime import datetime\n import os\n \n-import pandas as pd\n from toolz import countby, concat, dissoc\n \n from ..utils import key_split\n@@ -83,6 +82,7 @@\n in-memory 30 +++++++++++++++++\n total 70 ++++++++++++++++++++++++++++++++++++++++\n \"\"\"\n+ import pandas as pd\n d = d.copy()\n d['total'] = d.pop('tasks')\n names = ['waiting', 'ready', 'failed', 'processing', 'in-memory', 'total']\n@@ -120,6 +120,7 @@\n 192.168.1.107 4 2000 [inc]\n 192.168.1.108 4 1000 [add, inc]\n \"\"\"\n+ import pandas as pd\n names = ['ncores', 'bytes', 'processing']\n df = pd.DataFrame({k: d[k] for k in names}, columns=names)\n df['processing'] = df['processing'].apply(sorted)\ndiff --git a/distributed/formats/csv.py b/distributed/formats/csv.py\n--- a/distributed/formats/csv.py\n+++ b/distributed/formats/csv.py\n@@ -3,8 +3,6 @@\n from io import BytesIO\n \n from dask import do\n-from dask.dataframe import from_imperative\n-import pandas as pd\n \n from .compression import compressors, decompressors\n \n@@ -27,6 +25,7 @@\n See Also:\n distributed.formats.csv.read_csv\n \"\"\"\n+ import pandas as pd\n with log_errors():\n compression = kwargs.pop('compression', None)\n b2 = decompressors[compression](b)\n@@ -67,6 +66,7 @@\n A dask.dataframe, or list of futures or values, depending on the value of\n lazy and collection.\n \"\"\"\n+ from dask.dataframe import from_imperative\n executor = default_executor(executor)\n \n dfs1 = [[do(bytes_read_csv)(blocks[0], '', kwargs)] +\n", "issue": "pandas is now a mandatory dependency for dscheduler\nIf pandas is not installed I get an infinite loop with the following logs:\n\n```\ntornado.httpclient.HTTPError: HTTP 500: Internal Server Error\nbokeh.util.tornado - ERROR - Error thrown from periodic callback: HTTPError(500, 'Internal Server Error', HTTPResponse(_body=None,buffer=<_io.BytesIO object at 0x7f17202a1e08>,code=500,effective_url='http://localhost:9786/tasks.json',error=HTTPError(...),headers=<tornado.httputil.HTTPHeaders object at 0x7f17204ffe48>,reason='Internal Server Error',request=<tornado.httpclient.HTTPRequest object at 0x7f1720578da0>,request_time=0.005105733871459961,time_info={}))\ndistributed.utils - ERROR - No module named 'pandas'\nTraceback (most recent call last):\n File \"/volatile/ogrisel/code/distributed/distributed/utils.py\", line 229, in log_errors\n yield\n File \"/volatile/ogrisel/code/distributed/distributed/http/scheduler.py\", line 79, in get\n from ..diagnostics.scheduler import tasks\n File \"/volatile/ogrisel/code/distributed/distributed/diagnostics/scheduler.py\", line 6, in <module>\n import pandas as pd\nImportError: No module named 'pandas'\n```\n\nIf think this is not intended? 
The diagnostics callback should probably be disabled when pandas is not available, no?\n\n", "before_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nfrom io import BytesIO\n\nfrom dask import do\nfrom dask.dataframe import from_imperative\nimport pandas as pd\n\nfrom .compression import compressors, decompressors\n\nfrom ..executor import default_executor, ensure_default_get\nfrom ..utils import ensure_bytes, log_errors\n\n\ndef bytes_read_csv(b, header, kwargs):\n \"\"\" Convert a block of bytes to a Pandas DataFrame\n\n Parameters\n ----------\n b: bytestring\n The content to be parsed with pandas.read_csv\n header: bytestring\n An optional header to prepend to b\n kwargs: dict\n A dictionary of keyword arguments to be passed to pandas.read_csv\n\n See Also:\n distributed.formats.csv.read_csv\n \"\"\"\n with log_errors():\n compression = kwargs.pop('compression', None)\n b2 = decompressors[compression](b)\n bio = BytesIO()\n if header:\n if not header.endswith(b'\\n') and not header.endswith(b'\\r'):\n header = header + ensure_bytes(kwargs.get('lineterminator', b'\\n'))\n bio.write(header)\n bio.write(b2)\n bio.seek(0)\n return pd.read_csv(bio, **kwargs)\n\n\ndef read_csv(block_lists, header, head, kwargs, lazy=True, collection=True,\n executor=None):\n \"\"\" Convert blocks of bytes to a dask.dataframe or other high-level object\n\n This accepts a list of lists of futures/values of bytes where each list\n corresponds to one file, and the futures/values of bytes concatenate to\n comprise the entire file, in order.\n\n Parameters\n ----------\n block_lists: list of lists of futures of bytes\n The lists of bytestrings with each list corresponding to one logical file\n header: bytestring\n The header, found at the front of the first file, to be prepended to\n all blocks\n head: pd.DataFrame\n An example Pandas DataFrame to be used for metadata\n kwargs: dict\n Keyword arguments to pass down to ``pd.read_csv``\n lazy: boolean, optional (defaults to True)\n collection: boolean, optional (defaults to True)\n\n Returns\n -------\n A dask.dataframe, or list of futures or values, depending on the value of\n lazy and collection.\n \"\"\"\n executor = default_executor(executor)\n\n dfs1 = [[do(bytes_read_csv)(blocks[0], '', kwargs)] +\n [do(bytes_read_csv)(b, header, kwargs)\n for b in blocks[1:]]\n for blocks in block_lists]\n dfs2 = sum(dfs1, [])\n\n ensure_default_get(executor)\n\n if collection:\n result = from_imperative(dfs2, head)\n else:\n result = dfs2\n\n if not lazy:\n if collection:\n result = executor.persist(result)\n else:\n result = executor.compute(result)\n\n return result\n", "path": "distributed/formats/csv.py"}, {"content": "from __future__ import print_function, division, absolute_import\n\nfrom datetime import datetime\nimport os\n\nimport pandas as pd\nfrom toolz import countby, concat, dissoc\n\nfrom ..utils import key_split\n\n\ndef tasks(s):\n \"\"\" Task and worker status of scheduler \"\"\"\n processing = sum(map(len, s.processing.values()))\n\n return {'processing': processing,\n 'total': len(s.tasks),\n 'in-memory': len(s.who_has),\n 'ready': len(s.ready)\n + sum(map(len, s.stacks.values())),\n 'waiting': len(s.waiting),\n 'failed': len(s.exceptions_blame)}\n\n\ndef workers(s):\n \"\"\" Information about workers\n\n Examples\n --------\n >>> workers(my_scheduler) # doctest: +SKIP\n {'127.0.0.1': {'cores': 3,\n 'cpu': 0.0,\n 'last-seen': 0.003068,\n 'latency': 0.01584628690034151,\n 'ports': ['54871', '50943'],\n 'processing': 
{'inc': 2, 'add': 1},\n 'disk-read': 1234,\n 'disk-write': 1234,\n 'network-send': 1234,\n 'network-recv': 1234,\n 'memory': 16701911040,\n 'memory-percent': 85}}\n \"\"\"\n hosts = {host: ['%s:%s' % (host, port) for port in d['ports']]\n for host, d in s.host_info.items()}\n\n processing = {host: countby(key_split, concat(s.processing[w] for w in addrs))\n for host, addrs in hosts.items()}\n\n now = datetime.now()\n\n result = {}\n for host, info in s.host_info.items():\n info = dissoc(info, 'heartbeat', 'heartbeat-port')\n info['processing'] = processing[host]\n result[host] = info\n info['ports'] = list(info['ports'])\n if 'last-seen' in info:\n info['last-seen'] = (now - info['last-seen']).total_seconds()\n\n return result\n\n\ndef scheduler_progress_df(d):\n \"\"\" Convert status response to DataFrame of total progress\n\n Consumes dictionary from status.json route\n\n Examples\n --------\n >>> d = {\"ready\": 5, \"in-memory\": 30, \"waiting\": 20,\n ... \"tasks\": 70, \"failed\": 9,\n ... \"processing\": 6,\n ... \"other-keys-are-fine-too\": ''}\n\n >>> scheduler_progress_df(d) # doctest: +SKIP\n Count Progress\n Tasks\n waiting 20 +++++++++++\n ready 5 ++\n failed 9 +++++\n processing 6 +++\n in-memory 30 +++++++++++++++++\n total 70 ++++++++++++++++++++++++++++++++++++++++\n \"\"\"\n d = d.copy()\n d['total'] = d.pop('tasks')\n names = ['waiting', 'ready', 'failed', 'processing', 'in-memory', 'total']\n df = pd.DataFrame(pd.Series({k: d[k] for k in names},\n index=names, name='Count'))\n if d['total']:\n barlength = (40 * df.Count / d['total']).astype(int)\n df['Progress'] = barlength.apply(lambda n: ('%-40s' % (n * '+').rstrip(' ')))\n else:\n df['Progress'] = 0\n\n df.index.name = 'Tasks'\n\n return df\n\n\ndef worker_status_df(d):\n \"\"\" Status of workers as a Pandas DataFrame\n\n Consumes data from status.json route.\n\n Examples\n --------\n >>> d = {\"other-keys-are-fine-too\": '',\n ... \"ncores\": {\"192.168.1.107\": 4,\n ... \"192.168.1.108\": 4},\n ... \"processing\": {\"192.168.1.108\": {'inc': 3, 'add': 1},\n ... \"192.168.1.107\": {'inc': 2}},\n ... \"bytes\": {\"192.168.1.108\": 1000,\n ... 
\"192.168.1.107\": 2000}}\n\n >>> worker_status_df(d)\n Ncores Bytes Processing\n Workers\n 192.168.1.107 4 2000 [inc]\n 192.168.1.108 4 1000 [add, inc]\n \"\"\"\n names = ['ncores', 'bytes', 'processing']\n df = pd.DataFrame({k: d[k] for k in names}, columns=names)\n df['processing'] = df['processing'].apply(sorted)\n df.columns = df.columns.map(str.title)\n df.index.name = 'Workers'\n df = df.sort_index()\n return df\n", "path": "distributed/diagnostics/scheduler.py"}], "after_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nfrom io import BytesIO\n\nfrom dask import do\n\nfrom .compression import compressors, decompressors\n\nfrom ..executor import default_executor, ensure_default_get\nfrom ..utils import ensure_bytes, log_errors\n\n\ndef bytes_read_csv(b, header, kwargs):\n \"\"\" Convert a block of bytes to a Pandas DataFrame\n\n Parameters\n ----------\n b: bytestring\n The content to be parsed with pandas.read_csv\n header: bytestring\n An optional header to prepend to b\n kwargs: dict\n A dictionary of keyword arguments to be passed to pandas.read_csv\n\n See Also:\n distributed.formats.csv.read_csv\n \"\"\"\n import pandas as pd\n with log_errors():\n compression = kwargs.pop('compression', None)\n b2 = decompressors[compression](b)\n bio = BytesIO()\n if header:\n if not header.endswith(b'\\n') and not header.endswith(b'\\r'):\n header = header + ensure_bytes(kwargs.get('lineterminator', b'\\n'))\n bio.write(header)\n bio.write(b2)\n bio.seek(0)\n return pd.read_csv(bio, **kwargs)\n\n\ndef read_csv(block_lists, header, head, kwargs, lazy=True, collection=True,\n executor=None):\n \"\"\" Convert blocks of bytes to a dask.dataframe or other high-level object\n\n This accepts a list of lists of futures/values of bytes where each list\n corresponds to one file, and the futures/values of bytes concatenate to\n comprise the entire file, in order.\n\n Parameters\n ----------\n block_lists: list of lists of futures of bytes\n The lists of bytestrings with each list corresponding to one logical file\n header: bytestring\n The header, found at the front of the first file, to be prepended to\n all blocks\n head: pd.DataFrame\n An example Pandas DataFrame to be used for metadata\n kwargs: dict\n Keyword arguments to pass down to ``pd.read_csv``\n lazy: boolean, optional (defaults to True)\n collection: boolean, optional (defaults to True)\n\n Returns\n -------\n A dask.dataframe, or list of futures or values, depending on the value of\n lazy and collection.\n \"\"\"\n from dask.dataframe import from_imperative\n executor = default_executor(executor)\n\n dfs1 = [[do(bytes_read_csv)(blocks[0], '', kwargs)] +\n [do(bytes_read_csv)(b, header, kwargs)\n for b in blocks[1:]]\n for blocks in block_lists]\n dfs2 = sum(dfs1, [])\n\n ensure_default_get(executor)\n\n if collection:\n result = from_imperative(dfs2, head)\n else:\n result = dfs2\n\n if not lazy:\n if collection:\n result = executor.persist(result)\n else:\n result = executor.compute(result)\n\n return result\n", "path": "distributed/formats/csv.py"}, {"content": "from __future__ import print_function, division, absolute_import\n\nfrom datetime import datetime\nimport os\n\nfrom toolz import countby, concat, dissoc\n\nfrom ..utils import key_split\n\n\ndef tasks(s):\n \"\"\" Task and worker status of scheduler \"\"\"\n processing = sum(map(len, s.processing.values()))\n\n return {'processing': processing,\n 'total': len(s.tasks),\n 'in-memory': len(s.who_has),\n 'ready': len(s.ready)\n + 
sum(map(len, s.stacks.values())),\n 'waiting': len(s.waiting),\n 'failed': len(s.exceptions_blame)}\n\n\ndef workers(s):\n \"\"\" Information about workers\n\n Examples\n --------\n >>> workers(my_scheduler) # doctest: +SKIP\n {'127.0.0.1': {'cores': 3,\n 'cpu': 0.0,\n 'last-seen': 0.003068,\n 'latency': 0.01584628690034151,\n 'ports': ['54871', '50943'],\n 'processing': {'inc': 2, 'add': 1},\n 'disk-read': 1234,\n 'disk-write': 1234,\n 'network-send': 1234,\n 'network-recv': 1234,\n 'memory': 16701911040,\n 'memory-percent': 85}}\n \"\"\"\n hosts = {host: ['%s:%s' % (host, port) for port in d['ports']]\n for host, d in s.host_info.items()}\n\n processing = {host: countby(key_split, concat(s.processing[w] for w in addrs))\n for host, addrs in hosts.items()}\n\n now = datetime.now()\n\n result = {}\n for host, info in s.host_info.items():\n info = dissoc(info, 'heartbeat', 'heartbeat-port')\n info['processing'] = processing[host]\n result[host] = info\n info['ports'] = list(info['ports'])\n if 'last-seen' in info:\n info['last-seen'] = (now - info['last-seen']).total_seconds()\n\n return result\n\n\ndef scheduler_progress_df(d):\n \"\"\" Convert status response to DataFrame of total progress\n\n Consumes dictionary from status.json route\n\n Examples\n --------\n >>> d = {\"ready\": 5, \"in-memory\": 30, \"waiting\": 20,\n ... \"tasks\": 70, \"failed\": 9,\n ... \"processing\": 6,\n ... \"other-keys-are-fine-too\": ''}\n\n >>> scheduler_progress_df(d) # doctest: +SKIP\n Count Progress\n Tasks\n waiting 20 +++++++++++\n ready 5 ++\n failed 9 +++++\n processing 6 +++\n in-memory 30 +++++++++++++++++\n total 70 ++++++++++++++++++++++++++++++++++++++++\n \"\"\"\n import pandas as pd\n d = d.copy()\n d['total'] = d.pop('tasks')\n names = ['waiting', 'ready', 'failed', 'processing', 'in-memory', 'total']\n df = pd.DataFrame(pd.Series({k: d[k] for k in names},\n index=names, name='Count'))\n if d['total']:\n barlength = (40 * df.Count / d['total']).astype(int)\n df['Progress'] = barlength.apply(lambda n: ('%-40s' % (n * '+').rstrip(' ')))\n else:\n df['Progress'] = 0\n\n df.index.name = 'Tasks'\n\n return df\n\n\ndef worker_status_df(d):\n \"\"\" Status of workers as a Pandas DataFrame\n\n Consumes data from status.json route.\n\n Examples\n --------\n >>> d = {\"other-keys-are-fine-too\": '',\n ... \"ncores\": {\"192.168.1.107\": 4,\n ... \"192.168.1.108\": 4},\n ... \"processing\": {\"192.168.1.108\": {'inc': 3, 'add': 1},\n ... \"192.168.1.107\": {'inc': 2}},\n ... \"bytes\": {\"192.168.1.108\": 1000,\n ... \"192.168.1.107\": 2000}}\n\n >>> worker_status_df(d)\n Ncores Bytes Processing\n Workers\n 192.168.1.107 4 2000 [inc]\n 192.168.1.108 4 1000 [add, inc]\n \"\"\"\n import pandas as pd\n names = ['ncores', 'bytes', 'processing']\n df = pd.DataFrame({k: d[k] for k in names}, columns=names)\n df['processing'] = df['processing'].apply(sorted)\n df.columns = df.columns.map(str.title)\n df.index.name = 'Workers'\n df = df.sort_index()\n return df\n", "path": "distributed/diagnostics/scheduler.py"}]} | 2,944 | 516 |
gh_patches_debug_25599 | rasdani/github-patches | git_diff | mdn__kuma-6973 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ISE when trying to submit a translation
**Summary**
_What is the problem?_
Submitting a translation in Italian for https://wiki.developer.mozilla.org/en-US/docs/Learn/Getting_started_with_the_web/Publishing_your_website$translate?tolocale=it fails and gives an ISE
**Steps To Reproduce (STR)**
1. Went to https://wiki.developer.mozilla.org/en-US/docs/Learn/Getting_started_with_the_web/Publishing_your_website$translate?tolocale=it
2. Added / Changed a bunch of words
3. Clicked "Publish"
**Actual behavior**
An ISE / error 500 happened, preventing the work from being saved.
**Expected behavior**
The localization is saved, no error, the reader is able to read a new document
**Additional context**
* First report here: https://discourse.mozilla.org/t/50-internal-server-error-while-saving/58804
* I was not able to reproduce this on another locale (pl) for this page
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kuma/wiki/views/translate.py`
Content:
```
1 from urllib.parse import urlencode
2
3 from csp.decorators import csp_update
4 from django.conf import settings
5 from django.core.exceptions import ObjectDoesNotExist
6 from django.http import Http404, JsonResponse
7 from django.shortcuts import get_object_or_404, redirect, render
8 from django.utils.safestring import mark_safe
9 from django.utils.translation import gettext_lazy as _
10 from django.views.decorators.cache import never_cache
11
12 import kuma.wiki.content
13 from kuma.attachments.forms import AttachmentRevisionForm
14 from kuma.core.decorators import block_user_agents, ensure_wiki_domain, login_required
15 from kuma.core.i18n import get_language_mapping
16 from kuma.core.urlresolvers import reverse
17 from kuma.core.utils import get_object_or_none, smart_int, urlparams
18
19 from .utils import document_form_initial, split_slug
20 from ..decorators import check_readonly, prevent_indexing, process_document_path
21 from ..forms import DocumentForm, RevisionForm
22 from ..models import Document, Revision
23
24
25 @ensure_wiki_domain
26 @never_cache
27 @block_user_agents
28 @login_required
29 @process_document_path
30 def select_locale(request, document_slug, document_locale):
31 """
32 Select a locale to translate the document to.
33 """
34 doc = get_object_or_404(Document, locale=document_locale, slug=document_slug)
35 return render(request, "wiki/select_locale.html", {"document": doc})
36
37
38 @ensure_wiki_domain
39 @never_cache
40 @block_user_agents
41 @login_required
42 @csp_update(SCRIPT_SRC="'unsafe-eval'") # Required until CKEditor 4.7
43 @process_document_path
44 @check_readonly
45 @prevent_indexing
46 def translate(request, document_slug, document_locale):
47 """
48 Create a new translation of a wiki document.
49
50 * document_slug is for the default locale
51 * translation is to the request locale
52 """
53 # TODO: Refactor this view into two views? (new, edit)
54 # That might help reduce the headache-inducing branchiness.
55
56 # The parent document to translate from
57 try:
58 # Use '.all_objects' because the parent might have been soft deleted.
59 # And if we don't respect that fact, it would become impossible to
60 # edit a the child of it.
61 parent_doc = Document.all_objects.get(
62 locale=settings.WIKI_DEFAULT_LANGUAGE, slug=document_slug
63 )
64 except Document.DoesNotExist:
65 raise Http404("Parent document does not exist")
66
67 # Get the mapping here and now so it can be used for input validation
68 language_mapping = get_language_mapping()
69
70 # HACK: Seems weird, but sticking the translate-to locale in a query
71 # param is the best way to avoid the MindTouch-legacy locale
72 # redirection logic.
73 document_locale = request.GET.get("tolocale", document_locale)
74 if document_locale.lower() not in language_mapping:
75 # The 'tolocale' query string parameters aren't free-text. They're
76 # explicitly listed on the "Select language" page (`...$locales`)
77 # If a locale was entered that wasn't a link it's a user bug.
78 raise Http404
79
80 # Set a "Discard Changes" page
81 discard_href = ""
82
83 if settings.WIKI_DEFAULT_LANGUAGE == document_locale:
84 # Don't translate to the default language.
85 return redirect(
86 reverse(
87 "wiki.edit",
88 locale=settings.WIKI_DEFAULT_LANGUAGE,
89 args=[parent_doc.slug],
90 )
91 )
92
93 if not parent_doc.is_localizable:
94 message = _("You cannot translate this document.")
95 context = {"message": message}
96 return render(request, "handlers/400.html", context, status=400)
97
98 based_on_rev = parent_doc.current_or_latest_revision()
99
100 disclose_description = bool(request.GET.get("opendescription"))
101
102 try:
103 doc = parent_doc.translations.get(locale=document_locale)
104 slug_dict = split_slug(doc.slug)
105 except Document.DoesNotExist:
106 doc = None
107 disclose_description = True
108 slug_dict = split_slug(document_slug)
109
110 # Find the "real" parent topic, which is its translation
111 if parent_doc.parent_topic:
112 try:
113 parent_topic_translated_doc = parent_doc.parent_topic.translations.get(
114 locale=document_locale
115 )
116 slug_dict = split_slug(
117 parent_topic_translated_doc.slug + "/" + slug_dict["specific"]
118 )
119 except ObjectDoesNotExist:
120 pass
121
122 user_has_doc_perm = (not doc) or (doc and doc.allows_editing_by(request.user))
123
124 doc_form = None
125 if user_has_doc_perm:
126 if doc:
127 # If there's an existing doc, populate form from it.
128 discard_href = doc.get_absolute_url()
129 doc.slug = slug_dict["specific"]
130 doc_initial = document_form_initial(doc)
131 else:
132 # If no existing doc, bring over the original title and slug.
133 discard_href = parent_doc.get_absolute_url()
134 doc_initial = {"title": based_on_rev.title, "slug": slug_dict["specific"]}
135 doc_form = DocumentForm(initial=doc_initial, parent_slug=slug_dict["parent"])
136
137 initial = {
138 "based_on": based_on_rev.id,
139 "current_rev": doc.current_or_latest_revision().id if doc else None,
140 "comment": "",
141 "toc_depth": based_on_rev.toc_depth,
142 "localization_tags": ["inprogress"],
143 }
144 content = None
145 if not doc:
146 content = based_on_rev.content
147 if content:
148 # TODO: There will be no need to "filterEditorSafety" when the code
149 # that calls "clean_content" on Revision.save is deployed to
150 # production, AND the current revisions of all docs have had
151 # their content cleaned with "clean_content".
152 initial.update(
153 content=kuma.wiki.content.parse(content).filterEditorSafety().serialize()
154 )
155 instance = doc and doc.current_or_latest_revision()
156 rev_form = RevisionForm(
157 request=request,
158 instance=instance,
159 initial=initial,
160 parent_slug=slug_dict["parent"],
161 )
162
163 if request.method == "POST":
164 which_form = request.POST.get("form-type", "both")
165 doc_form_invalid = False
166
167 # Grab the posted slug value in case it's invalid
168 posted_slug = request.POST.get("slug", slug_dict["specific"])
169
170 if user_has_doc_perm and which_form in ["doc", "both"]:
171 disclose_description = True
172 post_data = request.POST.copy()
173
174 post_data.update({"locale": document_locale})
175
176 doc_form = DocumentForm(
177 post_data, instance=doc, parent_slug=slug_dict["parent"]
178 )
179 doc_form.instance.locale = document_locale
180 doc_form.instance.parent = parent_doc
181
182 if which_form == "both":
183 # Sending a new copy of post so the slug change above
184 # doesn't cause problems during validation
185 rev_form = RevisionForm(
186 request=request, data=post_data, parent_slug=slug_dict["parent"]
187 )
188
189 # If we are submitting the whole form, we need to check that
190 # the Revision is valid before saving the Document.
191 if doc_form.is_valid() and (which_form == "doc" or rev_form.is_valid()):
192
193 # If the document you're about to save already exists, as a
194 # soft-delete, then really delete it first.
195 for soft_deleted_document in Document.deleted_objects.filter(
196 locale=doc_form.cleaned_data["locale"],
197 slug=doc_form.cleaned_data["slug"],
198 ):
199 soft_deleted_document.delete(purge=True)
200
201 doc = doc_form.save(parent=parent_doc)
202
203 if which_form == "doc":
204 url = urlparams(doc.get_edit_url(), opendescription=1)
205 return redirect(url)
206 else:
207 doc_form.data["slug"] = posted_slug
208 doc_form_invalid = True
209
210 if doc and which_form in ["rev", "both"]:
211 post_data = request.POST.copy()
212 if "slug" not in post_data:
213 post_data["slug"] = posted_slug
214
215 # update the post data with the toc_depth of original
216 post_data["toc_depth"] = based_on_rev.toc_depth
217
218 # Pass in the locale for the akistmet "blog_lang".
219 post_data["locale"] = document_locale
220
221 rev_form = RevisionForm(
222 request=request, data=post_data, parent_slug=slug_dict["parent"]
223 )
224 rev_form.instance.document = doc # for rev_form.clean()
225
226 if rev_form.is_valid() and not doc_form_invalid:
227 parent_id = request.POST.get("parent_id", "")
228
229 # Attempt to set a parent
230 if parent_id:
231 try:
232 try:
233 parent_doc = Document.all_objects.get(id=parent_id)
234 except Document.DoesNotExist:
235 raise Http404("Parent document does not exist")
236 rev_form.instance.document.parent = parent_doc
237 doc.parent = parent_doc
238 rev_form.instance.based_on.document = doc.original
239 except Document.DoesNotExist:
240 pass
241
242 rev_form.save(doc)
243 # If this is an Ajax POST, then return a JsonResponse
244 if request.is_ajax():
245 data = {
246 "error": False,
247 "new_revision_id": rev_form.instance.id,
248 }
249
250 return JsonResponse(data)
251
252 # Construct the redirect URL, adding any needed parameters
253 url = doc.get_absolute_url()
254 params = {}
255 # Parameter for the document saved, so that we can delete the cached draft on load
256 params["rev_saved"] = request.POST.get("current_rev", "")
257 url = "%s?%s" % (url, urlencode(params))
258 return redirect(url)
259 else:
260 # If this is an Ajax POST, then return a JsonResponse with error
261 if request.is_ajax():
262 if "current_rev" in rev_form._errors:
263 # Make the error message safe so the '<' and '>' don't
264 # get turned into '<' and '>', respectively
265 rev_form.errors["current_rev"][0] = mark_safe(
266 rev_form.errors["current_rev"][0]
267 )
268 errors = [rev_form.errors[key][0] for key in rev_form.errors.keys()]
269 data = {
270 "error": True,
271 "error_message": errors,
272 "new_revision_id": rev_form.instance.id,
273 }
274 return JsonResponse(data=data)
275
276 if doc:
277 from_id = smart_int(request.GET.get("from"), None)
278 to_id = smart_int(request.GET.get("to"), None)
279
280 revision_from = get_object_or_none(Revision, pk=from_id, document=doc.parent)
281 revision_to = get_object_or_none(Revision, pk=to_id, document=doc.parent)
282 else:
283 revision_from = revision_to = None
284
285 parent_split = split_slug(parent_doc.slug)
286
287 language = language_mapping[document_locale.lower()]
288 default_locale = language_mapping[settings.WIKI_DEFAULT_LANGUAGE.lower()]
289
290 context = {
291 "parent": parent_doc,
292 "document": doc,
293 "document_form": doc_form,
294 "revision_form": rev_form,
295 "locale": document_locale,
296 "default_locale": default_locale,
297 "language": language,
298 "based_on": based_on_rev,
299 "disclose_description": disclose_description,
300 "discard_href": discard_href,
301 "attachment_form": AttachmentRevisionForm(),
302 "specific_slug": parent_split["specific"],
303 "parent_slug": parent_split["parent"],
304 "revision_from": revision_from,
305 "revision_to": revision_to,
306 }
307 return render(request, "wiki/translate.html", context)
308
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kuma/wiki/views/translate.py b/kuma/wiki/views/translate.py
--- a/kuma/wiki/views/translate.py
+++ b/kuma/wiki/views/translate.py
@@ -3,6 +3,7 @@
from csp.decorators import csp_update
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
+from django.db.models import Q
from django.http import Http404, JsonResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.safestring import mark_safe
@@ -192,10 +193,10 @@
# If the document you're about to save already exists, as a
# soft-delete, then really delete it first.
- for soft_deleted_document in Document.deleted_objects.filter(
- locale=doc_form.cleaned_data["locale"],
- slug=doc_form.cleaned_data["slug"],
- ):
+ previously_deleted_documents = Document.deleted_objects.filter(
+ locale=doc_form.cleaned_data["locale"]
+ ).filter(Q(slug=doc_form.cleaned_data["slug"]) | Q(parent=parent_doc))
+ for soft_deleted_document in previously_deleted_documents:
soft_deleted_document.delete(purge=True)
doc = doc_form.save(parent=parent_doc)
| {"golden_diff": "diff --git a/kuma/wiki/views/translate.py b/kuma/wiki/views/translate.py\n--- a/kuma/wiki/views/translate.py\n+++ b/kuma/wiki/views/translate.py\n@@ -3,6 +3,7 @@\n from csp.decorators import csp_update\n from django.conf import settings\n from django.core.exceptions import ObjectDoesNotExist\n+from django.db.models import Q\n from django.http import Http404, JsonResponse\n from django.shortcuts import get_object_or_404, redirect, render\n from django.utils.safestring import mark_safe\n@@ -192,10 +193,10 @@\n \n # If the document you're about to save already exists, as a\n # soft-delete, then really delete it first.\n- for soft_deleted_document in Document.deleted_objects.filter(\n- locale=doc_form.cleaned_data[\"locale\"],\n- slug=doc_form.cleaned_data[\"slug\"],\n- ):\n+ previously_deleted_documents = Document.deleted_objects.filter(\n+ locale=doc_form.cleaned_data[\"locale\"]\n+ ).filter(Q(slug=doc_form.cleaned_data[\"slug\"]) | Q(parent=parent_doc))\n+ for soft_deleted_document in previously_deleted_documents:\n soft_deleted_document.delete(purge=True)\n \n doc = doc_form.save(parent=parent_doc)\n", "issue": "ISE when trying to submit a translation\n**Summary**\r\n_What is the problem?_\r\nSubmitting a translation in Italian for https://wiki.developer.mozilla.org/en-US/docs/Learn/Getting_started_with_the_web/Publishing_your_website$translate?tolocale=it fails and gives an ISE\r\n\r\n**Steps To Reproduce (STR)**\r\n1. Went to https://wiki.developer.mozilla.org/en-US/docs/Learn/Getting_started_with_the_web/Publishing_your_website$translate?tolocale=it\r\n2. Added / Changed a bunch of words\r\n3. Clicked \"Publish\"\r\n\r\n\r\n**Actual behavior**\r\nAn ISE / error 500 happened, preventing to save the work.\r\n\r\n\r\n**Expected behavior**\r\nThe localization is save, no error, the reader is able to read a new document\r\n\r\n\r\n**Additional context**\r\n\r\n* First report here: https://discourse.mozilla.org/t/50-internal-server-error-while-saving/58804\r\n* I was not able to reproduce this on another locale (pl) for this page\r\n\n", "before_files": [{"content": "from urllib.parse import urlencode\n\nfrom csp.decorators import csp_update\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.http import Http404, JsonResponse\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.decorators.cache import never_cache\n\nimport kuma.wiki.content\nfrom kuma.attachments.forms import AttachmentRevisionForm\nfrom kuma.core.decorators import block_user_agents, ensure_wiki_domain, login_required\nfrom kuma.core.i18n import get_language_mapping\nfrom kuma.core.urlresolvers import reverse\nfrom kuma.core.utils import get_object_or_none, smart_int, urlparams\n\nfrom .utils import document_form_initial, split_slug\nfrom ..decorators import check_readonly, prevent_indexing, process_document_path\nfrom ..forms import DocumentForm, RevisionForm\nfrom ..models import Document, Revision\n\n\n@ensure_wiki_domain\n@never_cache\n@block_user_agents\n@login_required\n@process_document_path\ndef select_locale(request, document_slug, document_locale):\n \"\"\"\n Select a locale to translate the document to.\n \"\"\"\n doc = get_object_or_404(Document, locale=document_locale, slug=document_slug)\n return render(request, \"wiki/select_locale.html\", {\"document\": 
doc})\n\n\n@ensure_wiki_domain\n@never_cache\n@block_user_agents\n@login_required\n@csp_update(SCRIPT_SRC=\"'unsafe-eval'\") # Required until CKEditor 4.7\n@process_document_path\n@check_readonly\n@prevent_indexing\ndef translate(request, document_slug, document_locale):\n \"\"\"\n Create a new translation of a wiki document.\n\n * document_slug is for the default locale\n * translation is to the request locale\n \"\"\"\n # TODO: Refactor this view into two views? (new, edit)\n # That might help reduce the headache-inducing branchiness.\n\n # The parent document to translate from\n try:\n # Use '.all_objects' because the parent might have been soft deleted.\n # And if we don't respect that fact, it would become impossible to\n # edit a the child of it.\n parent_doc = Document.all_objects.get(\n locale=settings.WIKI_DEFAULT_LANGUAGE, slug=document_slug\n )\n except Document.DoesNotExist:\n raise Http404(\"Parent document does not exist\")\n\n # Get the mapping here and now so it can be used for input validation\n language_mapping = get_language_mapping()\n\n # HACK: Seems weird, but sticking the translate-to locale in a query\n # param is the best way to avoid the MindTouch-legacy locale\n # redirection logic.\n document_locale = request.GET.get(\"tolocale\", document_locale)\n if document_locale.lower() not in language_mapping:\n # The 'tolocale' query string parameters aren't free-text. They're\n # explicitly listed on the \"Select language\" page (`...$locales`)\n # If a locale was entered that wasn't a link it's a user bug.\n raise Http404\n\n # Set a \"Discard Changes\" page\n discard_href = \"\"\n\n if settings.WIKI_DEFAULT_LANGUAGE == document_locale:\n # Don't translate to the default language.\n return redirect(\n reverse(\n \"wiki.edit\",\n locale=settings.WIKI_DEFAULT_LANGUAGE,\n args=[parent_doc.slug],\n )\n )\n\n if not parent_doc.is_localizable:\n message = _(\"You cannot translate this document.\")\n context = {\"message\": message}\n return render(request, \"handlers/400.html\", context, status=400)\n\n based_on_rev = parent_doc.current_or_latest_revision()\n\n disclose_description = bool(request.GET.get(\"opendescription\"))\n\n try:\n doc = parent_doc.translations.get(locale=document_locale)\n slug_dict = split_slug(doc.slug)\n except Document.DoesNotExist:\n doc = None\n disclose_description = True\n slug_dict = split_slug(document_slug)\n\n # Find the \"real\" parent topic, which is its translation\n if parent_doc.parent_topic:\n try:\n parent_topic_translated_doc = parent_doc.parent_topic.translations.get(\n locale=document_locale\n )\n slug_dict = split_slug(\n parent_topic_translated_doc.slug + \"/\" + slug_dict[\"specific\"]\n )\n except ObjectDoesNotExist:\n pass\n\n user_has_doc_perm = (not doc) or (doc and doc.allows_editing_by(request.user))\n\n doc_form = None\n if user_has_doc_perm:\n if doc:\n # If there's an existing doc, populate form from it.\n discard_href = doc.get_absolute_url()\n doc.slug = slug_dict[\"specific\"]\n doc_initial = document_form_initial(doc)\n else:\n # If no existing doc, bring over the original title and slug.\n discard_href = parent_doc.get_absolute_url()\n doc_initial = {\"title\": based_on_rev.title, \"slug\": slug_dict[\"specific\"]}\n doc_form = DocumentForm(initial=doc_initial, parent_slug=slug_dict[\"parent\"])\n\n initial = {\n \"based_on\": based_on_rev.id,\n \"current_rev\": doc.current_or_latest_revision().id if doc else None,\n \"comment\": \"\",\n \"toc_depth\": based_on_rev.toc_depth,\n \"localization_tags\": 
[\"inprogress\"],\n }\n content = None\n if not doc:\n content = based_on_rev.content\n if content:\n # TODO: There will be no need to \"filterEditorSafety\" when the code\n # that calls \"clean_content\" on Revision.save is deployed to\n # production, AND the current revisions of all docs have had\n # their content cleaned with \"clean_content\".\n initial.update(\n content=kuma.wiki.content.parse(content).filterEditorSafety().serialize()\n )\n instance = doc and doc.current_or_latest_revision()\n rev_form = RevisionForm(\n request=request,\n instance=instance,\n initial=initial,\n parent_slug=slug_dict[\"parent\"],\n )\n\n if request.method == \"POST\":\n which_form = request.POST.get(\"form-type\", \"both\")\n doc_form_invalid = False\n\n # Grab the posted slug value in case it's invalid\n posted_slug = request.POST.get(\"slug\", slug_dict[\"specific\"])\n\n if user_has_doc_perm and which_form in [\"doc\", \"both\"]:\n disclose_description = True\n post_data = request.POST.copy()\n\n post_data.update({\"locale\": document_locale})\n\n doc_form = DocumentForm(\n post_data, instance=doc, parent_slug=slug_dict[\"parent\"]\n )\n doc_form.instance.locale = document_locale\n doc_form.instance.parent = parent_doc\n\n if which_form == \"both\":\n # Sending a new copy of post so the slug change above\n # doesn't cause problems during validation\n rev_form = RevisionForm(\n request=request, data=post_data, parent_slug=slug_dict[\"parent\"]\n )\n\n # If we are submitting the whole form, we need to check that\n # the Revision is valid before saving the Document.\n if doc_form.is_valid() and (which_form == \"doc\" or rev_form.is_valid()):\n\n # If the document you're about to save already exists, as a\n # soft-delete, then really delete it first.\n for soft_deleted_document in Document.deleted_objects.filter(\n locale=doc_form.cleaned_data[\"locale\"],\n slug=doc_form.cleaned_data[\"slug\"],\n ):\n soft_deleted_document.delete(purge=True)\n\n doc = doc_form.save(parent=parent_doc)\n\n if which_form == \"doc\":\n url = urlparams(doc.get_edit_url(), opendescription=1)\n return redirect(url)\n else:\n doc_form.data[\"slug\"] = posted_slug\n doc_form_invalid = True\n\n if doc and which_form in [\"rev\", \"both\"]:\n post_data = request.POST.copy()\n if \"slug\" not in post_data:\n post_data[\"slug\"] = posted_slug\n\n # update the post data with the toc_depth of original\n post_data[\"toc_depth\"] = based_on_rev.toc_depth\n\n # Pass in the locale for the akistmet \"blog_lang\".\n post_data[\"locale\"] = document_locale\n\n rev_form = RevisionForm(\n request=request, data=post_data, parent_slug=slug_dict[\"parent\"]\n )\n rev_form.instance.document = doc # for rev_form.clean()\n\n if rev_form.is_valid() and not doc_form_invalid:\n parent_id = request.POST.get(\"parent_id\", \"\")\n\n # Attempt to set a parent\n if parent_id:\n try:\n try:\n parent_doc = Document.all_objects.get(id=parent_id)\n except Document.DoesNotExist:\n raise Http404(\"Parent document does not exist\")\n rev_form.instance.document.parent = parent_doc\n doc.parent = parent_doc\n rev_form.instance.based_on.document = doc.original\n except Document.DoesNotExist:\n pass\n\n rev_form.save(doc)\n # If this is an Ajax POST, then return a JsonResponse\n if request.is_ajax():\n data = {\n \"error\": False,\n \"new_revision_id\": rev_form.instance.id,\n }\n\n return JsonResponse(data)\n\n # Construct the redirect URL, adding any needed parameters\n url = doc.get_absolute_url()\n params = {}\n # Parameter for the document saved, so that we can 
delete the cached draft on load\n params[\"rev_saved\"] = request.POST.get(\"current_rev\", \"\")\n url = \"%s?%s\" % (url, urlencode(params))\n return redirect(url)\n else:\n # If this is an Ajax POST, then return a JsonResponse with error\n if request.is_ajax():\n if \"current_rev\" in rev_form._errors:\n # Make the error message safe so the '<' and '>' don't\n # get turned into '<' and '>', respectively\n rev_form.errors[\"current_rev\"][0] = mark_safe(\n rev_form.errors[\"current_rev\"][0]\n )\n errors = [rev_form.errors[key][0] for key in rev_form.errors.keys()]\n data = {\n \"error\": True,\n \"error_message\": errors,\n \"new_revision_id\": rev_form.instance.id,\n }\n return JsonResponse(data=data)\n\n if doc:\n from_id = smart_int(request.GET.get(\"from\"), None)\n to_id = smart_int(request.GET.get(\"to\"), None)\n\n revision_from = get_object_or_none(Revision, pk=from_id, document=doc.parent)\n revision_to = get_object_or_none(Revision, pk=to_id, document=doc.parent)\n else:\n revision_from = revision_to = None\n\n parent_split = split_slug(parent_doc.slug)\n\n language = language_mapping[document_locale.lower()]\n default_locale = language_mapping[settings.WIKI_DEFAULT_LANGUAGE.lower()]\n\n context = {\n \"parent\": parent_doc,\n \"document\": doc,\n \"document_form\": doc_form,\n \"revision_form\": rev_form,\n \"locale\": document_locale,\n \"default_locale\": default_locale,\n \"language\": language,\n \"based_on\": based_on_rev,\n \"disclose_description\": disclose_description,\n \"discard_href\": discard_href,\n \"attachment_form\": AttachmentRevisionForm(),\n \"specific_slug\": parent_split[\"specific\"],\n \"parent_slug\": parent_split[\"parent\"],\n \"revision_from\": revision_from,\n \"revision_to\": revision_to,\n }\n return render(request, \"wiki/translate.html\", context)\n", "path": "kuma/wiki/views/translate.py"}], "after_files": [{"content": "from urllib.parse import urlencode\n\nfrom csp.decorators import csp_update\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db.models import Q\nfrom django.http import Http404, JsonResponse\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.decorators.cache import never_cache\n\nimport kuma.wiki.content\nfrom kuma.attachments.forms import AttachmentRevisionForm\nfrom kuma.core.decorators import block_user_agents, ensure_wiki_domain, login_required\nfrom kuma.core.i18n import get_language_mapping\nfrom kuma.core.urlresolvers import reverse\nfrom kuma.core.utils import get_object_or_none, smart_int, urlparams\n\nfrom .utils import document_form_initial, split_slug\nfrom ..decorators import check_readonly, prevent_indexing, process_document_path\nfrom ..forms import DocumentForm, RevisionForm\nfrom ..models import Document, Revision\n\n\n@ensure_wiki_domain\n@never_cache\n@block_user_agents\n@login_required\n@process_document_path\ndef select_locale(request, document_slug, document_locale):\n \"\"\"\n Select a locale to translate the document to.\n \"\"\"\n doc = get_object_or_404(Document, locale=document_locale, slug=document_slug)\n return render(request, \"wiki/select_locale.html\", {\"document\": doc})\n\n\n@ensure_wiki_domain\n@never_cache\n@block_user_agents\n@login_required\n@csp_update(SCRIPT_SRC=\"'unsafe-eval'\") # Required until CKEditor 4.7\n@process_document_path\n@check_readonly\n@prevent_indexing\ndef translate(request, 
document_slug, document_locale):\n \"\"\"\n Create a new translation of a wiki document.\n\n * document_slug is for the default locale\n * translation is to the request locale\n \"\"\"\n # TODO: Refactor this view into two views? (new, edit)\n # That might help reduce the headache-inducing branchiness.\n\n # The parent document to translate from\n try:\n # Use '.all_objects' because the parent might have been soft deleted.\n # And if we don't respect that fact, it would become impossible to\n # edit a the child of it.\n parent_doc = Document.all_objects.get(\n locale=settings.WIKI_DEFAULT_LANGUAGE, slug=document_slug\n )\n except Document.DoesNotExist:\n raise Http404(\"Parent document does not exist\")\n\n # Get the mapping here and now so it can be used for input validation\n language_mapping = get_language_mapping()\n\n # HACK: Seems weird, but sticking the translate-to locale in a query\n # param is the best way to avoid the MindTouch-legacy locale\n # redirection logic.\n document_locale = request.GET.get(\"tolocale\", document_locale)\n if document_locale.lower() not in language_mapping:\n # The 'tolocale' query string parameters aren't free-text. They're\n # explicitly listed on the \"Select language\" page (`...$locales`)\n # If a locale was entered that wasn't a link it's a user bug.\n raise Http404\n\n # Set a \"Discard Changes\" page\n discard_href = \"\"\n\n if settings.WIKI_DEFAULT_LANGUAGE == document_locale:\n # Don't translate to the default language.\n return redirect(\n reverse(\n \"wiki.edit\",\n locale=settings.WIKI_DEFAULT_LANGUAGE,\n args=[parent_doc.slug],\n )\n )\n\n if not parent_doc.is_localizable:\n message = _(\"You cannot translate this document.\")\n context = {\"message\": message}\n return render(request, \"handlers/400.html\", context, status=400)\n\n based_on_rev = parent_doc.current_or_latest_revision()\n\n disclose_description = bool(request.GET.get(\"opendescription\"))\n\n try:\n doc = parent_doc.translations.get(locale=document_locale)\n slug_dict = split_slug(doc.slug)\n except Document.DoesNotExist:\n doc = None\n disclose_description = True\n slug_dict = split_slug(document_slug)\n\n # Find the \"real\" parent topic, which is its translation\n if parent_doc.parent_topic:\n try:\n parent_topic_translated_doc = parent_doc.parent_topic.translations.get(\n locale=document_locale\n )\n slug_dict = split_slug(\n parent_topic_translated_doc.slug + \"/\" + slug_dict[\"specific\"]\n )\n except ObjectDoesNotExist:\n pass\n\n user_has_doc_perm = (not doc) or (doc and doc.allows_editing_by(request.user))\n\n doc_form = None\n if user_has_doc_perm:\n if doc:\n # If there's an existing doc, populate form from it.\n discard_href = doc.get_absolute_url()\n doc.slug = slug_dict[\"specific\"]\n doc_initial = document_form_initial(doc)\n else:\n # If no existing doc, bring over the original title and slug.\n discard_href = parent_doc.get_absolute_url()\n doc_initial = {\"title\": based_on_rev.title, \"slug\": slug_dict[\"specific\"]}\n doc_form = DocumentForm(initial=doc_initial, parent_slug=slug_dict[\"parent\"])\n\n initial = {\n \"based_on\": based_on_rev.id,\n \"current_rev\": doc.current_or_latest_revision().id if doc else None,\n \"comment\": \"\",\n \"toc_depth\": based_on_rev.toc_depth,\n \"localization_tags\": [\"inprogress\"],\n }\n content = None\n if not doc:\n content = based_on_rev.content\n if content:\n # TODO: There will be no need to \"filterEditorSafety\" when the code\n # that calls \"clean_content\" on Revision.save is deployed to\n # production, 
AND the current revisions of all docs have had\n # their content cleaned with \"clean_content\".\n initial.update(\n content=kuma.wiki.content.parse(content).filterEditorSafety().serialize()\n )\n instance = doc and doc.current_or_latest_revision()\n rev_form = RevisionForm(\n request=request,\n instance=instance,\n initial=initial,\n parent_slug=slug_dict[\"parent\"],\n )\n\n if request.method == \"POST\":\n which_form = request.POST.get(\"form-type\", \"both\")\n doc_form_invalid = False\n\n # Grab the posted slug value in case it's invalid\n posted_slug = request.POST.get(\"slug\", slug_dict[\"specific\"])\n\n if user_has_doc_perm and which_form in [\"doc\", \"both\"]:\n disclose_description = True\n post_data = request.POST.copy()\n\n post_data.update({\"locale\": document_locale})\n\n doc_form = DocumentForm(\n post_data, instance=doc, parent_slug=slug_dict[\"parent\"]\n )\n doc_form.instance.locale = document_locale\n doc_form.instance.parent = parent_doc\n\n if which_form == \"both\":\n # Sending a new copy of post so the slug change above\n # doesn't cause problems during validation\n rev_form = RevisionForm(\n request=request, data=post_data, parent_slug=slug_dict[\"parent\"]\n )\n\n # If we are submitting the whole form, we need to check that\n # the Revision is valid before saving the Document.\n if doc_form.is_valid() and (which_form == \"doc\" or rev_form.is_valid()):\n\n # If the document you're about to save already exists, as a\n # soft-delete, then really delete it first.\n previously_deleted_documents = Document.deleted_objects.filter(\n locale=doc_form.cleaned_data[\"locale\"]\n ).filter(Q(slug=doc_form.cleaned_data[\"slug\"]) | Q(parent=parent_doc))\n for soft_deleted_document in previously_deleted_documents:\n soft_deleted_document.delete(purge=True)\n\n doc = doc_form.save(parent=parent_doc)\n\n if which_form == \"doc\":\n url = urlparams(doc.get_edit_url(), opendescription=1)\n return redirect(url)\n else:\n doc_form.data[\"slug\"] = posted_slug\n doc_form_invalid = True\n\n if doc and which_form in [\"rev\", \"both\"]:\n post_data = request.POST.copy()\n if \"slug\" not in post_data:\n post_data[\"slug\"] = posted_slug\n\n # update the post data with the toc_depth of original\n post_data[\"toc_depth\"] = based_on_rev.toc_depth\n\n # Pass in the locale for the akistmet \"blog_lang\".\n post_data[\"locale\"] = document_locale\n\n rev_form = RevisionForm(\n request=request, data=post_data, parent_slug=slug_dict[\"parent\"]\n )\n rev_form.instance.document = doc # for rev_form.clean()\n\n if rev_form.is_valid() and not doc_form_invalid:\n parent_id = request.POST.get(\"parent_id\", \"\")\n\n # Attempt to set a parent\n if parent_id:\n try:\n try:\n parent_doc = Document.all_objects.get(id=parent_id)\n except Document.DoesNotExist:\n raise Http404(\"Parent document does not exist\")\n rev_form.instance.document.parent = parent_doc\n doc.parent = parent_doc\n rev_form.instance.based_on.document = doc.original\n except Document.DoesNotExist:\n pass\n\n rev_form.save(doc)\n # If this is an Ajax POST, then return a JsonResponse\n if request.is_ajax():\n data = {\n \"error\": False,\n \"new_revision_id\": rev_form.instance.id,\n }\n\n return JsonResponse(data)\n\n # Construct the redirect URL, adding any needed parameters\n url = doc.get_absolute_url()\n params = {}\n # Parameter for the document saved, so that we can delete the cached draft on load\n params[\"rev_saved\"] = request.POST.get(\"current_rev\", \"\")\n url = \"%s?%s\" % (url, urlencode(params))\n return 
redirect(url)\n else:\n # If this is an Ajax POST, then return a JsonResponse with error\n if request.is_ajax():\n if \"current_rev\" in rev_form._errors:\n # Make the error message safe so the '<' and '>' don't\n # get turned into '<' and '>', respectively\n rev_form.errors[\"current_rev\"][0] = mark_safe(\n rev_form.errors[\"current_rev\"][0]\n )\n errors = [rev_form.errors[key][0] for key in rev_form.errors.keys()]\n data = {\n \"error\": True,\n \"error_message\": errors,\n \"new_revision_id\": rev_form.instance.id,\n }\n return JsonResponse(data=data)\n\n if doc:\n from_id = smart_int(request.GET.get(\"from\"), None)\n to_id = smart_int(request.GET.get(\"to\"), None)\n\n revision_from = get_object_or_none(Revision, pk=from_id, document=doc.parent)\n revision_to = get_object_or_none(Revision, pk=to_id, document=doc.parent)\n else:\n revision_from = revision_to = None\n\n parent_split = split_slug(parent_doc.slug)\n\n language = language_mapping[document_locale.lower()]\n default_locale = language_mapping[settings.WIKI_DEFAULT_LANGUAGE.lower()]\n\n context = {\n \"parent\": parent_doc,\n \"document\": doc,\n \"document_form\": doc_form,\n \"revision_form\": rev_form,\n \"locale\": document_locale,\n \"default_locale\": default_locale,\n \"language\": language,\n \"based_on\": based_on_rev,\n \"disclose_description\": disclose_description,\n \"discard_href\": discard_href,\n \"attachment_form\": AttachmentRevisionForm(),\n \"specific_slug\": parent_split[\"specific\"],\n \"parent_slug\": parent_split[\"parent\"],\n \"revision_from\": revision_from,\n \"revision_to\": revision_to,\n }\n return render(request, \"wiki/translate.html\", context)\n", "path": "kuma/wiki/views/translate.py"}]} | 3,797 | 275 |
gh_patches_debug_20722 | rasdani/github-patches | git_diff | netket__netket-470 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Getting acceptance of numpy metropolis_hastings
The method defining the acceptance for `numpy/metropolis_hastings` in v3.0 is
```python
@property
def acceptance(self):
"""The measured acceptance probability."""
return _mean(self._accepted_samples) / _mean(self._total_samples)
```
But `self._accepted_samples` and `self._total_samples` are ints. Don't know if this has been fixed in a PR, but if it's not, maybe it should be included.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netket/stats/_sum_inplace.py`
Content:
```
1 from functools import singledispatch
2 import numpy as _np
3
4 from netket.utils import mpi_available as _mpi_available, n_nodes as _n_nodes
5
6 if _mpi_available:
7 from netket.utils import MPI_comm as _MPI_comm
8 from netket.utils import MPI as _MPI
9
10
11 @singledispatch
12 def sum_inplace(x):
13 """
14 Computes the elementwie sum of an array or a scalar across all MPI processes.
15 Attempts to perform this sum inplace if possible, but for some types a copy
16 might be returned.
17
18 Args:
19 a: The input array, which will usually be overwritten in place.
20 Returns:
21 out: The reduced array.
22 """
23 raise TypeError("Unknown type to perform dispatch upon: {}".format(type(x)))
24
25
26 #######
27 # Scalar
28 @sum_inplace.register(complex)
29 @sum_inplace.register(_np.float64)
30 @sum_inplace.register(_np.float32)
31 @sum_inplace.register(_np.complex64)
32 @sum_inplace.register(_np.complex128)
33 @sum_inplace.register(float)
34 def sum_inplace_scalar(a):
35 ar = _np.asarray(a)
36
37 if _n_nodes > 1:
38 _MPI_comm.Allreduce(_MPI.IN_PLACE, ar.reshape(-1), op=_MPI.SUM)
39
40 return ar
41
42
43 ##############
44 # Numpy Array
45 #
46 @sum_inplace.register(_np.ndarray)
47 def sum_inplace_MPI(a):
48 """
49 Computes the elementwise sum of a numpy array over all MPI processes.
50
51 Args:
52 a (numpy.ndarray): The input array, which will be overwritten in place.
53 """
54 if _n_nodes > 1:
55 _MPI_comm.Allreduce(_MPI.IN_PLACE, a.reshape(-1), op=_MPI.SUM)
56
57 return a
58
59
60 ##############
61 # Jax
62 #
63 from netket.utils import jax_available
64
65 if jax_available:
66 import numpy as _np
67 import jax
68
69 @sum_inplace.register(jax.interpreters.xla.DeviceArray)
70 def sum_inplace_jax(x):
71 if not isinstance(x, jax.interpreters.xla.DeviceArray):
72 raise TypeError(
73 "Argument to sum_inplace_jax must be a DeviceArray, got {}".format(
74 type(x)
75 )
76 )
77
78 if _n_nodes == 1:
79 return x
80
81 # This below only works on cpus...
82 # we should make this work for gpus too..
83 # TODO: unsafe_buffer_pointer is considered not yet definitive interface
84 ptr = x.block_until_ready().device_buffer.unsafe_buffer_pointer()
85
86 # The above is faster.
87 # This below should work more often, but might copy.
88 # Depending on future changes in jaxlib, we might have to switch to
89 # this below.
90 # see Google/jax #2123 and #1009
91 # _x = jax.xla._force(x.block_until_ready())
92 # ptr = _x.device_buffer.unsafe_buffer_pointer()
93
94 # using native numpy because jax's numpy does not have ctypeslib
95 data_pointer = _np.ctypeslib.ndpointer(x.dtype, shape=x.shape)
96
97 # wrap jax data into a standard numpy array which is handled by MPI
98 arr = data_pointer(ptr).contents
99 _MPI_comm.Allreduce(_MPI.IN_PLACE, arr.reshape(-1), op=_MPI.SUM)
100
101 return x
102
103 @sum_inplace.register(jax.interpreters.partial_eval.JaxprTracer)
104 @sum_inplace.register(jax.interpreters.ad.JVPTracer)
105 def sum_inplace_jax_jittracer(x):
106 if _n_nodes == 1:
107 return x
108 else:
109 raise RuntimError(
110 "Cannot jit through sum_inplace when running with multiple MPI processes."
111 )
112
```
Path: `netket/sampler/numpy/metropolis_hastings.py`
Content:
```
1 from ..abstract_sampler import AbstractSampler
2 from ...stats import mean as _mean
3 from netket import random as _random
4
5 import math
6 import numpy as _np
7 from numba import jit, int64, float64
8 from ..._jitclass import jitclass
9
10
11 class MetropolisHastings(AbstractSampler):
12 def __init__(self, machine, kernel, n_chains=16, sweep_size=None):
13
14 super().__init__(machine, n_chains)
15
16 self.n_chains = n_chains
17
18 self.sweep_size = sweep_size
19
20 self._kernel = kernel
21
22 self.machine_pow = 2.0
23 self.reset(True)
24
25 @property
26 def n_chains(self):
27 return self._n_chains
28
29 @n_chains.setter
30 def n_chains(self, n_chains):
31 if n_chains < 0:
32 raise ValueError("Expected a positive integer for n_chains ")
33
34 self._n_chains = n_chains
35
36 self._state = _np.zeros((n_chains, self._input_size))
37 self._state1 = _np.copy(self._state)
38
39 self._log_values = _np.zeros(n_chains, dtype=_np.complex128)
40 self._log_values_1 = _np.zeros(n_chains, dtype=_np.complex128)
41 self._log_prob_corr = _np.zeros(n_chains)
42
43 @property
44 def machine_pow(self):
45 return self._machine_pow
46
47 @machine_pow.setter
48 def machine_pow(self, m_power):
49 if not _np.isscalar(m_power):
50 raise ValueError("machine_pow should be a scalar.")
51 self._machine_pow = m_power
52
53 @property
54 def sweep_size(self):
55 return self._sweep_size
56
57 @sweep_size.setter
58 def sweep_size(self, sweep_size):
59 self._sweep_size = sweep_size if sweep_size != None else self._input_size
60 if self._sweep_size < 0:
61 raise ValueError("Expected a positive integer for sweep_size ")
62
63 def reset(self, init_random=False):
64 if init_random:
65 self._kernel.random_state(self._state)
66 self._log_values = self.machine.log_val(self._state, out=self._log_values)
67
68 self._accepted_samples = 0
69 self._total_samples = 0
70
71 @staticmethod
72 @jit(nopython=True)
73 def acceptance_kernel(
74 state, state1, log_values, log_values_1, log_prob_corr, machine_pow
75 ):
76 accepted = 0
77
78 for i in range(state.shape[0]):
79 prob = _np.exp(
80 machine_pow * (log_values_1[i] - log_values[i] + log_prob_corr[i]).real
81 )
82 assert not math.isnan(prob)
83
84 if prob > _random.uniform(0, 1):
85 log_values[i] = log_values_1[i]
86 state[i] = state1[i]
87 accepted += 1
88
89 return accepted
90
91 def __next__(self):
92
93 _log_val = self.machine.log_val
94 _acc_kernel = self.acceptance_kernel
95 _state = self._state
96 _state1 = self._state1
97 _log_values = self._log_values
98 _log_values_1 = self._log_values_1
99 _log_prob_corr = self._log_prob_corr
100 _machine_pow = self._machine_pow
101 _t_kernel = self._kernel.transition
102
103 accepted = 0
104
105 for sweep in range(self.sweep_size):
106
107 # Propose a new state using the transition kernel
108 _t_kernel(_state, _state1, _log_prob_corr)
109
110 _log_values_1 = _log_val(_state1, out=_log_values_1)
111
112 # Acceptance Kernel
113 accepted += _acc_kernel(
114 _state,
115 _state1,
116 _log_values,
117 _log_values_1,
118 _log_prob_corr,
119 _machine_pow,
120 )
121
122 self._total_samples += self.sweep_size * self.n_chains
123 self._accepted_samples += accepted
124
125 return self._state
126
127 @property
128 def acceptance(self):
129 """The measured acceptance probability."""
130 return _mean(self._accepted_samples) / _mean(self._total_samples)
131
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/netket/sampler/numpy/metropolis_hastings.py b/netket/sampler/numpy/metropolis_hastings.py
--- a/netket/sampler/numpy/metropolis_hastings.py
+++ b/netket/sampler/numpy/metropolis_hastings.py
@@ -1,7 +1,8 @@
from ..abstract_sampler import AbstractSampler
-from ...stats import mean as _mean
from netket import random as _random
+from netket.stats import sum_inplace as _sum_inplace
+
import math
import numpy as _np
from numba import jit, int64, float64
@@ -127,4 +128,4 @@
@property
def acceptance(self):
"""The measured acceptance probability."""
- return _mean(self._accepted_samples) / _mean(self._total_samples)
+ return _sum_inplace(self._accepted_samples) / _sum_inplace(self._total_samples)
diff --git a/netket/stats/_sum_inplace.py b/netket/stats/_sum_inplace.py
--- a/netket/stats/_sum_inplace.py
+++ b/netket/stats/_sum_inplace.py
@@ -31,6 +31,7 @@
@sum_inplace.register(_np.complex64)
@sum_inplace.register(_np.complex128)
@sum_inplace.register(float)
+@sum_inplace.register(int)
def sum_inplace_scalar(a):
ar = _np.asarray(a)
| {"golden_diff": "diff --git a/netket/sampler/numpy/metropolis_hastings.py b/netket/sampler/numpy/metropolis_hastings.py\n--- a/netket/sampler/numpy/metropolis_hastings.py\n+++ b/netket/sampler/numpy/metropolis_hastings.py\n@@ -1,7 +1,8 @@\n from ..abstract_sampler import AbstractSampler\n-from ...stats import mean as _mean\n from netket import random as _random\n \n+from netket.stats import sum_inplace as _sum_inplace\n+\n import math\n import numpy as _np\n from numba import jit, int64, float64\n@@ -127,4 +128,4 @@\n @property\n def acceptance(self):\n \"\"\"The measured acceptance probability.\"\"\"\n- return _mean(self._accepted_samples) / _mean(self._total_samples)\n+ return _sum_inplace(self._accepted_samples) / _sum_inplace(self._total_samples)\ndiff --git a/netket/stats/_sum_inplace.py b/netket/stats/_sum_inplace.py\n--- a/netket/stats/_sum_inplace.py\n+++ b/netket/stats/_sum_inplace.py\n@@ -31,6 +31,7 @@\n @sum_inplace.register(_np.complex64)\n @sum_inplace.register(_np.complex128)\n @sum_inplace.register(float)\n+@sum_inplace.register(int)\n def sum_inplace_scalar(a):\n ar = _np.asarray(a)\n", "issue": "Getting acceptance of numpy metropolis_hastings\nThe method defining the acceptance for `numpy/metropolis_hastings` in v3.0 is\r\n\r\n```python\r\n@property\r\ndef acceptance(self):\r\n \"\"\"The measured acceptance probability.\"\"\"\r\n return _mean(self._accepted_samples) / _mean(self._total_samples)\r\n```\r\n\r\nBut `self._accepted_samples` and `self._total_samples` are ints. Don't know if this has been fixed in a PR, but if it's not, maybe it should be included.\n", "before_files": [{"content": "from functools import singledispatch\nimport numpy as _np\n\nfrom netket.utils import mpi_available as _mpi_available, n_nodes as _n_nodes\n\nif _mpi_available:\n from netket.utils import MPI_comm as _MPI_comm\n from netket.utils import MPI as _MPI\n\n\n@singledispatch\ndef sum_inplace(x):\n \"\"\"\n Computes the elementwie sum of an array or a scalar across all MPI processes.\n Attempts to perform this sum inplace if possible, but for some types a copy\n might be returned.\n\n Args:\n a: The input array, which will usually be overwritten in place.\n Returns:\n out: The reduced array.\n \"\"\"\n raise TypeError(\"Unknown type to perform dispatch upon: {}\".format(type(x)))\n\n\n#######\n# Scalar\n@sum_inplace.register(complex)\n@sum_inplace.register(_np.float64)\n@sum_inplace.register(_np.float32)\n@sum_inplace.register(_np.complex64)\n@sum_inplace.register(_np.complex128)\n@sum_inplace.register(float)\ndef sum_inplace_scalar(a):\n ar = _np.asarray(a)\n\n if _n_nodes > 1:\n _MPI_comm.Allreduce(_MPI.IN_PLACE, ar.reshape(-1), op=_MPI.SUM)\n\n return ar\n\n\n##############\n# Numpy Array\n#\n@sum_inplace.register(_np.ndarray)\ndef sum_inplace_MPI(a):\n \"\"\"\n Computes the elementwise sum of a numpy array over all MPI processes.\n\n Args:\n a (numpy.ndarray): The input array, which will be overwritten in place.\n \"\"\"\n if _n_nodes > 1:\n _MPI_comm.Allreduce(_MPI.IN_PLACE, a.reshape(-1), op=_MPI.SUM)\n\n return a\n\n\n##############\n# Jax\n#\nfrom netket.utils import jax_available\n\nif jax_available:\n import numpy as _np\n import jax\n\n @sum_inplace.register(jax.interpreters.xla.DeviceArray)\n def sum_inplace_jax(x):\n if not isinstance(x, jax.interpreters.xla.DeviceArray):\n raise TypeError(\n \"Argument to sum_inplace_jax must be a DeviceArray, got {}\".format(\n type(x)\n )\n )\n\n if _n_nodes == 1:\n return x\n\n # This below only works on cpus...\n # we should make this 
work for gpus too..\n # TODO: unsafe_buffer_pointer is considered not yet definitive interface\n ptr = x.block_until_ready().device_buffer.unsafe_buffer_pointer()\n\n # The above is faster.\n # This below should work more often, but might copy.\n # Depending on future changes in jaxlib, we might have to switch to\n # this below.\n # see Google/jax #2123 and #1009\n # _x = jax.xla._force(x.block_until_ready())\n # ptr = _x.device_buffer.unsafe_buffer_pointer()\n\n # using native numpy because jax's numpy does not have ctypeslib\n data_pointer = _np.ctypeslib.ndpointer(x.dtype, shape=x.shape)\n\n # wrap jax data into a standard numpy array which is handled by MPI\n arr = data_pointer(ptr).contents\n _MPI_comm.Allreduce(_MPI.IN_PLACE, arr.reshape(-1), op=_MPI.SUM)\n\n return x\n\n @sum_inplace.register(jax.interpreters.partial_eval.JaxprTracer)\n @sum_inplace.register(jax.interpreters.ad.JVPTracer)\n def sum_inplace_jax_jittracer(x):\n if _n_nodes == 1:\n return x\n else:\n raise RuntimError(\n \"Cannot jit through sum_inplace when running with multiple MPI processes.\"\n )\n", "path": "netket/stats/_sum_inplace.py"}, {"content": "from ..abstract_sampler import AbstractSampler\nfrom ...stats import mean as _mean\nfrom netket import random as _random\n\nimport math\nimport numpy as _np\nfrom numba import jit, int64, float64\nfrom ..._jitclass import jitclass\n\n\nclass MetropolisHastings(AbstractSampler):\n def __init__(self, machine, kernel, n_chains=16, sweep_size=None):\n\n super().__init__(machine, n_chains)\n\n self.n_chains = n_chains\n\n self.sweep_size = sweep_size\n\n self._kernel = kernel\n\n self.machine_pow = 2.0\n self.reset(True)\n\n @property\n def n_chains(self):\n return self._n_chains\n\n @n_chains.setter\n def n_chains(self, n_chains):\n if n_chains < 0:\n raise ValueError(\"Expected a positive integer for n_chains \")\n\n self._n_chains = n_chains\n\n self._state = _np.zeros((n_chains, self._input_size))\n self._state1 = _np.copy(self._state)\n\n self._log_values = _np.zeros(n_chains, dtype=_np.complex128)\n self._log_values_1 = _np.zeros(n_chains, dtype=_np.complex128)\n self._log_prob_corr = _np.zeros(n_chains)\n\n @property\n def machine_pow(self):\n return self._machine_pow\n\n @machine_pow.setter\n def machine_pow(self, m_power):\n if not _np.isscalar(m_power):\n raise ValueError(\"machine_pow should be a scalar.\")\n self._machine_pow = m_power\n\n @property\n def sweep_size(self):\n return self._sweep_size\n\n @sweep_size.setter\n def sweep_size(self, sweep_size):\n self._sweep_size = sweep_size if sweep_size != None else self._input_size\n if self._sweep_size < 0:\n raise ValueError(\"Expected a positive integer for sweep_size \")\n\n def reset(self, init_random=False):\n if init_random:\n self._kernel.random_state(self._state)\n self._log_values = self.machine.log_val(self._state, out=self._log_values)\n\n self._accepted_samples = 0\n self._total_samples = 0\n\n @staticmethod\n @jit(nopython=True)\n def acceptance_kernel(\n state, state1, log_values, log_values_1, log_prob_corr, machine_pow\n ):\n accepted = 0\n\n for i in range(state.shape[0]):\n prob = _np.exp(\n machine_pow * (log_values_1[i] - log_values[i] + log_prob_corr[i]).real\n )\n assert not math.isnan(prob)\n\n if prob > _random.uniform(0, 1):\n log_values[i] = log_values_1[i]\n state[i] = state1[i]\n accepted += 1\n\n return accepted\n\n def __next__(self):\n\n _log_val = self.machine.log_val\n _acc_kernel = self.acceptance_kernel\n _state = self._state\n _state1 = self._state1\n _log_values = 
self._log_values\n _log_values_1 = self._log_values_1\n _log_prob_corr = self._log_prob_corr\n _machine_pow = self._machine_pow\n _t_kernel = self._kernel.transition\n\n accepted = 0\n\n for sweep in range(self.sweep_size):\n\n # Propose a new state using the transition kernel\n _t_kernel(_state, _state1, _log_prob_corr)\n\n _log_values_1 = _log_val(_state1, out=_log_values_1)\n\n # Acceptance Kernel\n accepted += _acc_kernel(\n _state,\n _state1,\n _log_values,\n _log_values_1,\n _log_prob_corr,\n _machine_pow,\n )\n\n self._total_samples += self.sweep_size * self.n_chains\n self._accepted_samples += accepted\n\n return self._state\n\n @property\n def acceptance(self):\n \"\"\"The measured acceptance probability.\"\"\"\n return _mean(self._accepted_samples) / _mean(self._total_samples)\n", "path": "netket/sampler/numpy/metropolis_hastings.py"}], "after_files": [{"content": "from functools import singledispatch\nimport numpy as _np\n\nfrom netket.utils import mpi_available as _mpi_available, n_nodes as _n_nodes\n\nif _mpi_available:\n from netket.utils import MPI_comm as _MPI_comm\n from netket.utils import MPI as _MPI\n\n\n@singledispatch\ndef sum_inplace(x):\n \"\"\"\n Computes the elementwie sum of an array or a scalar across all MPI processes.\n Attempts to perform this sum inplace if possible, but for some types a copy\n might be returned.\n\n Args:\n a: The input array, which will usually be overwritten in place.\n Returns:\n out: The reduced array.\n \"\"\"\n raise TypeError(\"Unknown type to perform dispatch upon: {}\".format(type(x)))\n\n\n#######\n# Scalar\n@sum_inplace.register(complex)\n@sum_inplace.register(_np.float64)\n@sum_inplace.register(_np.float32)\n@sum_inplace.register(_np.complex64)\n@sum_inplace.register(_np.complex128)\n@sum_inplace.register(float)\n@sum_inplace.register(int)\ndef sum_inplace_scalar(a):\n ar = _np.asarray(a)\n\n if _n_nodes > 1:\n _MPI_comm.Allreduce(_MPI.IN_PLACE, ar.reshape(-1), op=_MPI.SUM)\n\n return ar\n\n\n##############\n# Numpy Array\n#\n@sum_inplace.register(_np.ndarray)\ndef sum_inplace_MPI(a):\n \"\"\"\n Computes the elementwise sum of a numpy array over all MPI processes.\n\n Args:\n a (numpy.ndarray): The input array, which will be overwritten in place.\n \"\"\"\n if _n_nodes > 1:\n _MPI_comm.Allreduce(_MPI.IN_PLACE, a.reshape(-1), op=_MPI.SUM)\n\n return a\n\n\n##############\n# Jax\n#\nfrom netket.utils import jax_available\n\nif jax_available:\n import numpy as _np\n import jax\n\n @sum_inplace.register(jax.interpreters.xla.DeviceArray)\n def sum_inplace_jax(x):\n if not isinstance(x, jax.interpreters.xla.DeviceArray):\n raise TypeError(\n \"Argument to sum_inplace_jax must be a DeviceArray, got {}\".format(\n type(x)\n )\n )\n\n if _n_nodes == 1:\n return x\n\n # This below only works on cpus...\n # we should make this work for gpus too..\n # TODO: unsafe_buffer_pointer is considered not yet definitive interface\n ptr = x.block_until_ready().device_buffer.unsafe_buffer_pointer()\n\n # The above is faster.\n # This below should work more often, but might copy.\n # Depending on future changes in jaxlib, we might have to switch to\n # this below.\n # see Google/jax #2123 and #1009\n # _x = jax.xla._force(x.block_until_ready())\n # ptr = _x.device_buffer.unsafe_buffer_pointer()\n\n # using native numpy because jax's numpy does not have ctypeslib\n data_pointer = _np.ctypeslib.ndpointer(x.dtype, shape=x.shape)\n\n # wrap jax data into a standard numpy array which is handled by MPI\n arr = data_pointer(ptr).contents\n 
_MPI_comm.Allreduce(_MPI.IN_PLACE, arr.reshape(-1), op=_MPI.SUM)\n\n return x\n\n @sum_inplace.register(jax.interpreters.partial_eval.JaxprTracer)\n @sum_inplace.register(jax.interpreters.ad.JVPTracer)\n def sum_inplace_jax_jittracer(x):\n if _n_nodes == 1:\n return x\n else:\n raise RuntimError(\n \"Cannot jit through sum_inplace when running with multiple MPI processes.\"\n )\n", "path": "netket/stats/_sum_inplace.py"}, {"content": "from ..abstract_sampler import AbstractSampler\nfrom netket import random as _random\n\nfrom netket.stats import sum_inplace as _sum_inplace\n\nimport math\nimport numpy as _np\nfrom numba import jit, int64, float64\nfrom ..._jitclass import jitclass\n\n\nclass MetropolisHastings(AbstractSampler):\n def __init__(self, machine, kernel, n_chains=16, sweep_size=None):\n\n super().__init__(machine, n_chains)\n\n self.n_chains = n_chains\n\n self.sweep_size = sweep_size\n\n self._kernel = kernel\n\n self.machine_pow = 2.0\n self.reset(True)\n\n @property\n def n_chains(self):\n return self._n_chains\n\n @n_chains.setter\n def n_chains(self, n_chains):\n if n_chains < 0:\n raise ValueError(\"Expected a positive integer for n_chains \")\n\n self._n_chains = n_chains\n\n self._state = _np.zeros((n_chains, self._input_size))\n self._state1 = _np.copy(self._state)\n\n self._log_values = _np.zeros(n_chains, dtype=_np.complex128)\n self._log_values_1 = _np.zeros(n_chains, dtype=_np.complex128)\n self._log_prob_corr = _np.zeros(n_chains)\n\n @property\n def machine_pow(self):\n return self._machine_pow\n\n @machine_pow.setter\n def machine_pow(self, m_power):\n if not _np.isscalar(m_power):\n raise ValueError(\"machine_pow should be a scalar.\")\n self._machine_pow = m_power\n\n @property\n def sweep_size(self):\n return self._sweep_size\n\n @sweep_size.setter\n def sweep_size(self, sweep_size):\n self._sweep_size = sweep_size if sweep_size != None else self._input_size\n if self._sweep_size < 0:\n raise ValueError(\"Expected a positive integer for sweep_size \")\n\n def reset(self, init_random=False):\n if init_random:\n self._kernel.random_state(self._state)\n self._log_values = self.machine.log_val(self._state, out=self._log_values)\n\n self._accepted_samples = 0\n self._total_samples = 0\n\n @staticmethod\n @jit(nopython=True)\n def acceptance_kernel(\n state, state1, log_values, log_values_1, log_prob_corr, machine_pow\n ):\n accepted = 0\n\n for i in range(state.shape[0]):\n prob = _np.exp(\n machine_pow * (log_values_1[i] - log_values[i] + log_prob_corr[i]).real\n )\n assert not math.isnan(prob)\n\n if prob > _random.uniform(0, 1):\n log_values[i] = log_values_1[i]\n state[i] = state1[i]\n accepted += 1\n\n return accepted\n\n def __next__(self):\n\n _log_val = self.machine.log_val\n _acc_kernel = self.acceptance_kernel\n _state = self._state\n _state1 = self._state1\n _log_values = self._log_values\n _log_values_1 = self._log_values_1\n _log_prob_corr = self._log_prob_corr\n _machine_pow = self._machine_pow\n _t_kernel = self._kernel.transition\n\n accepted = 0\n\n for sweep in range(self.sweep_size):\n\n # Propose a new state using the transition kernel\n _t_kernel(_state, _state1, _log_prob_corr)\n\n _log_values_1 = _log_val(_state1, out=_log_values_1)\n\n # Acceptance Kernel\n accepted += _acc_kernel(\n _state,\n _state1,\n _log_values,\n _log_values_1,\n _log_prob_corr,\n _machine_pow,\n )\n\n self._total_samples += self.sweep_size * self.n_chains\n self._accepted_samples += accepted\n\n return self._state\n\n @property\n def acceptance(self):\n \"\"\"The 
measured acceptance probability.\"\"\"\n return _sum_inplace(self._accepted_samples) / _sum_inplace(self._total_samples)\n", "path": "netket/sampler/numpy/metropolis_hastings.py"}]} | 2,691 | 316 |
gh_patches_debug_10632 | rasdani/github-patches | git_diff | PaddlePaddle__models-477 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
text_classification reader error
Get follow error log while running `train.py` of text_classification model:
```
[INFO 2017-11-16 17:19:51,458 train.py:44] please wait to build the word dictionary ...
[INFO 2017-11-16 17:20:07,138 train.py:101] length of word dictionary is : 5147.
I1116 17:20:07.168130 24638 Util.cpp:166] commandline: --use_gpu=False --trainer_count=1
I1116 17:20:07.176143 24638 GradientMachine.cpp:94] Initing parameters..
I1116 17:20:07.186369 24638 GradientMachine.cpp:101] Init parameters done.
Traceback (most recent call last):
File "train.py", line 178, in <module>
main(args)
File "train.py", line 169, in main
model_save_dir=args.model_save_dir)
File "train.py", line 150, in train
num_passes=num_passes)
File "/home/work/wanghaoshuang/paddle/python/install/lib/python2.7/site-packages/paddle/v2/trainer.py", line 162, in train
for batch_id, data_batch in enumerate(reader()):
File "/home/work/wanghaoshuang/paddle/python/install/lib/python2.7/site-packages/paddle/v2/minibatch.py", line 33, in batch_reader
for instance in r:
File "/home/work/wanghaoshuang/paddle/python/install/lib/python2.7/site-packages/paddle/v2/reader/decorator.py", line 67, in data_reader
for e in reader():
TypeError: 'function' object is not iterable
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `text_classification/train.py`
Content:
```
1 import os
2 import sys
3 import gzip
4
5 import paddle.v2 as paddle
6
7 import reader
8 from utils import logger, parse_train_cmd, build_dict, load_dict
9 from network_conf import fc_net, convolution_net
10
11
12 def train(topology,
13 train_data_dir=None,
14 test_data_dir=None,
15 word_dict_path=None,
16 label_dict_path=None,
17 model_save_dir="models",
18 batch_size=32,
19 num_passes=10):
20 """
21 train dnn model
22
23
24 :params train_data_path: path of training data, if this parameter
25 is not specified, paddle.dataset.imdb will be used to run this example
26 :type train_data_path: str
27 :params test_data_path: path of testing data, if this parameter
28 is not specified, paddle.dataset.imdb will be used to run this example
29 :type test_data_path: str
30 :params word_dict_path: path of training data, if this parameter
31 is not specified, paddle.dataset.imdb will be used to run this example
32 :type word_dict_path: str
33 :params num_pass: train pass number
34 :type num_pass: int
35 """
36 if not os.path.exists(model_save_dir):
37 os.mkdir(model_save_dir)
38
39 use_default_data = (train_data_dir is None)
40
41 if use_default_data:
42 logger.info(("No training data are porivided, "
43 "use paddle.dataset.imdb to train the model."))
44 logger.info("please wait to build the word dictionary ...")
45
46 word_dict = paddle.dataset.imdb.word_dict()
47 train_reader = paddle.batch(
48 paddle.reader.shuffle(
49 lambda: paddle.dataset.imdb.train(word_dict), buf_size=1000),
50 batch_size=100)
51 test_reader = paddle.batch(
52 lambda: paddle.dataset.imdb.test(word_dict), batch_size=100)
53
54 class_num = 2
55 else:
56 if word_dict_path is None or not os.path.exists(word_dict_path):
57 logger.info(("word dictionary is not given, the dictionary "
58 "is automatically built from the training data."))
59
60 # build the word dictionary to map the original string-typed
61 # words into integer-typed index
62 build_dict(
63 data_dir=train_data_dir,
64 save_path=word_dict_path,
65 use_col=1,
66 cutoff_fre=5,
67 insert_extra_words=["<UNK>"])
68
69 if not os.path.exists(label_dict_path):
70 logger.info(("label dictionary is not given, the dictionary "
71 "is automatically built from the training data."))
72 # build the label dictionary to map the original string-typed
73 # label into integer-typed index
74 build_dict(
75 data_dir=train_data_dir, save_path=label_dict_path, use_col=0)
76
77 word_dict = load_dict(word_dict_path)
78
79 lbl_dict = load_dict(label_dict_path)
80 class_num = len(lbl_dict)
81 logger.info("class number is : %d." % (len(lbl_dict)))
82
83 train_reader = paddle.batch(
84 paddle.reader.shuffle(
85 reader.train_reader(train_data_dir, word_dict, lbl_dict),
86 buf_size=1000),
87 batch_size=batch_size)
88
89 if test_data_dir is not None:
90 # here, because training and testing data share a same format,
91 # we still use the reader.train_reader to read the testing data.
92 test_reader = paddle.batch(
93 paddle.reader.shuffle(
94 reader.train_reader(test_data_dir, word_dict, lbl_dict),
95 buf_size=1000),
96 batch_size=batch_size)
97 else:
98 test_reader = None
99
100 dict_dim = len(word_dict)
101 logger.info("length of word dictionary is : %d." % (dict_dim))
102
103 paddle.init(use_gpu=False, trainer_count=1)
104
105 # network config
106 cost, prob, label = topology(dict_dim, class_num)
107
108 # create parameters
109 parameters = paddle.parameters.create(cost)
110
111 # create optimizer
112 adam_optimizer = paddle.optimizer.Adam(
113 learning_rate=1e-3,
114 regularization=paddle.optimizer.L2Regularization(rate=1e-3),
115 model_average=paddle.optimizer.ModelAverage(average_window=0.5))
116
117 # create trainer
118 trainer = paddle.trainer.SGD(
119 cost=cost,
120 extra_layers=paddle.evaluator.auc(input=prob, label=label),
121 parameters=parameters,
122 update_equation=adam_optimizer)
123
124 # begin training network
125 feeding = {"word": 0, "label": 1}
126
127 def _event_handler(event):
128 """
129 Define end batch and end pass event handler
130 """
131 if isinstance(event, paddle.event.EndIteration):
132 if event.batch_id % 100 == 0:
133 logger.info("Pass %d, Batch %d, Cost %f, %s\n" % (
134 event.pass_id, event.batch_id, event.cost, event.metrics))
135
136 if isinstance(event, paddle.event.EndPass):
137 if test_reader is not None:
138 result = trainer.test(reader=test_reader, feeding=feeding)
139 logger.info("Test at Pass %d, %s \n" % (event.pass_id,
140 result.metrics))
141 with gzip.open(
142 os.path.join(model_save_dir, "dnn_params_pass_%05d.tar.gz" %
143 event.pass_id), "w") as f:
144 trainer.save_parameter_to_tar(f)
145
146 trainer.train(
147 reader=train_reader,
148 event_handler=_event_handler,
149 feeding=feeding,
150 num_passes=num_passes)
151
152 logger.info("Training has finished.")
153
154
155 def main(args):
156 if args.nn_type == "dnn":
157 topology = fc_net
158 elif args.nn_type == "cnn":
159 topology = convolution_net
160
161 train(
162 topology=topology,
163 train_data_dir=args.train_data_dir,
164 test_data_dir=args.test_data_dir,
165 word_dict_path=args.word_dict,
166 label_dict_path=args.label_dict,
167 batch_size=args.batch_size,
168 num_passes=args.num_passes,
169 model_save_dir=args.model_save_dir)
170
171
172 if __name__ == "__main__":
173 args = parse_train_cmd()
174 if args.train_data_dir is not None:
175 assert args.word_dict and args.label_dict, (
176 "the parameter train_data_dir, word_dict_path, and label_dict_path "
177 "should be set at the same time.")
178 main(args)
179
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/text_classification/train.py b/text_classification/train.py
--- a/text_classification/train.py
+++ b/text_classification/train.py
@@ -46,10 +46,10 @@
word_dict = paddle.dataset.imdb.word_dict()
train_reader = paddle.batch(
paddle.reader.shuffle(
- lambda: paddle.dataset.imdb.train(word_dict), buf_size=1000),
+ lambda: paddle.dataset.imdb.train(word_dict)(), buf_size=1000),
batch_size=100)
test_reader = paddle.batch(
- lambda: paddle.dataset.imdb.test(word_dict), batch_size=100)
+ lambda: paddle.dataset.imdb.test(word_dict)(), batch_size=100)
class_num = 2
else:
| {"golden_diff": "diff --git a/text_classification/train.py b/text_classification/train.py\n--- a/text_classification/train.py\n+++ b/text_classification/train.py\n@@ -46,10 +46,10 @@\n word_dict = paddle.dataset.imdb.word_dict()\n train_reader = paddle.batch(\n paddle.reader.shuffle(\n- lambda: paddle.dataset.imdb.train(word_dict), buf_size=1000),\n+ lambda: paddle.dataset.imdb.train(word_dict)(), buf_size=1000),\n batch_size=100)\n test_reader = paddle.batch(\n- lambda: paddle.dataset.imdb.test(word_dict), batch_size=100)\n+ lambda: paddle.dataset.imdb.test(word_dict)(), batch_size=100)\n \n class_num = 2\n else:\n", "issue": "text_classification reader error\nGet follow error log while running `train.py` of text_classification model:\r\n```\r\n[INFO 2017-11-16 17:19:51,458 train.py:44] please wait to build the word dictionary ...\r\n[INFO 2017-11-16 17:20:07,138 train.py:101] length of word dictionary is : 5147.\r\nI1116 17:20:07.168130 24638 Util.cpp:166] commandline: --use_gpu=False --trainer_count=1\r\nI1116 17:20:07.176143 24638 GradientMachine.cpp:94] Initing parameters..\r\nI1116 17:20:07.186369 24638 GradientMachine.cpp:101] Init parameters done.\r\nTraceback (most recent call last):\r\n File \"train.py\", line 178, in <module>\r\n main(args)\r\n File \"train.py\", line 169, in main\r\n model_save_dir=args.model_save_dir)\r\n File \"train.py\", line 150, in train\r\n num_passes=num_passes)\r\n File \"/home/work/wanghaoshuang/paddle/python/install/lib/python2.7/site-packages/paddle/v2/trainer.py\", line 162, in train\r\n for batch_id, data_batch in enumerate(reader()):\r\n File \"/home/work/wanghaoshuang/paddle/python/install/lib/python2.7/site-packages/paddle/v2/minibatch.py\", line 33, in batch_reader\r\n for instance in r:\r\n File \"/home/work/wanghaoshuang/paddle/python/install/lib/python2.7/site-packages/paddle/v2/reader/decorator.py\", line 67, in data_reader\r\n for e in reader():\r\nTypeError: 'function' object is not iterable\r\n```\n", "before_files": [{"content": "import os\nimport sys\nimport gzip\n\nimport paddle.v2 as paddle\n\nimport reader\nfrom utils import logger, parse_train_cmd, build_dict, load_dict\nfrom network_conf import fc_net, convolution_net\n\n\ndef train(topology,\n train_data_dir=None,\n test_data_dir=None,\n word_dict_path=None,\n label_dict_path=None,\n model_save_dir=\"models\",\n batch_size=32,\n num_passes=10):\n \"\"\"\n train dnn model\n\n\n :params train_data_path: path of training data, if this parameter\n is not specified, paddle.dataset.imdb will be used to run this example\n :type train_data_path: str\n :params test_data_path: path of testing data, if this parameter\n is not specified, paddle.dataset.imdb will be used to run this example\n :type test_data_path: str\n :params word_dict_path: path of training data, if this parameter\n is not specified, paddle.dataset.imdb will be used to run this example\n :type word_dict_path: str\n :params num_pass: train pass number\n :type num_pass: int\n \"\"\"\n if not os.path.exists(model_save_dir):\n os.mkdir(model_save_dir)\n\n use_default_data = (train_data_dir is None)\n\n if use_default_data:\n logger.info((\"No training data are porivided, \"\n \"use paddle.dataset.imdb to train the model.\"))\n logger.info(\"please wait to build the word dictionary ...\")\n\n word_dict = paddle.dataset.imdb.word_dict()\n train_reader = paddle.batch(\n paddle.reader.shuffle(\n lambda: paddle.dataset.imdb.train(word_dict), buf_size=1000),\n batch_size=100)\n test_reader = paddle.batch(\n lambda: 
paddle.dataset.imdb.test(word_dict), batch_size=100)\n\n class_num = 2\n else:\n if word_dict_path is None or not os.path.exists(word_dict_path):\n logger.info((\"word dictionary is not given, the dictionary \"\n \"is automatically built from the training data.\"))\n\n # build the word dictionary to map the original string-typed\n # words into integer-typed index\n build_dict(\n data_dir=train_data_dir,\n save_path=word_dict_path,\n use_col=1,\n cutoff_fre=5,\n insert_extra_words=[\"<UNK>\"])\n\n if not os.path.exists(label_dict_path):\n logger.info((\"label dictionary is not given, the dictionary \"\n \"is automatically built from the training data.\"))\n # build the label dictionary to map the original string-typed\n # label into integer-typed index\n build_dict(\n data_dir=train_data_dir, save_path=label_dict_path, use_col=0)\n\n word_dict = load_dict(word_dict_path)\n\n lbl_dict = load_dict(label_dict_path)\n class_num = len(lbl_dict)\n logger.info(\"class number is : %d.\" % (len(lbl_dict)))\n\n train_reader = paddle.batch(\n paddle.reader.shuffle(\n reader.train_reader(train_data_dir, word_dict, lbl_dict),\n buf_size=1000),\n batch_size=batch_size)\n\n if test_data_dir is not None:\n # here, because training and testing data share a same format,\n # we still use the reader.train_reader to read the testing data.\n test_reader = paddle.batch(\n paddle.reader.shuffle(\n reader.train_reader(test_data_dir, word_dict, lbl_dict),\n buf_size=1000),\n batch_size=batch_size)\n else:\n test_reader = None\n\n dict_dim = len(word_dict)\n logger.info(\"length of word dictionary is : %d.\" % (dict_dim))\n\n paddle.init(use_gpu=False, trainer_count=1)\n\n # network config\n cost, prob, label = topology(dict_dim, class_num)\n\n # create parameters\n parameters = paddle.parameters.create(cost)\n\n # create optimizer\n adam_optimizer = paddle.optimizer.Adam(\n learning_rate=1e-3,\n regularization=paddle.optimizer.L2Regularization(rate=1e-3),\n model_average=paddle.optimizer.ModelAverage(average_window=0.5))\n\n # create trainer\n trainer = paddle.trainer.SGD(\n cost=cost,\n extra_layers=paddle.evaluator.auc(input=prob, label=label),\n parameters=parameters,\n update_equation=adam_optimizer)\n\n # begin training network\n feeding = {\"word\": 0, \"label\": 1}\n\n def _event_handler(event):\n \"\"\"\n Define end batch and end pass event handler\n \"\"\"\n if isinstance(event, paddle.event.EndIteration):\n if event.batch_id % 100 == 0:\n logger.info(\"Pass %d, Batch %d, Cost %f, %s\\n\" % (\n event.pass_id, event.batch_id, event.cost, event.metrics))\n\n if isinstance(event, paddle.event.EndPass):\n if test_reader is not None:\n result = trainer.test(reader=test_reader, feeding=feeding)\n logger.info(\"Test at Pass %d, %s \\n\" % (event.pass_id,\n result.metrics))\n with gzip.open(\n os.path.join(model_save_dir, \"dnn_params_pass_%05d.tar.gz\" %\n event.pass_id), \"w\") as f:\n trainer.save_parameter_to_tar(f)\n\n trainer.train(\n reader=train_reader,\n event_handler=_event_handler,\n feeding=feeding,\n num_passes=num_passes)\n\n logger.info(\"Training has finished.\")\n\n\ndef main(args):\n if args.nn_type == \"dnn\":\n topology = fc_net\n elif args.nn_type == \"cnn\":\n topology = convolution_net\n\n train(\n topology=topology,\n train_data_dir=args.train_data_dir,\n test_data_dir=args.test_data_dir,\n word_dict_path=args.word_dict,\n label_dict_path=args.label_dict,\n batch_size=args.batch_size,\n num_passes=args.num_passes,\n model_save_dir=args.model_save_dir)\n\n\nif __name__ == \"__main__\":\n args 
= parse_train_cmd()\n if args.train_data_dir is not None:\n assert args.word_dict and args.label_dict, (\n \"the parameter train_data_dir, word_dict_path, and label_dict_path \"\n \"should be set at the same time.\")\n main(args)\n", "path": "text_classification/train.py"}], "after_files": [{"content": "import os\nimport sys\nimport gzip\n\nimport paddle.v2 as paddle\n\nimport reader\nfrom utils import logger, parse_train_cmd, build_dict, load_dict\nfrom network_conf import fc_net, convolution_net\n\n\ndef train(topology,\n train_data_dir=None,\n test_data_dir=None,\n word_dict_path=None,\n label_dict_path=None,\n model_save_dir=\"models\",\n batch_size=32,\n num_passes=10):\n \"\"\"\n train dnn model\n\n\n :params train_data_path: path of training data, if this parameter\n is not specified, paddle.dataset.imdb will be used to run this example\n :type train_data_path: str\n :params test_data_path: path of testing data, if this parameter\n is not specified, paddle.dataset.imdb will be used to run this example\n :type test_data_path: str\n :params word_dict_path: path of training data, if this parameter\n is not specified, paddle.dataset.imdb will be used to run this example\n :type word_dict_path: str\n :params num_pass: train pass number\n :type num_pass: int\n \"\"\"\n if not os.path.exists(model_save_dir):\n os.mkdir(model_save_dir)\n\n use_default_data = (train_data_dir is None)\n\n if use_default_data:\n logger.info((\"No training data are porivided, \"\n \"use paddle.dataset.imdb to train the model.\"))\n logger.info(\"please wait to build the word dictionary ...\")\n\n word_dict = paddle.dataset.imdb.word_dict()\n train_reader = paddle.batch(\n paddle.reader.shuffle(\n lambda: paddle.dataset.imdb.train(word_dict)(), buf_size=1000),\n batch_size=100)\n test_reader = paddle.batch(\n lambda: paddle.dataset.imdb.test(word_dict)(), batch_size=100)\n\n class_num = 2\n else:\n if word_dict_path is None or not os.path.exists(word_dict_path):\n logger.info((\"word dictionary is not given, the dictionary \"\n \"is automatically built from the training data.\"))\n\n # build the word dictionary to map the original string-typed\n # words into integer-typed index\n build_dict(\n data_dir=train_data_dir,\n save_path=word_dict_path,\n use_col=1,\n cutoff_fre=5,\n insert_extra_words=[\"<UNK>\"])\n\n if not os.path.exists(label_dict_path):\n logger.info((\"label dictionary is not given, the dictionary \"\n \"is automatically built from the training data.\"))\n # build the label dictionary to map the original string-typed\n # label into integer-typed index\n build_dict(\n data_dir=train_data_dir, save_path=label_dict_path, use_col=0)\n\n word_dict = load_dict(word_dict_path)\n\n lbl_dict = load_dict(label_dict_path)\n class_num = len(lbl_dict)\n logger.info(\"class number is : %d.\" % (len(lbl_dict)))\n\n train_reader = paddle.batch(\n paddle.reader.shuffle(\n reader.train_reader(train_data_dir, word_dict, lbl_dict),\n buf_size=1000),\n batch_size=batch_size)\n\n if test_data_dir is not None:\n # here, because training and testing data share a same format,\n # we still use the reader.train_reader to read the testing data.\n test_reader = paddle.batch(\n paddle.reader.shuffle(\n reader.train_reader(test_data_dir, word_dict, lbl_dict),\n buf_size=1000),\n batch_size=batch_size)\n else:\n test_reader = None\n\n dict_dim = len(word_dict)\n logger.info(\"length of word dictionary is : %d.\" % (dict_dim))\n\n paddle.init(use_gpu=False, trainer_count=1)\n\n # network config\n cost, prob, label = 
topology(dict_dim, class_num)\n\n # create parameters\n parameters = paddle.parameters.create(cost)\n\n # create optimizer\n adam_optimizer = paddle.optimizer.Adam(\n learning_rate=1e-3,\n regularization=paddle.optimizer.L2Regularization(rate=1e-3),\n model_average=paddle.optimizer.ModelAverage(average_window=0.5))\n\n # create trainer\n trainer = paddle.trainer.SGD(\n cost=cost,\n extra_layers=paddle.evaluator.auc(input=prob, label=label),\n parameters=parameters,\n update_equation=adam_optimizer)\n\n # begin training network\n feeding = {\"word\": 0, \"label\": 1}\n\n def _event_handler(event):\n \"\"\"\n Define end batch and end pass event handler\n \"\"\"\n if isinstance(event, paddle.event.EndIteration):\n if event.batch_id % 100 == 0:\n logger.info(\"Pass %d, Batch %d, Cost %f, %s\\n\" % (\n event.pass_id, event.batch_id, event.cost, event.metrics))\n\n if isinstance(event, paddle.event.EndPass):\n if test_reader is not None:\n result = trainer.test(reader=test_reader, feeding=feeding)\n logger.info(\"Test at Pass %d, %s \\n\" % (event.pass_id,\n result.metrics))\n with gzip.open(\n os.path.join(model_save_dir, \"dnn_params_pass_%05d.tar.gz\" %\n event.pass_id), \"w\") as f:\n trainer.save_parameter_to_tar(f)\n\n trainer.train(\n reader=train_reader,\n event_handler=_event_handler,\n feeding=feeding,\n num_passes=num_passes)\n\n logger.info(\"Training has finished.\")\n\n\ndef main(args):\n if args.nn_type == \"dnn\":\n topology = fc_net\n elif args.nn_type == \"cnn\":\n topology = convolution_net\n\n train(\n topology=topology,\n train_data_dir=args.train_data_dir,\n test_data_dir=args.test_data_dir,\n word_dict_path=args.word_dict,\n label_dict_path=args.label_dict,\n batch_size=args.batch_size,\n num_passes=args.num_passes,\n model_save_dir=args.model_save_dir)\n\n\nif __name__ == \"__main__\":\n args = parse_train_cmd()\n if args.train_data_dir is not None:\n assert args.word_dict and args.label_dict, (\n \"the parameter train_data_dir, word_dict_path, and label_dict_path \"\n \"should be set at the same time.\")\n main(args)\n", "path": "text_classification/train.py"}]} | 2,530 | 171 |
gh_patches_debug_2665 | rasdani/github-patches | git_diff | opsdroid__opsdroid-946 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PyPI deployments are failing
Looks like PyPI deployments are failing. `v0.15.1` and `v0.15.2` haven't gone out.
```
HTTPError: 400 Client Error: The description failed to render in the default format of reStructuredText. See https://pypi.org/help/#description-content-type for more information. for url: https://upload.pypi.org/legacy/
```
PyPI deployments are failing
Looks like PyPI deployments are failing. `v0.15.1` and `v0.15.2` haven't gone out.
```
HTTPError: 400 Client Error: The description failed to render in the default format of reStructuredText. See https://pypi.org/help/#description-content-type for more information. for url: https://upload.pypi.org/legacy/
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python3
2 import os
3 from setuptools import setup, find_packages
4 from setuptools.command.build_py import build_py
5 from setuptools.command.sdist import sdist
6 from setuptools.command.develop import develop
7 import versioneer
8
9 PACKAGE_NAME = 'opsdroid'
10 HERE = os.path.abspath(os.path.dirname(__file__))
11 README = open(os.path.join(HERE, 'README.md'), encoding="utf8").read()
12
13 PACKAGES = find_packages(exclude=['tests', 'tests.*', 'modules',
14 'modules.*', 'docs', 'docs.*'])
15
16
17 # For now we simply define the install_requires based on the contents
18 # of requirements.txt. In the future, install_requires may become much
19 # looser than the (automatically) resolved requirements.txt.
20 with open(os.path.join(HERE, 'requirements.txt'), 'r') as fh:
21 REQUIRES = [line.strip() for line in fh]
22
23
24 class Develop(develop):
25 """Custom `develop` command to always build mo files on install -e."""
26
27 def run(self):
28 self.run_command('compile_catalog')
29 develop.run(self) # old style class
30
31
32 class BuildPy(build_py):
33 """Custom `build_py` command to always build mo files for wheels."""
34
35 def run(self):
36 self.run_command('compile_catalog')
37 build_py.run(self) # old style class
38
39
40 class Sdist(sdist):
41 """Custom `sdist` command to ensure that mo files are always created."""
42
43 def run(self):
44 self.run_command('compile_catalog')
45 sdist.run(self) # old style class
46
47
48 setup(
49 name=PACKAGE_NAME,
50 version=versioneer.get_version(),
51 license='Apache License 2.0',
52 url='https://opsdroid.github.io/',
53 download_url='https://github.com/opsdroid/opsdroid/releases',
54 author='Jacob Tomlinson',
55 author_email='[email protected]',
56 description='An open source ChatOps bot framework.',
57 long_description=README,
58 packages=PACKAGES,
59 include_package_data=True,
60 zip_safe=False,
61 platforms='any',
62 classifiers=[
63 'Development Status :: 4 - Beta',
64 'Environment :: Console',
65 'Framework :: AsyncIO',
66 'Intended Audience :: Developers',
67 'Intended Audience :: System Administrators',
68 'Intended Audience :: Information Technology',
69 'License :: OSI Approved :: Apache Software License',
70 'Programming Language :: Python',
71 'Programming Language :: Python :: 3',
72 'Programming Language :: Python :: 3 :: Only',
73 'Programming Language :: Python :: 3.5',
74 'Programming Language :: Python :: 3.6',
75 'Programming Language :: Python :: 3.7',
76 'Topic :: Communications :: Chat',
77 'Topic :: Scientific/Engineering :: Artificial Intelligence',
78 'Topic :: Software Development :: Libraries :: Python Modules'
79 ],
80 install_requires=REQUIRES,
81 test_suite='tests',
82 keywords=[
83 'bot',
84 'bot-framework',
85 'opsdroid',
86 'botkit',
87 'python3',
88 'asyncio',
89 'chatops',
90 'devops',
91 'nlu'
92 ],
93 setup_requires=['Babel'],
94 cmdclass=versioneer.get_cmdclass({'sdist': Sdist,
95 'build_py': BuildPy,
96 'develop': Develop}),
97 entry_points={
98 'console_scripts': [
99 'opsdroid = opsdroid.__main__:main'
100 ]
101 },
102 )
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -55,6 +55,7 @@
author_email='[email protected]',
description='An open source ChatOps bot framework.',
long_description=README,
+ long_description_content_type='text/markdown',
packages=PACKAGES,
include_package_data=True,
zip_safe=False,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -55,6 +55,7 @@\n author_email='[email protected]',\n description='An open source ChatOps bot framework.',\n long_description=README,\n+ long_description_content_type='text/markdown',\n packages=PACKAGES,\n include_package_data=True,\n zip_safe=False,\n", "issue": "PyPI deployments are failing\nLooks like PyPI deployments are failing. `v0.15.1` and `v0.15.2` haven't gone out.\r\n\r\n```\r\nHTTPError: 400 Client Error: The description failed to render in the default format of reStructuredText. See https://pypi.org/help/#description-content-type for more information. for url: https://upload.pypi.org/legacy/\r\n```\nPyPI deployments are failing\nLooks like PyPI deployments are failing. `v0.15.1` and `v0.15.2` haven't gone out.\r\n\r\n```\r\nHTTPError: 400 Client Error: The description failed to render in the default format of reStructuredText. See https://pypi.org/help/#description-content-type for more information. for url: https://upload.pypi.org/legacy/\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.build_py import build_py\nfrom setuptools.command.sdist import sdist\nfrom setuptools.command.develop import develop\nimport versioneer\n\nPACKAGE_NAME = 'opsdroid'\nHERE = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(HERE, 'README.md'), encoding=\"utf8\").read()\n\nPACKAGES = find_packages(exclude=['tests', 'tests.*', 'modules',\n 'modules.*', 'docs', 'docs.*'])\n\n\n# For now we simply define the install_requires based on the contents\n# of requirements.txt. In the future, install_requires may become much\n# looser than the (automatically) resolved requirements.txt.\nwith open(os.path.join(HERE, 'requirements.txt'), 'r') as fh:\n REQUIRES = [line.strip() for line in fh]\n\n\nclass Develop(develop):\n \"\"\"Custom `develop` command to always build mo files on install -e.\"\"\"\n\n def run(self):\n self.run_command('compile_catalog')\n develop.run(self) # old style class\n\n\nclass BuildPy(build_py):\n \"\"\"Custom `build_py` command to always build mo files for wheels.\"\"\"\n\n def run(self):\n self.run_command('compile_catalog')\n build_py.run(self) # old style class\n\n\nclass Sdist(sdist):\n \"\"\"Custom `sdist` command to ensure that mo files are always created.\"\"\"\n\n def run(self):\n self.run_command('compile_catalog')\n sdist.run(self) # old style class\n\n\nsetup(\n name=PACKAGE_NAME,\n version=versioneer.get_version(),\n license='Apache License 2.0',\n url='https://opsdroid.github.io/',\n download_url='https://github.com/opsdroid/opsdroid/releases',\n author='Jacob Tomlinson',\n author_email='[email protected]',\n description='An open source ChatOps bot framework.',\n long_description=README,\n packages=PACKAGES,\n include_package_data=True,\n zip_safe=False,\n platforms='any',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Framework :: AsyncIO',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Intended Audience :: Information Technology',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Communications :: Chat',\n 'Topic :: 
Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development :: Libraries :: Python Modules'\n ],\n install_requires=REQUIRES,\n test_suite='tests',\n keywords=[\n 'bot',\n 'bot-framework',\n 'opsdroid',\n 'botkit',\n 'python3',\n 'asyncio',\n 'chatops',\n 'devops',\n 'nlu'\n ],\n setup_requires=['Babel'],\n cmdclass=versioneer.get_cmdclass({'sdist': Sdist,\n 'build_py': BuildPy,\n 'develop': Develop}),\n entry_points={\n 'console_scripts': [\n 'opsdroid = opsdroid.__main__:main'\n ]\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python3\nimport os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.build_py import build_py\nfrom setuptools.command.sdist import sdist\nfrom setuptools.command.develop import develop\nimport versioneer\n\nPACKAGE_NAME = 'opsdroid'\nHERE = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(HERE, 'README.md'), encoding=\"utf8\").read()\n\nPACKAGES = find_packages(exclude=['tests', 'tests.*', 'modules',\n 'modules.*', 'docs', 'docs.*'])\n\n\n# For now we simply define the install_requires based on the contents\n# of requirements.txt. In the future, install_requires may become much\n# looser than the (automatically) resolved requirements.txt.\nwith open(os.path.join(HERE, 'requirements.txt'), 'r') as fh:\n REQUIRES = [line.strip() for line in fh]\n\n\nclass Develop(develop):\n \"\"\"Custom `develop` command to always build mo files on install -e.\"\"\"\n\n def run(self):\n self.run_command('compile_catalog')\n develop.run(self) # old style class\n\n\nclass BuildPy(build_py):\n \"\"\"Custom `build_py` command to always build mo files for wheels.\"\"\"\n\n def run(self):\n self.run_command('compile_catalog')\n build_py.run(self) # old style class\n\n\nclass Sdist(sdist):\n \"\"\"Custom `sdist` command to ensure that mo files are always created.\"\"\"\n\n def run(self):\n self.run_command('compile_catalog')\n sdist.run(self) # old style class\n\n\nsetup(\n name=PACKAGE_NAME,\n version=versioneer.get_version(),\n license='Apache License 2.0',\n url='https://opsdroid.github.io/',\n download_url='https://github.com/opsdroid/opsdroid/releases',\n author='Jacob Tomlinson',\n author_email='[email protected]',\n description='An open source ChatOps bot framework.',\n long_description=README,\n long_description_content_type='text/markdown',\n packages=PACKAGES,\n include_package_data=True,\n zip_safe=False,\n platforms='any',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Framework :: AsyncIO',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Intended Audience :: Information Technology',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Communications :: Chat',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development :: Libraries :: Python Modules'\n ],\n install_requires=REQUIRES,\n test_suite='tests',\n keywords=[\n 'bot',\n 'bot-framework',\n 'opsdroid',\n 'botkit',\n 'python3',\n 'asyncio',\n 'chatops',\n 'devops',\n 'nlu'\n ],\n setup_requires=['Babel'],\n cmdclass=versioneer.get_cmdclass({'sdist': Sdist,\n 'build_py': BuildPy,\n 'develop': Develop}),\n entry_points={\n 'console_scripts': [\n 
'opsdroid = opsdroid.__main__:main'\n ]\n },\n)\n", "path": "setup.py"}]} | 1,394 | 89 |
gh_patches_debug_64778 | rasdani/github-patches | git_diff | SCons__scons-3556 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
['SUBSTFILEPREFIX', 'TEXTFILESUFFIX']
**Describe the bug**
From git head, just now.
File: src/engine/SCons/Tool/textfile.py
Line 165:
_text_varlist = _common_varlist + ['TEXTFILEPREFIX', 'TEXTFILESUFFIX']
Line 174:
_subst_varlist = _common_varlist + ['SUBSTFILEPREFIX', 'TEXTFILESUFFIX']
Looks like a cur/paste/edit error. My guess, from the rest of the code, is that
'TEXTFILESUFFIX' should be 'SUBSTFILESUFFIX' on line 174
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/engine/SCons/Tool/textfile.py`
Content:
```
1 # -*- python -*-
2 #
3 # __COPYRIGHT__
4 #
5 # Permission is hereby granted, free of charge, to any person obtaining
6 # a copy of this software and associated documentation files (the
7 # "Software"), to deal in the Software without restriction, including
8 # without limitation the rights to use, copy, modify, merge, publish,
9 # distribute, sublicense, and/or sell copies of the Software, and to
10 # permit persons to whom the Software is furnished to do so, subject to
11 # the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be included
14 # in all copies or substantial portions of the Software.
15 #
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
17 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
18 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
19 # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
20 # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
21 # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
22 # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 #
24
25 __doc__ = """
26 Textfile/Substfile builder for SCons.
27
28 Create file 'target' which typically is a textfile. The 'source'
29 may be any combination of strings, Nodes, or lists of same. A
30 'linesep' will be put between any part written and defaults to
31 os.linesep.
32
33 The only difference between the Textfile builder and the Substfile
34 builder is that strings are converted to Value() nodes for the
35 former and File() nodes for the latter. To insert files in the
36 former or strings in the latter, wrap them in a File() or Value(),
37 respectively.
38
39 The values of SUBST_DICT first have any construction variables
40 expanded (its keys are not expanded). If a value of SUBST_DICT is
41 a python callable function, it is called and the result is expanded
42 as the value. Values are substituted in a "random" order; if any
43 substitution could be further expanded by another substitution, it
44 is unpredictable whether the expansion will occur.
45 """
46
47 __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
48
49 import SCons
50
51 import os
52 import re
53
54 from SCons.Node import Node
55 from SCons.Node.Python import Value
56 from SCons.Util import is_String, is_Sequence, is_Dict, to_bytes
57
58
59 TEXTFILE_FILE_WRITE_MODE = 'w'
60
61 LINESEP = '\n'
62
63 def _do_subst(node, subs):
64 """
65 Fetch the node contents and replace all instances of the keys with
66 their values. For example, if subs is
67 {'%VERSION%': '1.2345', '%BASE%': 'MyProg', '%prefix%': '/bin'},
68 then all instances of %VERSION% in the file will be replaced with
69 1.2345 and so forth.
70 """
71 contents = node.get_text_contents()
72 if subs:
73 for (k, val) in subs:
74 contents = contents.replace(k, val)
75
76 if 'b' in TEXTFILE_FILE_WRITE_MODE:
77 try:
78 contents = bytearray(contents, 'utf-8')
79 except UnicodeDecodeError:
80 # contents is already utf-8 encoded python 2 str i.e. a byte array
81 contents = bytearray(contents)
82
83 return contents
84
85
86 def _action(target, source, env):
87
88 # prepare the line separator
89 linesep = env['LINESEPARATOR']
90 if linesep is None:
91 linesep = LINESEP # os.linesep
92 elif is_String(linesep):
93 pass
94 elif isinstance(linesep, Value):
95 linesep = linesep.get_text_contents()
96 else:
97 raise SCons.Errors.UserError('unexpected type/class for LINESEPARATOR: %s'
98 % repr(linesep), None)
99
100 if 'b' in TEXTFILE_FILE_WRITE_MODE:
101 linesep = to_bytes(linesep)
102
103 # create a dictionary to use for the substitutions
104 if 'SUBST_DICT' not in env:
105 subs = None # no substitutions
106 else:
107 subst_dict = env['SUBST_DICT']
108 if is_Dict(subst_dict):
109 subst_dict = list(subst_dict.items())
110 elif is_Sequence(subst_dict):
111 pass
112 else:
113 raise SCons.Errors.UserError('SUBST_DICT must be dict or sequence')
114 subs = []
115 for (k, value) in subst_dict:
116 if callable(value):
117 value = value()
118 if is_String(value):
119 value = env.subst(value)
120 else:
121 value = str(value)
122 subs.append((k, value))
123
124 # write the file
125 try:
126 target_file = open(target[0].get_path(), TEXTFILE_FILE_WRITE_MODE, newline='')
127 except (OSError, IOError):
128 raise SCons.Errors.UserError("Can't write target file %s" % target[0])
129
130 # separate lines by 'linesep' only if linesep is not empty
131 lsep = None
132 for line in source:
133 if lsep:
134 target_file.write(lsep)
135
136 target_file.write(_do_subst(line, subs))
137 lsep = linesep
138 target_file.close()
139
140
141 def _strfunc(target, source, env):
142 return "Creating '%s'" % target[0]
143
144
145 def _convert_list_R(newlist, sources):
146 for elem in sources:
147 if is_Sequence(elem):
148 _convert_list_R(newlist, elem)
149 elif isinstance(elem, Node):
150 newlist.append(elem)
151 else:
152 newlist.append(Value(elem))
153
154
155 def _convert_list(target, source, env):
156 if len(target) != 1:
157 raise SCons.Errors.UserError("Only one target file allowed")
158 newlist = []
159 _convert_list_R(newlist, source)
160 return target, newlist
161
162
163 _common_varlist = ['SUBST_DICT', 'LINESEPARATOR']
164
165 _text_varlist = _common_varlist + ['TEXTFILEPREFIX', 'TEXTFILESUFFIX']
166 _text_builder = SCons.Builder.Builder(
167 action=SCons.Action.Action(_action, _strfunc, varlist=_text_varlist),
168 source_factory=Value,
169 emitter=_convert_list,
170 prefix='$TEXTFILEPREFIX',
171 suffix='$TEXTFILESUFFIX',
172 )
173
174 _subst_varlist = _common_varlist + ['SUBSTFILEPREFIX', 'TEXTFILESUFFIX']
175 _subst_builder = SCons.Builder.Builder(
176 action=SCons.Action.Action(_action, _strfunc, varlist=_subst_varlist),
177 source_factory=SCons.Node.FS.File,
178 emitter=_convert_list,
179 prefix='$SUBSTFILEPREFIX',
180 suffix='$SUBSTFILESUFFIX',
181 src_suffix=['.in'],
182 )
183
184
185 def generate(env):
186 env['LINESEPARATOR'] = LINESEP # os.linesep
187 env['BUILDERS']['Textfile'] = _text_builder
188 env['TEXTFILEPREFIX'] = ''
189 env['TEXTFILESUFFIX'] = '.txt'
190 env['BUILDERS']['Substfile'] = _subst_builder
191 env['SUBSTFILEPREFIX'] = ''
192 env['SUBSTFILESUFFIX'] = ''
193
194
195 def exists(env):
196 return 1
197
198 # Local Variables:
199 # tab-width:4
200 # indent-tabs-mode:nil
201 # End:
202 # vim: set expandtab tabstop=4 shiftwidth=4:
203
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/engine/SCons/Tool/textfile.py b/src/engine/SCons/Tool/textfile.py
--- a/src/engine/SCons/Tool/textfile.py
+++ b/src/engine/SCons/Tool/textfile.py
@@ -171,7 +171,7 @@
suffix='$TEXTFILESUFFIX',
)
-_subst_varlist = _common_varlist + ['SUBSTFILEPREFIX', 'TEXTFILESUFFIX']
+_subst_varlist = _common_varlist + ['SUBSTFILEPREFIX', 'SUBSTFILESUFFIX']
_subst_builder = SCons.Builder.Builder(
action=SCons.Action.Action(_action, _strfunc, varlist=_subst_varlist),
source_factory=SCons.Node.FS.File,
| {"golden_diff": "diff --git a/src/engine/SCons/Tool/textfile.py b/src/engine/SCons/Tool/textfile.py\n--- a/src/engine/SCons/Tool/textfile.py\n+++ b/src/engine/SCons/Tool/textfile.py\n@@ -171,7 +171,7 @@\n suffix='$TEXTFILESUFFIX',\n )\n \n-_subst_varlist = _common_varlist + ['SUBSTFILEPREFIX', 'TEXTFILESUFFIX']\n+_subst_varlist = _common_varlist + ['SUBSTFILEPREFIX', 'SUBSTFILESUFFIX']\n _subst_builder = SCons.Builder.Builder(\n action=SCons.Action.Action(_action, _strfunc, varlist=_subst_varlist),\n source_factory=SCons.Node.FS.File,\n", "issue": "['SUBSTFILEPREFIX', 'TEXTFILESUFFIX']\n\r\n**Describe the bug**\r\nFrom git head, just now. \r\nFile: src/engine/SCons/Tool/textfile.py\r\nLine 165:\r\n_text_varlist = _common_varlist + ['TEXTFILEPREFIX', 'TEXTFILESUFFIX']\r\n\r\nLine 174:\r\n_subst_varlist = _common_varlist + ['SUBSTFILEPREFIX', 'TEXTFILESUFFIX']\r\n\r\nLooks like a cur/paste/edit error. My guess, from the rest of the code, is that\r\n 'TEXTFILESUFFIX' should be 'SUBSTFILESUFFIX' on line 174\r\n\n", "before_files": [{"content": "# -*- python -*-\n#\n# __COPYRIGHT__\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n\n__doc__ = \"\"\"\nTextfile/Substfile builder for SCons.\n\n Create file 'target' which typically is a textfile. The 'source'\n may be any combination of strings, Nodes, or lists of same. A\n 'linesep' will be put between any part written and defaults to\n os.linesep.\n\n The only difference between the Textfile builder and the Substfile\n builder is that strings are converted to Value() nodes for the\n former and File() nodes for the latter. To insert files in the\n former or strings in the latter, wrap them in a File() or Value(),\n respectively.\n\n The values of SUBST_DICT first have any construction variables\n expanded (its keys are not expanded). If a value of SUBST_DICT is\n a python callable function, it is called and the result is expanded\n as the value. 
Values are substituted in a \"random\" order; if any\n substitution could be further expanded by another substitution, it\n is unpredictable whether the expansion will occur.\n\"\"\"\n\n__revision__ = \"__FILE__ __REVISION__ __DATE__ __DEVELOPER__\"\n\nimport SCons\n\nimport os\nimport re\n\nfrom SCons.Node import Node\nfrom SCons.Node.Python import Value\nfrom SCons.Util import is_String, is_Sequence, is_Dict, to_bytes\n\n\nTEXTFILE_FILE_WRITE_MODE = 'w'\n\nLINESEP = '\\n'\n\ndef _do_subst(node, subs):\n \"\"\"\n Fetch the node contents and replace all instances of the keys with\n their values. For example, if subs is\n {'%VERSION%': '1.2345', '%BASE%': 'MyProg', '%prefix%': '/bin'},\n then all instances of %VERSION% in the file will be replaced with\n 1.2345 and so forth.\n \"\"\"\n contents = node.get_text_contents()\n if subs:\n for (k, val) in subs:\n contents = contents.replace(k, val)\n\n if 'b' in TEXTFILE_FILE_WRITE_MODE:\n try:\n contents = bytearray(contents, 'utf-8')\n except UnicodeDecodeError:\n # contents is already utf-8 encoded python 2 str i.e. a byte array\n contents = bytearray(contents)\n\n return contents\n\n\ndef _action(target, source, env):\n\n # prepare the line separator\n linesep = env['LINESEPARATOR']\n if linesep is None:\n linesep = LINESEP # os.linesep\n elif is_String(linesep):\n pass\n elif isinstance(linesep, Value):\n linesep = linesep.get_text_contents()\n else:\n raise SCons.Errors.UserError('unexpected type/class for LINESEPARATOR: %s'\n % repr(linesep), None)\n\n if 'b' in TEXTFILE_FILE_WRITE_MODE:\n linesep = to_bytes(linesep)\n\n # create a dictionary to use for the substitutions\n if 'SUBST_DICT' not in env:\n subs = None # no substitutions\n else:\n subst_dict = env['SUBST_DICT']\n if is_Dict(subst_dict):\n subst_dict = list(subst_dict.items())\n elif is_Sequence(subst_dict):\n pass\n else:\n raise SCons.Errors.UserError('SUBST_DICT must be dict or sequence')\n subs = []\n for (k, value) in subst_dict:\n if callable(value):\n value = value()\n if is_String(value):\n value = env.subst(value)\n else:\n value = str(value)\n subs.append((k, value))\n\n # write the file\n try:\n target_file = open(target[0].get_path(), TEXTFILE_FILE_WRITE_MODE, newline='')\n except (OSError, IOError):\n raise SCons.Errors.UserError(\"Can't write target file %s\" % target[0])\n\n # separate lines by 'linesep' only if linesep is not empty\n lsep = None\n for line in source:\n if lsep:\n target_file.write(lsep)\n\n target_file.write(_do_subst(line, subs))\n lsep = linesep\n target_file.close()\n\n\ndef _strfunc(target, source, env):\n return \"Creating '%s'\" % target[0]\n\n\ndef _convert_list_R(newlist, sources):\n for elem in sources:\n if is_Sequence(elem):\n _convert_list_R(newlist, elem)\n elif isinstance(elem, Node):\n newlist.append(elem)\n else:\n newlist.append(Value(elem))\n\n\ndef _convert_list(target, source, env):\n if len(target) != 1:\n raise SCons.Errors.UserError(\"Only one target file allowed\")\n newlist = []\n _convert_list_R(newlist, source)\n return target, newlist\n\n\n_common_varlist = ['SUBST_DICT', 'LINESEPARATOR']\n\n_text_varlist = _common_varlist + ['TEXTFILEPREFIX', 'TEXTFILESUFFIX']\n_text_builder = SCons.Builder.Builder(\n action=SCons.Action.Action(_action, _strfunc, varlist=_text_varlist),\n source_factory=Value,\n emitter=_convert_list,\n prefix='$TEXTFILEPREFIX',\n suffix='$TEXTFILESUFFIX',\n)\n\n_subst_varlist = _common_varlist + ['SUBSTFILEPREFIX', 'TEXTFILESUFFIX']\n_subst_builder = SCons.Builder.Builder(\n 
action=SCons.Action.Action(_action, _strfunc, varlist=_subst_varlist),\n source_factory=SCons.Node.FS.File,\n emitter=_convert_list,\n prefix='$SUBSTFILEPREFIX',\n suffix='$SUBSTFILESUFFIX',\n src_suffix=['.in'],\n)\n\n\ndef generate(env):\n env['LINESEPARATOR'] = LINESEP # os.linesep\n env['BUILDERS']['Textfile'] = _text_builder\n env['TEXTFILEPREFIX'] = ''\n env['TEXTFILESUFFIX'] = '.txt'\n env['BUILDERS']['Substfile'] = _subst_builder\n env['SUBSTFILEPREFIX'] = ''\n env['SUBSTFILESUFFIX'] = ''\n\n\ndef exists(env):\n return 1\n\n# Local Variables:\n# tab-width:4\n# indent-tabs-mode:nil\n# End:\n# vim: set expandtab tabstop=4 shiftwidth=4:\n", "path": "src/engine/SCons/Tool/textfile.py"}], "after_files": [{"content": "# -*- python -*-\n#\n# __COPYRIGHT__\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n\n__doc__ = \"\"\"\nTextfile/Substfile builder for SCons.\n\n Create file 'target' which typically is a textfile. The 'source'\n may be any combination of strings, Nodes, or lists of same. A\n 'linesep' will be put between any part written and defaults to\n os.linesep.\n\n The only difference between the Textfile builder and the Substfile\n builder is that strings are converted to Value() nodes for the\n former and File() nodes for the latter. To insert files in the\n former or strings in the latter, wrap them in a File() or Value(),\n respectively.\n\n The values of SUBST_DICT first have any construction variables\n expanded (its keys are not expanded). If a value of SUBST_DICT is\n a python callable function, it is called and the result is expanded\n as the value. Values are substituted in a \"random\" order; if any\n substitution could be further expanded by another substitution, it\n is unpredictable whether the expansion will occur.\n\"\"\"\n\n__revision__ = \"__FILE__ __REVISION__ __DATE__ __DEVELOPER__\"\n\nimport SCons\n\nimport os\nimport re\n\nfrom SCons.Node import Node\nfrom SCons.Node.Python import Value\nfrom SCons.Util import is_String, is_Sequence, is_Dict, to_bytes\n\n\nTEXTFILE_FILE_WRITE_MODE = 'w'\n\nLINESEP = '\\n'\n\ndef _do_subst(node, subs):\n \"\"\"\n Fetch the node contents and replace all instances of the keys with\n their values. 
For example, if subs is\n {'%VERSION%': '1.2345', '%BASE%': 'MyProg', '%prefix%': '/bin'},\n then all instances of %VERSION% in the file will be replaced with\n 1.2345 and so forth.\n \"\"\"\n contents = node.get_text_contents()\n if subs:\n for (k, val) in subs:\n contents = contents.replace(k, val)\n\n if 'b' in TEXTFILE_FILE_WRITE_MODE:\n try:\n contents = bytearray(contents, 'utf-8')\n except UnicodeDecodeError:\n # contents is already utf-8 encoded python 2 str i.e. a byte array\n contents = bytearray(contents)\n\n return contents\n\n\ndef _action(target, source, env):\n\n # prepare the line separator\n linesep = env['LINESEPARATOR']\n if linesep is None:\n linesep = LINESEP # os.linesep\n elif is_String(linesep):\n pass\n elif isinstance(linesep, Value):\n linesep = linesep.get_text_contents()\n else:\n raise SCons.Errors.UserError('unexpected type/class for LINESEPARATOR: %s'\n % repr(linesep), None)\n\n if 'b' in TEXTFILE_FILE_WRITE_MODE:\n linesep = to_bytes(linesep)\n\n # create a dictionary to use for the substitutions\n if 'SUBST_DICT' not in env:\n subs = None # no substitutions\n else:\n subst_dict = env['SUBST_DICT']\n if is_Dict(subst_dict):\n subst_dict = list(subst_dict.items())\n elif is_Sequence(subst_dict):\n pass\n else:\n raise SCons.Errors.UserError('SUBST_DICT must be dict or sequence')\n subs = []\n for (k, value) in subst_dict:\n if callable(value):\n value = value()\n if is_String(value):\n value = env.subst(value)\n else:\n value = str(value)\n subs.append((k, value))\n\n # write the file\n try:\n target_file = open(target[0].get_path(), TEXTFILE_FILE_WRITE_MODE, newline='')\n except (OSError, IOError):\n raise SCons.Errors.UserError(\"Can't write target file %s\" % target[0])\n\n # separate lines by 'linesep' only if linesep is not empty\n lsep = None\n for line in source:\n if lsep:\n target_file.write(lsep)\n\n target_file.write(_do_subst(line, subs))\n lsep = linesep\n target_file.close()\n\n\ndef _strfunc(target, source, env):\n return \"Creating '%s'\" % target[0]\n\n\ndef _convert_list_R(newlist, sources):\n for elem in sources:\n if is_Sequence(elem):\n _convert_list_R(newlist, elem)\n elif isinstance(elem, Node):\n newlist.append(elem)\n else:\n newlist.append(Value(elem))\n\n\ndef _convert_list(target, source, env):\n if len(target) != 1:\n raise SCons.Errors.UserError(\"Only one target file allowed\")\n newlist = []\n _convert_list_R(newlist, source)\n return target, newlist\n\n\n_common_varlist = ['SUBST_DICT', 'LINESEPARATOR']\n\n_text_varlist = _common_varlist + ['TEXTFILEPREFIX', 'TEXTFILESUFFIX']\n_text_builder = SCons.Builder.Builder(\n action=SCons.Action.Action(_action, _strfunc, varlist=_text_varlist),\n source_factory=Value,\n emitter=_convert_list,\n prefix='$TEXTFILEPREFIX',\n suffix='$TEXTFILESUFFIX',\n)\n\n_subst_varlist = _common_varlist + ['SUBSTFILEPREFIX', 'SUBSTFILESUFFIX']\n_subst_builder = SCons.Builder.Builder(\n action=SCons.Action.Action(_action, _strfunc, varlist=_subst_varlist),\n source_factory=SCons.Node.FS.File,\n emitter=_convert_list,\n prefix='$SUBSTFILEPREFIX',\n suffix='$SUBSTFILESUFFIX',\n src_suffix=['.in'],\n)\n\n\ndef generate(env):\n env['LINESEPARATOR'] = LINESEP # os.linesep\n env['BUILDERS']['Textfile'] = _text_builder\n env['TEXTFILEPREFIX'] = ''\n env['TEXTFILESUFFIX'] = '.txt'\n env['BUILDERS']['Substfile'] = _subst_builder\n env['SUBSTFILEPREFIX'] = ''\n env['SUBSTFILESUFFIX'] = ''\n\n\ndef exists(env):\n return 1\n\n# Local Variables:\n# tab-width:4\n# indent-tabs-mode:nil\n# End:\n# vim: set expandtab 
tabstop=4 shiftwidth=4:\n", "path": "src/engine/SCons/Tool/textfile.py"}]} | 2,505 | 153 |
gh_patches_debug_16005 | rasdani/github-patches | git_diff | paperless-ngx__paperless-ngx-2566 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Added Last x Days not including documents added today
### Description
The "Added" filter is not including documents added today when using any of the "Last x" filter options. I have a "Recently Added" view which does not include the most recent documents that were just processed. If I change the filter to a specific date, for example, `After: 1/1/2023`, then the documents are included in the view. If I change to "Last 7 days", "Last Month", "Last 3 Months" or "Last Year", then the most recent document in the view is one I scanned 3 days ago.
### Steps to reproduce
1) Add a new document to Paperless and let it complete processing
2) Open a document view
3) Set the sort to "Added" and descending (z-a)
4) Set the Added filter to "Last 7 days"
- The document added does not show in the view
5) Set the Added filter to "After: 1/1/2023"
- The document added shows in the view
### Webserver logs
```bash
No server messages logged pertaining to the issue
```
### Browser logs
```bash
No browser messages logged pertaining to the issue
```
### Paperless-ngx version
1.12.2
### Host OS
Debian 11 x86_64
### Installation method
Docker - official image
### Browser
Chrome
### Configuration changes
PAPERLESS_TIME_ZONE set to "America/Denver", Date order set to "MDY"
### Other
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/documents/index.py`
Content:
```
1 import logging
2 import math
3 import os
4 from contextlib import contextmanager
5
6 from dateutil.parser import isoparse
7 from django.conf import settings
8 from documents.models import Comment
9 from documents.models import Document
10 from whoosh import classify
11 from whoosh import highlight
12 from whoosh import query
13 from whoosh.fields import BOOLEAN
14 from whoosh.fields import DATETIME
15 from whoosh.fields import KEYWORD
16 from whoosh.fields import NUMERIC
17 from whoosh.fields import Schema
18 from whoosh.fields import TEXT
19 from whoosh.highlight import HtmlFormatter
20 from whoosh.index import create_in
21 from whoosh.index import exists_in
22 from whoosh.index import open_dir
23 from whoosh.qparser import MultifieldParser
24 from whoosh.qparser.dateparse import DateParserPlugin
25 from whoosh.searching import ResultsPage
26 from whoosh.searching import Searcher
27 from whoosh.writing import AsyncWriter
28
29 logger = logging.getLogger("paperless.index")
30
31
32 def get_schema():
33 return Schema(
34 id=NUMERIC(stored=True, unique=True),
35 title=TEXT(sortable=True),
36 content=TEXT(),
37 asn=NUMERIC(sortable=True, signed=False),
38 correspondent=TEXT(sortable=True),
39 correspondent_id=NUMERIC(),
40 has_correspondent=BOOLEAN(),
41 tag=KEYWORD(commas=True, scorable=True, lowercase=True),
42 tag_id=KEYWORD(commas=True, scorable=True),
43 has_tag=BOOLEAN(),
44 type=TEXT(sortable=True),
45 type_id=NUMERIC(),
46 has_type=BOOLEAN(),
47 created=DATETIME(sortable=True),
48 modified=DATETIME(sortable=True),
49 added=DATETIME(sortable=True),
50 path=TEXT(sortable=True),
51 path_id=NUMERIC(),
52 has_path=BOOLEAN(),
53 comments=TEXT(),
54 )
55
56
57 def open_index(recreate=False):
58 try:
59 if exists_in(settings.INDEX_DIR) and not recreate:
60 return open_dir(settings.INDEX_DIR, schema=get_schema())
61 except Exception:
62 logger.exception("Error while opening the index, recreating.")
63
64 if not os.path.isdir(settings.INDEX_DIR):
65 os.makedirs(settings.INDEX_DIR, exist_ok=True)
66 return create_in(settings.INDEX_DIR, get_schema())
67
68
69 @contextmanager
70 def open_index_writer(optimize=False):
71 writer = AsyncWriter(open_index())
72
73 try:
74 yield writer
75 except Exception as e:
76 logger.exception(str(e))
77 writer.cancel()
78 finally:
79 writer.commit(optimize=optimize)
80
81
82 @contextmanager
83 def open_index_searcher():
84 searcher = open_index().searcher()
85
86 try:
87 yield searcher
88 finally:
89 searcher.close()
90
91
92 def update_document(writer, doc):
93 tags = ",".join([t.name for t in doc.tags.all()])
94 tags_ids = ",".join([str(t.id) for t in doc.tags.all()])
95 comments = ",".join([str(c.comment) for c in Comment.objects.filter(document=doc)])
96 writer.update_document(
97 id=doc.pk,
98 title=doc.title,
99 content=doc.content,
100 correspondent=doc.correspondent.name if doc.correspondent else None,
101 correspondent_id=doc.correspondent.id if doc.correspondent else None,
102 has_correspondent=doc.correspondent is not None,
103 tag=tags if tags else None,
104 tag_id=tags_ids if tags_ids else None,
105 has_tag=len(tags) > 0,
106 type=doc.document_type.name if doc.document_type else None,
107 type_id=doc.document_type.id if doc.document_type else None,
108 has_type=doc.document_type is not None,
109 created=doc.created,
110 added=doc.added,
111 asn=doc.archive_serial_number,
112 modified=doc.modified,
113 path=doc.storage_path.name if doc.storage_path else None,
114 path_id=doc.storage_path.id if doc.storage_path else None,
115 has_path=doc.storage_path is not None,
116 comments=comments,
117 )
118
119
120 def remove_document(writer, doc):
121 remove_document_by_id(writer, doc.pk)
122
123
124 def remove_document_by_id(writer, doc_id):
125 writer.delete_by_term("id", doc_id)
126
127
128 def add_or_update_document(document):
129 with open_index_writer() as writer:
130 update_document(writer, document)
131
132
133 def remove_document_from_index(document):
134 with open_index_writer() as writer:
135 remove_document(writer, document)
136
137
138 class DelayedQuery:
139 def _get_query(self):
140 raise NotImplementedError()
141
142 def _get_query_filter(self):
143 criterias = []
144 for k, v in self.query_params.items():
145 if k == "correspondent__id":
146 criterias.append(query.Term("correspondent_id", v))
147 elif k == "tags__id__all":
148 for tag_id in v.split(","):
149 criterias.append(query.Term("tag_id", tag_id))
150 elif k == "tags__id__none":
151 for tag_id in v.split(","):
152 criterias.append(query.Not(query.Term("tag_id", tag_id)))
153 elif k == "document_type__id":
154 criterias.append(query.Term("type_id", v))
155 elif k == "correspondent__isnull":
156 criterias.append(query.Term("has_correspondent", v == "false"))
157 elif k == "is_tagged":
158 criterias.append(query.Term("has_tag", v == "true"))
159 elif k == "document_type__isnull":
160 criterias.append(query.Term("has_type", v == "false"))
161 elif k == "created__date__lt":
162 criterias.append(
163 query.DateRange("created", start=None, end=isoparse(v)),
164 )
165 elif k == "created__date__gt":
166 criterias.append(
167 query.DateRange("created", start=isoparse(v), end=None),
168 )
169 elif k == "added__date__gt":
170 criterias.append(query.DateRange("added", start=isoparse(v), end=None))
171 elif k == "added__date__lt":
172 criterias.append(query.DateRange("added", start=None, end=isoparse(v)))
173 elif k == "storage_path__id":
174 criterias.append(query.Term("path_id", v))
175 elif k == "storage_path__isnull":
176 criterias.append(query.Term("has_path", v == "false"))
177
178 if len(criterias) > 0:
179 return query.And(criterias)
180 else:
181 return None
182
183 def _get_query_sortedby(self):
184 if "ordering" not in self.query_params:
185 return None, False
186
187 field: str = self.query_params["ordering"]
188
189 sort_fields_map = {
190 "created": "created",
191 "modified": "modified",
192 "added": "added",
193 "title": "title",
194 "correspondent__name": "correspondent",
195 "document_type__name": "type",
196 "archive_serial_number": "asn",
197 }
198
199 if field.startswith("-"):
200 field = field[1:]
201 reverse = True
202 else:
203 reverse = False
204
205 if field not in sort_fields_map:
206 return None, False
207 else:
208 return sort_fields_map[field], reverse
209
210 def __init__(self, searcher: Searcher, query_params, page_size):
211 self.searcher = searcher
212 self.query_params = query_params
213 self.page_size = page_size
214 self.saved_results = dict()
215 self.first_score = None
216
217 def __len__(self):
218 page = self[0:1]
219 return len(page)
220
221 def __getitem__(self, item):
222 if item.start in self.saved_results:
223 return self.saved_results[item.start]
224
225 q, mask = self._get_query()
226 sortedby, reverse = self._get_query_sortedby()
227
228 page: ResultsPage = self.searcher.search_page(
229 q,
230 mask=mask,
231 filter=self._get_query_filter(),
232 pagenum=math.floor(item.start / self.page_size) + 1,
233 pagelen=self.page_size,
234 sortedby=sortedby,
235 reverse=reverse,
236 )
237 page.results.fragmenter = highlight.ContextFragmenter(surround=50)
238 page.results.formatter = HtmlFormatter(tagname="span", between=" ... ")
239
240 if not self.first_score and len(page.results) > 0 and sortedby is None:
241 self.first_score = page.results[0].score
242
243 page.results.top_n = list(
244 map(
245 lambda hit: (
246 (hit[0] / self.first_score) if self.first_score else None,
247 hit[1],
248 ),
249 page.results.top_n,
250 ),
251 )
252
253 self.saved_results[item.start] = page
254
255 return page
256
257
258 class DelayedFullTextQuery(DelayedQuery):
259 def _get_query(self):
260 q_str = self.query_params["query"]
261 qp = MultifieldParser(
262 ["content", "title", "correspondent", "tag", "type", "comments"],
263 self.searcher.ixreader.schema,
264 )
265 qp.add_plugin(DateParserPlugin())
266 q = qp.parse(q_str)
267
268 corrected = self.searcher.correct_query(q, q_str)
269 if corrected.query != q:
270 corrected.query = corrected.string
271
272 return q, None
273
274
275 class DelayedMoreLikeThisQuery(DelayedQuery):
276 def _get_query(self):
277 more_like_doc_id = int(self.query_params["more_like_id"])
278 content = Document.objects.get(id=more_like_doc_id).content
279
280 docnum = self.searcher.document_number(id=more_like_doc_id)
281 kts = self.searcher.key_terms_from_text(
282 "content",
283 content,
284 numterms=20,
285 model=classify.Bo1Model,
286 normalize=False,
287 )
288 q = query.Or(
289 [query.Term("content", word, boost=weight) for word, weight in kts],
290 )
291 mask = {docnum}
292
293 return q, mask
294
295
296 def autocomplete(ix, term, limit=10):
297 with ix.reader() as reader:
298 terms = []
299 for (score, t) in reader.most_distinctive_terms(
300 "content",
301 number=limit,
302 prefix=term.lower(),
303 ):
304 terms.append(t)
305 return terms
306
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/documents/index.py b/src/documents/index.py
--- a/src/documents/index.py
+++ b/src/documents/index.py
@@ -5,6 +5,7 @@
from dateutil.parser import isoparse
from django.conf import settings
+from django.utils import timezone
from documents.models import Comment
from documents.models import Document
from whoosh import classify
@@ -262,7 +263,7 @@
["content", "title", "correspondent", "tag", "type", "comments"],
self.searcher.ixreader.schema,
)
- qp.add_plugin(DateParserPlugin())
+ qp.add_plugin(DateParserPlugin(basedate=timezone.now()))
q = qp.parse(q_str)
corrected = self.searcher.correct_query(q, q_str)
| {"golden_diff": "diff --git a/src/documents/index.py b/src/documents/index.py\n--- a/src/documents/index.py\n+++ b/src/documents/index.py\n@@ -5,6 +5,7 @@\n \n from dateutil.parser import isoparse\n from django.conf import settings\n+from django.utils import timezone\n from documents.models import Comment\n from documents.models import Document\n from whoosh import classify\n@@ -262,7 +263,7 @@\n [\"content\", \"title\", \"correspondent\", \"tag\", \"type\", \"comments\"],\n self.searcher.ixreader.schema,\n )\n- qp.add_plugin(DateParserPlugin())\n+ qp.add_plugin(DateParserPlugin(basedate=timezone.now()))\n q = qp.parse(q_str)\n \n corrected = self.searcher.correct_query(q, q_str)\n", "issue": "Added Last x Days not including documents added today\n### Description\n\nThe \"Added\" filter is not including documents added today when using any of the \"Last x\" filter options. I have a \"Recently Added\" view which does not include the most recent documents that were just processed. If I change the filter to a specific date, for example, `After: 1/1/2023`, then the documents are included in the view. If I change to \"Last 7 days\", \"Last Month\", \"Last 3 Months\" or \"Last Year\", then the most recent document in the view is one I scanned 3 days ago. \n\n### Steps to reproduce\n\n1) Add a new document to Paperless and let it complete processing\r\n2) Open a document view\r\n3) Set the sort to \"Added\" and descending (z-a)\r\n4) Set the Added filter to \"Last 7 days\"\r\n - The document added does not show in the view\r\n 5) Set the Added filter to \"After: 1/1/2023\"\r\n - The document added shows in the view\n\n### Webserver logs\n\n```bash\nNo server messages logged pertaining to the issue\n```\n\n\n### Browser logs\n\n```bash\nNo browser messages logged pertaining to the issue\n```\n\n\n### Paperless-ngx version\n\n1.12.2\n\n### Host OS\n\nDebian 11 x86_64\n\n### Installation method\n\nDocker - official image\n\n### Browser\n\nChrome\n\n### Configuration changes\n\nPAPERLESS_TIME_ZONE set to \"America/Denver\", Date order set to \"MDY\"\n\n### Other\n\n_No response_\n", "before_files": [{"content": "import logging\nimport math\nimport os\nfrom contextlib import contextmanager\n\nfrom dateutil.parser import isoparse\nfrom django.conf import settings\nfrom documents.models import Comment\nfrom documents.models import Document\nfrom whoosh import classify\nfrom whoosh import highlight\nfrom whoosh import query\nfrom whoosh.fields import BOOLEAN\nfrom whoosh.fields import DATETIME\nfrom whoosh.fields import KEYWORD\nfrom whoosh.fields import NUMERIC\nfrom whoosh.fields import Schema\nfrom whoosh.fields import TEXT\nfrom whoosh.highlight import HtmlFormatter\nfrom whoosh.index import create_in\nfrom whoosh.index import exists_in\nfrom whoosh.index import open_dir\nfrom whoosh.qparser import MultifieldParser\nfrom whoosh.qparser.dateparse import DateParserPlugin\nfrom whoosh.searching import ResultsPage\nfrom whoosh.searching import Searcher\nfrom whoosh.writing import AsyncWriter\n\nlogger = logging.getLogger(\"paperless.index\")\n\n\ndef get_schema():\n return Schema(\n id=NUMERIC(stored=True, unique=True),\n title=TEXT(sortable=True),\n content=TEXT(),\n asn=NUMERIC(sortable=True, signed=False),\n correspondent=TEXT(sortable=True),\n correspondent_id=NUMERIC(),\n has_correspondent=BOOLEAN(),\n tag=KEYWORD(commas=True, scorable=True, lowercase=True),\n tag_id=KEYWORD(commas=True, scorable=True),\n has_tag=BOOLEAN(),\n type=TEXT(sortable=True),\n type_id=NUMERIC(),\n has_type=BOOLEAN(),\n 
created=DATETIME(sortable=True),\n modified=DATETIME(sortable=True),\n added=DATETIME(sortable=True),\n path=TEXT(sortable=True),\n path_id=NUMERIC(),\n has_path=BOOLEAN(),\n comments=TEXT(),\n )\n\n\ndef open_index(recreate=False):\n try:\n if exists_in(settings.INDEX_DIR) and not recreate:\n return open_dir(settings.INDEX_DIR, schema=get_schema())\n except Exception:\n logger.exception(\"Error while opening the index, recreating.\")\n\n if not os.path.isdir(settings.INDEX_DIR):\n os.makedirs(settings.INDEX_DIR, exist_ok=True)\n return create_in(settings.INDEX_DIR, get_schema())\n\n\n@contextmanager\ndef open_index_writer(optimize=False):\n writer = AsyncWriter(open_index())\n\n try:\n yield writer\n except Exception as e:\n logger.exception(str(e))\n writer.cancel()\n finally:\n writer.commit(optimize=optimize)\n\n\n@contextmanager\ndef open_index_searcher():\n searcher = open_index().searcher()\n\n try:\n yield searcher\n finally:\n searcher.close()\n\n\ndef update_document(writer, doc):\n tags = \",\".join([t.name for t in doc.tags.all()])\n tags_ids = \",\".join([str(t.id) for t in doc.tags.all()])\n comments = \",\".join([str(c.comment) for c in Comment.objects.filter(document=doc)])\n writer.update_document(\n id=doc.pk,\n title=doc.title,\n content=doc.content,\n correspondent=doc.correspondent.name if doc.correspondent else None,\n correspondent_id=doc.correspondent.id if doc.correspondent else None,\n has_correspondent=doc.correspondent is not None,\n tag=tags if tags else None,\n tag_id=tags_ids if tags_ids else None,\n has_tag=len(tags) > 0,\n type=doc.document_type.name if doc.document_type else None,\n type_id=doc.document_type.id if doc.document_type else None,\n has_type=doc.document_type is not None,\n created=doc.created,\n added=doc.added,\n asn=doc.archive_serial_number,\n modified=doc.modified,\n path=doc.storage_path.name if doc.storage_path else None,\n path_id=doc.storage_path.id if doc.storage_path else None,\n has_path=doc.storage_path is not None,\n comments=comments,\n )\n\n\ndef remove_document(writer, doc):\n remove_document_by_id(writer, doc.pk)\n\n\ndef remove_document_by_id(writer, doc_id):\n writer.delete_by_term(\"id\", doc_id)\n\n\ndef add_or_update_document(document):\n with open_index_writer() as writer:\n update_document(writer, document)\n\n\ndef remove_document_from_index(document):\n with open_index_writer() as writer:\n remove_document(writer, document)\n\n\nclass DelayedQuery:\n def _get_query(self):\n raise NotImplementedError()\n\n def _get_query_filter(self):\n criterias = []\n for k, v in self.query_params.items():\n if k == \"correspondent__id\":\n criterias.append(query.Term(\"correspondent_id\", v))\n elif k == \"tags__id__all\":\n for tag_id in v.split(\",\"):\n criterias.append(query.Term(\"tag_id\", tag_id))\n elif k == \"tags__id__none\":\n for tag_id in v.split(\",\"):\n criterias.append(query.Not(query.Term(\"tag_id\", tag_id)))\n elif k == \"document_type__id\":\n criterias.append(query.Term(\"type_id\", v))\n elif k == \"correspondent__isnull\":\n criterias.append(query.Term(\"has_correspondent\", v == \"false\"))\n elif k == \"is_tagged\":\n criterias.append(query.Term(\"has_tag\", v == \"true\"))\n elif k == \"document_type__isnull\":\n criterias.append(query.Term(\"has_type\", v == \"false\"))\n elif k == \"created__date__lt\":\n criterias.append(\n query.DateRange(\"created\", start=None, end=isoparse(v)),\n )\n elif k == \"created__date__gt\":\n criterias.append(\n query.DateRange(\"created\", start=isoparse(v), end=None),\n 
)\n elif k == \"added__date__gt\":\n criterias.append(query.DateRange(\"added\", start=isoparse(v), end=None))\n elif k == \"added__date__lt\":\n criterias.append(query.DateRange(\"added\", start=None, end=isoparse(v)))\n elif k == \"storage_path__id\":\n criterias.append(query.Term(\"path_id\", v))\n elif k == \"storage_path__isnull\":\n criterias.append(query.Term(\"has_path\", v == \"false\"))\n\n if len(criterias) > 0:\n return query.And(criterias)\n else:\n return None\n\n def _get_query_sortedby(self):\n if \"ordering\" not in self.query_params:\n return None, False\n\n field: str = self.query_params[\"ordering\"]\n\n sort_fields_map = {\n \"created\": \"created\",\n \"modified\": \"modified\",\n \"added\": \"added\",\n \"title\": \"title\",\n \"correspondent__name\": \"correspondent\",\n \"document_type__name\": \"type\",\n \"archive_serial_number\": \"asn\",\n }\n\n if field.startswith(\"-\"):\n field = field[1:]\n reverse = True\n else:\n reverse = False\n\n if field not in sort_fields_map:\n return None, False\n else:\n return sort_fields_map[field], reverse\n\n def __init__(self, searcher: Searcher, query_params, page_size):\n self.searcher = searcher\n self.query_params = query_params\n self.page_size = page_size\n self.saved_results = dict()\n self.first_score = None\n\n def __len__(self):\n page = self[0:1]\n return len(page)\n\n def __getitem__(self, item):\n if item.start in self.saved_results:\n return self.saved_results[item.start]\n\n q, mask = self._get_query()\n sortedby, reverse = self._get_query_sortedby()\n\n page: ResultsPage = self.searcher.search_page(\n q,\n mask=mask,\n filter=self._get_query_filter(),\n pagenum=math.floor(item.start / self.page_size) + 1,\n pagelen=self.page_size,\n sortedby=sortedby,\n reverse=reverse,\n )\n page.results.fragmenter = highlight.ContextFragmenter(surround=50)\n page.results.formatter = HtmlFormatter(tagname=\"span\", between=\" ... 
\")\n\n if not self.first_score and len(page.results) > 0 and sortedby is None:\n self.first_score = page.results[0].score\n\n page.results.top_n = list(\n map(\n lambda hit: (\n (hit[0] / self.first_score) if self.first_score else None,\n hit[1],\n ),\n page.results.top_n,\n ),\n )\n\n self.saved_results[item.start] = page\n\n return page\n\n\nclass DelayedFullTextQuery(DelayedQuery):\n def _get_query(self):\n q_str = self.query_params[\"query\"]\n qp = MultifieldParser(\n [\"content\", \"title\", \"correspondent\", \"tag\", \"type\", \"comments\"],\n self.searcher.ixreader.schema,\n )\n qp.add_plugin(DateParserPlugin())\n q = qp.parse(q_str)\n\n corrected = self.searcher.correct_query(q, q_str)\n if corrected.query != q:\n corrected.query = corrected.string\n\n return q, None\n\n\nclass DelayedMoreLikeThisQuery(DelayedQuery):\n def _get_query(self):\n more_like_doc_id = int(self.query_params[\"more_like_id\"])\n content = Document.objects.get(id=more_like_doc_id).content\n\n docnum = self.searcher.document_number(id=more_like_doc_id)\n kts = self.searcher.key_terms_from_text(\n \"content\",\n content,\n numterms=20,\n model=classify.Bo1Model,\n normalize=False,\n )\n q = query.Or(\n [query.Term(\"content\", word, boost=weight) for word, weight in kts],\n )\n mask = {docnum}\n\n return q, mask\n\n\ndef autocomplete(ix, term, limit=10):\n with ix.reader() as reader:\n terms = []\n for (score, t) in reader.most_distinctive_terms(\n \"content\",\n number=limit,\n prefix=term.lower(),\n ):\n terms.append(t)\n return terms\n", "path": "src/documents/index.py"}], "after_files": [{"content": "import logging\nimport math\nimport os\nfrom contextlib import contextmanager\n\nfrom dateutil.parser import isoparse\nfrom django.conf import settings\nfrom django.utils import timezone\nfrom documents.models import Comment\nfrom documents.models import Document\nfrom whoosh import classify\nfrom whoosh import highlight\nfrom whoosh import query\nfrom whoosh.fields import BOOLEAN\nfrom whoosh.fields import DATETIME\nfrom whoosh.fields import KEYWORD\nfrom whoosh.fields import NUMERIC\nfrom whoosh.fields import Schema\nfrom whoosh.fields import TEXT\nfrom whoosh.highlight import HtmlFormatter\nfrom whoosh.index import create_in\nfrom whoosh.index import exists_in\nfrom whoosh.index import open_dir\nfrom whoosh.qparser import MultifieldParser\nfrom whoosh.qparser.dateparse import DateParserPlugin\nfrom whoosh.searching import ResultsPage\nfrom whoosh.searching import Searcher\nfrom whoosh.writing import AsyncWriter\n\nlogger = logging.getLogger(\"paperless.index\")\n\n\ndef get_schema():\n return Schema(\n id=NUMERIC(stored=True, unique=True),\n title=TEXT(sortable=True),\n content=TEXT(),\n asn=NUMERIC(sortable=True, signed=False),\n correspondent=TEXT(sortable=True),\n correspondent_id=NUMERIC(),\n has_correspondent=BOOLEAN(),\n tag=KEYWORD(commas=True, scorable=True, lowercase=True),\n tag_id=KEYWORD(commas=True, scorable=True),\n has_tag=BOOLEAN(),\n type=TEXT(sortable=True),\n type_id=NUMERIC(),\n has_type=BOOLEAN(),\n created=DATETIME(sortable=True),\n modified=DATETIME(sortable=True),\n added=DATETIME(sortable=True),\n path=TEXT(sortable=True),\n path_id=NUMERIC(),\n has_path=BOOLEAN(),\n comments=TEXT(),\n )\n\n\ndef open_index(recreate=False):\n try:\n if exists_in(settings.INDEX_DIR) and not recreate:\n return open_dir(settings.INDEX_DIR, schema=get_schema())\n except Exception:\n logger.exception(\"Error while opening the index, recreating.\")\n\n if not os.path.isdir(settings.INDEX_DIR):\n 
os.makedirs(settings.INDEX_DIR, exist_ok=True)\n return create_in(settings.INDEX_DIR, get_schema())\n\n\n@contextmanager\ndef open_index_writer(optimize=False):\n writer = AsyncWriter(open_index())\n\n try:\n yield writer\n except Exception as e:\n logger.exception(str(e))\n writer.cancel()\n finally:\n writer.commit(optimize=optimize)\n\n\n@contextmanager\ndef open_index_searcher():\n searcher = open_index().searcher()\n\n try:\n yield searcher\n finally:\n searcher.close()\n\n\ndef update_document(writer, doc):\n tags = \",\".join([t.name for t in doc.tags.all()])\n tags_ids = \",\".join([str(t.id) for t in doc.tags.all()])\n comments = \",\".join([str(c.comment) for c in Comment.objects.filter(document=doc)])\n writer.update_document(\n id=doc.pk,\n title=doc.title,\n content=doc.content,\n correspondent=doc.correspondent.name if doc.correspondent else None,\n correspondent_id=doc.correspondent.id if doc.correspondent else None,\n has_correspondent=doc.correspondent is not None,\n tag=tags if tags else None,\n tag_id=tags_ids if tags_ids else None,\n has_tag=len(tags) > 0,\n type=doc.document_type.name if doc.document_type else None,\n type_id=doc.document_type.id if doc.document_type else None,\n has_type=doc.document_type is not None,\n created=doc.created,\n added=doc.added,\n asn=doc.archive_serial_number,\n modified=doc.modified,\n path=doc.storage_path.name if doc.storage_path else None,\n path_id=doc.storage_path.id if doc.storage_path else None,\n has_path=doc.storage_path is not None,\n comments=comments,\n )\n\n\ndef remove_document(writer, doc):\n remove_document_by_id(writer, doc.pk)\n\n\ndef remove_document_by_id(writer, doc_id):\n writer.delete_by_term(\"id\", doc_id)\n\n\ndef add_or_update_document(document):\n with open_index_writer() as writer:\n update_document(writer, document)\n\n\ndef remove_document_from_index(document):\n with open_index_writer() as writer:\n remove_document(writer, document)\n\n\nclass DelayedQuery:\n def _get_query(self):\n raise NotImplementedError()\n\n def _get_query_filter(self):\n criterias = []\n for k, v in self.query_params.items():\n if k == \"correspondent__id\":\n criterias.append(query.Term(\"correspondent_id\", v))\n elif k == \"tags__id__all\":\n for tag_id in v.split(\",\"):\n criterias.append(query.Term(\"tag_id\", tag_id))\n elif k == \"tags__id__none\":\n for tag_id in v.split(\",\"):\n criterias.append(query.Not(query.Term(\"tag_id\", tag_id)))\n elif k == \"document_type__id\":\n criterias.append(query.Term(\"type_id\", v))\n elif k == \"correspondent__isnull\":\n criterias.append(query.Term(\"has_correspondent\", v == \"false\"))\n elif k == \"is_tagged\":\n criterias.append(query.Term(\"has_tag\", v == \"true\"))\n elif k == \"document_type__isnull\":\n criterias.append(query.Term(\"has_type\", v == \"false\"))\n elif k == \"created__date__lt\":\n criterias.append(\n query.DateRange(\"created\", start=None, end=isoparse(v)),\n )\n elif k == \"created__date__gt\":\n criterias.append(\n query.DateRange(\"created\", start=isoparse(v), end=None),\n )\n elif k == \"added__date__gt\":\n criterias.append(query.DateRange(\"added\", start=isoparse(v), end=None))\n elif k == \"added__date__lt\":\n criterias.append(query.DateRange(\"added\", start=None, end=isoparse(v)))\n elif k == \"storage_path__id\":\n criterias.append(query.Term(\"path_id\", v))\n elif k == \"storage_path__isnull\":\n criterias.append(query.Term(\"has_path\", v == \"false\"))\n\n if len(criterias) > 0:\n return query.And(criterias)\n else:\n return None\n\n def 
_get_query_sortedby(self):\n if \"ordering\" not in self.query_params:\n return None, False\n\n field: str = self.query_params[\"ordering\"]\n\n sort_fields_map = {\n \"created\": \"created\",\n \"modified\": \"modified\",\n \"added\": \"added\",\n \"title\": \"title\",\n \"correspondent__name\": \"correspondent\",\n \"document_type__name\": \"type\",\n \"archive_serial_number\": \"asn\",\n }\n\n if field.startswith(\"-\"):\n field = field[1:]\n reverse = True\n else:\n reverse = False\n\n if field not in sort_fields_map:\n return None, False\n else:\n return sort_fields_map[field], reverse\n\n def __init__(self, searcher: Searcher, query_params, page_size):\n self.searcher = searcher\n self.query_params = query_params\n self.page_size = page_size\n self.saved_results = dict()\n self.first_score = None\n\n def __len__(self):\n page = self[0:1]\n return len(page)\n\n def __getitem__(self, item):\n if item.start in self.saved_results:\n return self.saved_results[item.start]\n\n q, mask = self._get_query()\n sortedby, reverse = self._get_query_sortedby()\n\n page: ResultsPage = self.searcher.search_page(\n q,\n mask=mask,\n filter=self._get_query_filter(),\n pagenum=math.floor(item.start / self.page_size) + 1,\n pagelen=self.page_size,\n sortedby=sortedby,\n reverse=reverse,\n )\n page.results.fragmenter = highlight.ContextFragmenter(surround=50)\n page.results.formatter = HtmlFormatter(tagname=\"span\", between=\" ... \")\n\n if not self.first_score and len(page.results) > 0 and sortedby is None:\n self.first_score = page.results[0].score\n\n page.results.top_n = list(\n map(\n lambda hit: (\n (hit[0] / self.first_score) if self.first_score else None,\n hit[1],\n ),\n page.results.top_n,\n ),\n )\n\n self.saved_results[item.start] = page\n\n return page\n\n\nclass DelayedFullTextQuery(DelayedQuery):\n def _get_query(self):\n q_str = self.query_params[\"query\"]\n qp = MultifieldParser(\n [\"content\", \"title\", \"correspondent\", \"tag\", \"type\", \"comments\"],\n self.searcher.ixreader.schema,\n )\n qp.add_plugin(DateParserPlugin(basedate=timezone.now()))\n q = qp.parse(q_str)\n\n corrected = self.searcher.correct_query(q, q_str)\n if corrected.query != q:\n corrected.query = corrected.string\n\n return q, None\n\n\nclass DelayedMoreLikeThisQuery(DelayedQuery):\n def _get_query(self):\n more_like_doc_id = int(self.query_params[\"more_like_id\"])\n content = Document.objects.get(id=more_like_doc_id).content\n\n docnum = self.searcher.document_number(id=more_like_doc_id)\n kts = self.searcher.key_terms_from_text(\n \"content\",\n content,\n numterms=20,\n model=classify.Bo1Model,\n normalize=False,\n )\n q = query.Or(\n [query.Term(\"content\", word, boost=weight) for word, weight in kts],\n )\n mask = {docnum}\n\n return q, mask\n\n\ndef autocomplete(ix, term, limit=10):\n with ix.reader() as reader:\n terms = []\n for (score, t) in reader.most_distinctive_terms(\n \"content\",\n number=limit,\n prefix=term.lower(),\n ):\n terms.append(t)\n return terms\n", "path": "src/documents/index.py"}]} | 3,628 | 170 |
gh_patches_debug_27416 | rasdani/github-patches | git_diff | biolab__orange3-text-380 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tweet Profiler source available?
<!--
This is an issue template. Please fill in the relevant details in the
sections below.
-->
##### Text version
0.3.0
<!-- From menu _Options→Add-ons→Orange3-Text_ or code `orangecontrib.text.version.full_version` -->
##### Orange version
<!-- From menu _Help→About→Version_ or code `Orange.version.full_version` -->
3.13
##### Additional info (worksheets, data, screenshots, ...)
Is the server code for the Tweet Profiler available somewhere? I just started working with the text addon recently and wanted to read how you implemented it and found out that it runs on a server.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `doc/conf.py`
Content:
```
1 #!/usr/bin/env python3
2 # -*- coding: utf-8 -*-
3 #
4 # Orange3 Text Mining documentation build configuration file, created by
5 # sphinx-quickstart on Fri May 8 15:18:26 2015.
6 #
7 # This file is execfile()d with the current directory set to its
8 # containing dir.
9 #
10 # Note that not all possible configuration values are present in this
11 # autogenerated file.
12 #
13 # All configuration values have a default; values that are commented out
14 # serve to show the default.
15
16 import sys
17 import os
18
19 # If extensions (or scripting to document with autodoc) are in another directory,
20 # add these directories to sys.path here. If the directory is relative to the
21 # documentation root, use os.path.abspath to make it absolute, like shown here.
22 sys.path.insert(0, os.path.abspath('..'))
23
24 # -- General configuration ------------------------------------------------
25
26 # If your documentation needs a minimal Sphinx version, state it here.
27 #needs_sphinx = '1.0'
28
29 # Add any Sphinx extension module names here, as strings. They can be
30 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
31 # ones.
32 extensions = [
33 'sphinx.ext.autodoc',
34 'sphinx.ext.doctest',
35 'sphinx.ext.todo',
36 'sphinx.ext.napoleon',
37 'sphinx.ext.viewcode',
38 'sphinx.ext.intersphinx',
39 ]
40
41 # Add any paths that contain templates here, relative to this directory.
42 # templates_path = ['_templates']
43
44 # The suffix of source filenames.
45 source_suffix = '.rst'
46
47 # The encoding of source files.
48 source_encoding = 'utf-8'
49
50 # The master toctree document.
51 master_doc = 'index'
52
53 # General information about the project.
54 project = 'Orange3 Text Mining'
55 copyright = '2015, Laboratory of Bioinformatics, Faculty of Computer Science, University of Ljubljana'
56
57 # The version info for the project you're documenting, acts as replacement for
58 # |version| and |release|, also used in various other places throughout the
59 # built documents.
60 #
61 # The short X.Y version.
62 # version = '0.1'
63 # The full version, including alpha/beta/rc tags.
64 # release = '0.1.1'
65
66 # The language for content autogenerated by Sphinx. Refer to documentation
67 # for a list of supported languages.
68 #language = None
69
70 # There are two options for replacing |today|: either, you set today to some
71 # non-false value, then it is used:
72 #today = ''
73 # Else, today_fmt is used as the format for a strftime call.
74 #today_fmt = '%B %d, %Y'
75
76 # List of patterns, relative to source directory, that match files and
77 # directories to ignore when looking for source files.
78 exclude_patterns = ['_build']
79
80 # The reST default role (used for this markup: `text`) to use for all
81 # documents.
82 #default_role = None
83
84 # If true, '()' will be appended to :func: etc. cross-reference text.
85 #add_function_parentheses = True
86
87 # If true, the current module name will be prepended to all description
88 # unit titles (such as .. function::).
89 #add_module_names = True
90
91 # If true, sectionauthor and moduleauthor directives will be shown in the
92 # output. They are ignored by default.
93 #show_authors = False
94
95 # The name of the Pygments (syntax highlighting) style to use.
96 pygments_style = 'sphinx'
97
98 # A list of ignored prefixes for module index sorting.
99 #modindex_common_prefix = []
100
101 # If true, keep warnings as "system message" paragraphs in the built documents.
102 #keep_warnings = False
103
104
105 # -- Options for HTML output ----------------------------------------------
106
107 # The theme to use for HTML and HTML Help pages. See the documentation for
108 # a list of builtin themes.
109 html_theme = 'nature'
110
111 # Theme options are theme-specific and customize the look and feel of a theme
112 # further. For a list of options available for each theme, see the
113 # documentation.
114 #html_theme_options = {}
115
116 # Add any paths that contain custom themes here, relative to this directory.
117 #html_theme_path = []
118
119 # The name for this set of Sphinx documents. If None, it defaults to
120 # "<project> v<release> documentation".
121 #html_title = None
122
123 # A shorter title for the navigation bar. Default is the same as html_title.
124 #html_short_title = None
125
126 # The name of an image file (relative to this directory) to place at the top
127 # of the sidebar.
128 #html_logo = None
129
130 # The name of an image file (within the static path) to use as favicon of the
131 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
132 # pixels large.
133 #html_favicon = None
134
135 # Add any paths that contain custom static files (such as style sheets) here,
136 # relative to this directory. They are copied after the builtin static files,
137 # so a file named "default.css" will overwrite the builtin "default.css".
138 # html_static_path = ['_static']
139
140 # Add any extra paths that contain custom files (such as robots.txt or
141 # .htaccess) here, relative to this directory. These files are copied
142 # directly to the root of the documentation.
143 #html_extra_path = []
144
145 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
146 # using the given strftime format.
147 #html_last_updated_fmt = '%b %d, %Y'
148
149 # If true, SmartyPants will be used to convert quotes and dashes to
150 # typographically correct entities.
151 #html_use_smartypants = True
152
153 # Custom sidebar templates, maps document names to template names.
154 #html_sidebars = {}
155
156 # Additional templates that should be rendered to pages, maps page names to
157 # template names.
158 #html_additional_pages = {}
159
160 # If false, no module index is generated.
161 #html_domain_indices = True
162
163 # If false, no index is generated.
164 #html_use_index = True
165
166 # If true, the index is split into individual pages for each letter.
167 #html_split_index = False
168
169 # If true, links to the reST sources are added to the pages.
170 #html_show_sourcelink = True
171
172 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
173 #html_show_sphinx = True
174
175 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
176 #html_show_copyright = True
177
178 # If true, an OpenSearch description file will be output, and all pages will
179 # contain a <link> tag referring to it. The value of this option must be the
180 # base URL from which the finished HTML is served.
181 #html_use_opensearch = ''
182
183 # This is the file name suffix for HTML files (e.g. ".xhtml").
184 #html_file_suffix = None
185
186 # Output file base name for HTML help builder.
187 htmlhelp_basename = 'Orange3TextMiningdoc'
188
189
190 # -- Options for LaTeX output ---------------------------------------------
191
192 latex_elements = {
193 # The paper size ('letterpaper' or 'a4paper').
194 #'papersize': 'letterpaper',
195
196 # The font size ('10pt', '11pt' or '12pt').
197 #'pointsize': '10pt',
198
199 # Additional stuff for the LaTeX preamble.
200 #'preamble': '',
201 }
202
203 # Grouping the document tree into LaTeX files. List of tuples
204 # (source start file, target name, title,
205 # author, documentclass [howto, manual, or own class]).
206 latex_documents = [
207 ('index', 'Orange3TextMining.tex', 'Orange3 Text Mining Documentation',
208 'Biolab', 'manual'),
209 ]
210
211 # The name of an image file (relative to this directory) to place at the top of
212 # the title page.
213 #latex_logo = None
214
215 # For "manual" documents, if this is true, then toplevel headings are parts,
216 # not chapters.
217 #latex_use_parts = False
218
219 # If true, show page references after internal links.
220 #latex_show_pagerefs = False
221
222 # If true, show URL addresses after external links.
223 #latex_show_urls = False
224
225 # Documents to append as an appendix to all manuals.
226 #latex_appendices = []
227
228 # If false, no module index is generated.
229 #latex_domain_indices = True
230
231
232 # -- Options for manual page output ---------------------------------------
233
234 # One entry per manual page. List of tuples
235 # (source start file, name, description, authors, manual section).
236 man_pages = [
237 ('index', 'orange3textmining', 'Orange3 Text Mining Documentation',
238 ['Laboratory of Bioinformatics, Faculty of Computer Science, University of Ljubljana'], 1)
239 ]
240
241 # If true, show URL addresses after external links.
242 #man_show_urls = False
243
244
245 # -- Options for Texinfo output -------------------------------------------
246
247 # Grouping the document tree into Texinfo files. List of tuples
248 # (source start file, target name, title, author,
249 # dir menu entry, description, category)
250 texinfo_documents = [
251 ('index', 'Orange3TextMining', 'Orange3 Text Mining Documentation',
252 'Laboratory of Bioinformatics, Faculty of Computer Science, University of Ljubljana', 'Orange3TextMining', 'One line description of project.',
253 'Miscellaneous'),
254 ]
255
256 # Documents to append as an appendix to all manuals.
257 #texinfo_appendices = []
258
259 # If false, no module index is generated.
260 #texinfo_domain_indices = True
261
262 # How to display URL addresses: 'footnote', 'no', or 'inline'.
263 #texinfo_show_urls = 'footnote'
264
265 # If true, do not generate a @detailmenu in the "Top" node's menu.
266 #texinfo_no_detailmenu = False
267
268 # Mapping for external documentation in Sphinx.
269 #intersphinx_mapping = {'orange3': ('../../orange3/doc/visual-programming/build/html',
270 # '../../orange3/doc/visual-programming/build/html/objects.inv')}
271
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/doc/conf.py b/doc/conf.py
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -52,7 +52,7 @@
# General information about the project.
project = 'Orange3 Text Mining'
-copyright = '2015, Laboratory of Bioinformatics, Faculty of Computer Science, University of Ljubljana'
+copyright = '2018, Laboratory of Bioinformatics, Faculty of Computer Science, University of Ljubljana'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
@@ -106,7 +106,7 @@
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
-html_theme = 'nature'
+html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
@@ -268,3 +268,6 @@
# Mapping for external documentation in Sphinx.
#intersphinx_mapping = {'orange3': ('../../orange3/doc/visual-programming/build/html',
# '../../orange3/doc/visual-programming/build/html/objects.inv')}
+
+def setup(app):
+ app.add_stylesheet('style.css')
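For context on the `setup(app)` hook the patch above introduces: Sphinx calls `setup(app)` when it loads `conf.py`, and `app.add_stylesheet()` registers an extra CSS file that has to be reachable through a static path. The sketch below shows how the pieces fit together; the `html_static_path` line and the `style.css` location are assumptions for illustration (the real `conf.py` keeps that setting commented out), and on Sphinx 1.8 or newer the equivalent call is `app.add_css_file()`.

```python
# Illustrative conf.py sketch (assumptions noted above), not the repository file.
html_theme = 'alabaster'

# Assumption for the sketch: expose doc/_static/ so style.css can be found.
html_static_path = ['_static']        # expects doc/_static/style.css to exist

def setup(app):
    # Sphinx invokes this hook after reading conf.py; the call below makes
    # every generated HTML page link the extra stylesheet.
    app.add_stylesheet('style.css')   # Sphinx >= 1.8: app.add_css_file('style.css')
```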
| {"golden_diff": "diff --git a/doc/conf.py b/doc/conf.py\n--- a/doc/conf.py\n+++ b/doc/conf.py\n@@ -52,7 +52,7 @@\n \n # General information about the project.\n project = 'Orange3 Text Mining'\n-copyright = '2015, Laboratory of Bioinformatics, Faculty of Computer Science, University of Ljubljana'\n+copyright = '2018, Laboratory of Bioinformatics, Faculty of Computer Science, University of Ljubljana'\n \n # The version info for the project you're documenting, acts as replacement for\n # |version| and |release|, also used in various other places throughout the\n@@ -106,7 +106,7 @@\n \n # The theme to use for HTML and HTML Help pages. See the documentation for\n # a list of builtin themes.\n-html_theme = 'nature'\n+html_theme = 'alabaster'\n \n # Theme options are theme-specific and customize the look and feel of a theme\n # further. For a list of options available for each theme, see the\n@@ -268,3 +268,6 @@\n # Mapping for external documentation in Sphinx.\n #intersphinx_mapping = {'orange3': ('../../orange3/doc/visual-programming/build/html',\n # '../../orange3/doc/visual-programming/build/html/objects.inv')}\n+\n+def setup(app):\n+ app.add_stylesheet('style.css')\n", "issue": "Tweet Profiler source available?\n<!--\r\nThis is an issue template. Please fill in the relevant details in the\r\nsections below.\r\n-->\r\n\r\n##### Text version\r\n\r\n0.3.0\r\n<!-- From menu _Options\u2192Add-ons\u2192Orange3-Text_ or code `orangecontrib.text.version.full_version` -->\r\n\r\n\r\n##### Orange version\r\n<!-- From menu _Help\u2192About\u2192Version_ or code `Orange.version.full_version` -->\r\n3.13\r\n\r\n##### Additional info (worksheets, data, screenshots, ...)\r\n\r\nIs the server code for the Tweet Profiler available somewhere? I just started working with the text addon recently and wanted to read how you implemented it and found out that it runs on a server.\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Orange3 Text Mining documentation build configuration file, created by\n# sphinx-quickstart on Fri May 8 15:18:26 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nimport os\n\n# If extensions (or scripting to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('..'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.doctest',\n 'sphinx.ext.todo',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.intersphinx',\n]\n\n# Add any paths that contain templates here, relative to this directory.\n# templates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\nsource_encoding = 'utf-8'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Orange3 Text Mining'\ncopyright = '2015, Laboratory of Bioinformatics, Faculty of Computer Science, University of Ljubljana'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\n# version = '0.1'\n# The full version, including alpha/beta/rc tags.\n# release = '0.1.1'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'nature'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Orange3TextMiningdoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n ('index', 'Orange3TextMining.tex', 'Orange3 Text Mining Documentation',\n 'Biolab', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. 
List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'orange3textmining', 'Orange3 Text Mining Documentation',\n ['Laboratory of Bioinformatics, Faculty of Computer Science, University of Ljubljana'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'Orange3TextMining', 'Orange3 Text Mining Documentation',\n 'Laboratory of Bioinformatics, Faculty of Computer Science, University of Ljubljana', 'Orange3TextMining', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\n# Mapping for external documentation in Sphinx.\n#intersphinx_mapping = {'orange3': ('../../orange3/doc/visual-programming/build/html',\n# '../../orange3/doc/visual-programming/build/html/objects.inv')}\n", "path": "doc/conf.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Orange3 Text Mining documentation build configuration file, created by\n# sphinx-quickstart on Fri May 8 15:18:26 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nimport os\n\n# If extensions (or scripting to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('..'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.doctest',\n 'sphinx.ext.todo',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.intersphinx',\n]\n\n# Add any paths that contain templates here, relative to this directory.\n# templates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\nsource_encoding = 'utf-8'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Orange3 Text Mining'\ncopyright = '2018, Laboratory of Bioinformatics, Faculty of Computer Science, University of Ljubljana'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\n# version = '0.1'\n# The full version, including alpha/beta/rc tags.\n# release = '0.1.1'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'alabaster'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Orange3TextMiningdoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n ('index', 'Orange3TextMining.tex', 'Orange3 Text Mining Documentation',\n 'Biolab', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. 
List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'orange3textmining', 'Orange3 Text Mining Documentation',\n ['Laboratory of Bioinformatics, Faculty of Computer Science, University of Ljubljana'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'Orange3TextMining', 'Orange3 Text Mining Documentation',\n 'Laboratory of Bioinformatics, Faculty of Computer Science, University of Ljubljana', 'Orange3TextMining', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\n# Mapping for external documentation in Sphinx.\n#intersphinx_mapping = {'orange3': ('../../orange3/doc/visual-programming/build/html',\n# '../../orange3/doc/visual-programming/build/html/objects.inv')}\n\ndef setup(app):\n app.add_stylesheet('style.css')\n", "path": "doc/conf.py"}]} | 3,312 | 304 |
gh_patches_debug_26907 | rasdani/github-patches | git_diff | google__turbinia-696 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Generate version data from tags and commits
Today we have a hard-coded value in `turbinia/__init__.py`, but it would be nice to generate the version number from the current TAG for releases and from the git commit ID when there is no TAG (i.e. when running from master or a different branch).
--- END ISSUE ---
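One concrete way to do what the issue asks, and the route the reference patch for this record also takes, is `setuptools_scm`: at build time it derives the version from git metadata (the tag when one is checked out, tag plus commit information otherwise), and the runtime code then reads the installed distribution's version back instead of hard-coding a string. A minimal sketch follows; the version strings in the comments are illustrative assumptions, not values taken from the project.

```python
# setup.py sketch: let setuptools_scm compute the package version from git.
from setuptools import setup

setup(
    name='turbinia',
    use_scm_version=True,              # e.g. '20200701' when built exactly on that tag,
                                       # or a '...devN+g<sha>' style string past it
    setup_requires=['setuptools_scm'],
)
```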
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright 2017 Google Inc.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
17 """This is the setup file for the project."""
18
19 # yapf: disable
20
21 from __future__ import unicode_literals
22
23 import sys
24
25 from setuptools import find_packages
26 from setuptools import setup
27
28
29 # make sure turbinia is in path
30 sys.path.insert(0, '.')
31
32 import turbinia # pylint: disable=wrong-import-position
33
34 turbinia_description = (
35 'Turbinia is an open-source framework for deploying, managing, and running'
36 'forensic workloads on cloud platforms. It is intended to automate running '
37 'of common forensic processing tools (i.e. Plaso, TSK, strings, etc) to '
38 'help with processing evidence in the Cloud, scaling the processing of '
39 'large amounts of evidence, and decreasing response time by parallelizing'
40 'processing where possible.')
41
42 requirements = []
43 with open('requirements.txt','r') as f:
44 requirements = f.read().splitlines()
45 setup(
46 name='turbinia',
47 version=turbinia.__version__,
48 description='Automation and Scaling of Digital Forensics Tools',
49 long_description=turbinia_description,
50 license='Apache License, Version 2.0',
51 url='http://turbinia.plumbing/',
52 maintainer='Turbinia development team',
53 maintainer_email='[email protected]',
54 classifiers=[
55 'Development Status :: 4 - Beta',
56 'Environment :: Console',
57 'Operating System :: OS Independent',
58 'Programming Language :: Python',
59 ],
60 packages=find_packages(),
61 include_package_data=True,
62 zip_safe=False,
63 entry_points={'console_scripts': ['turbiniactl=turbinia.turbiniactl:main']},
64 install_requires=requirements,
65 extras_require={
66 'dev': ['mock', 'nose', 'yapf', 'celery~=4.1', 'coverage'],
67 'local': ['celery~=4.1', 'kombu~=4.1', 'redis~=3.0'],
68 'worker': ['docker-explorer>=20191104', 'plaso>=20200430', 'pyhindsight>=20200607']
69 }
70 )
71
```
Path: `turbinia/__init__.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright 2015 Google Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Main Turbinia application."""
16
17 __version__ = '20190819'
18
19 import logging
20 log = logging.getLogger('turbinia')
21
22
23 def log_and_report(message, trace):
24 """Log an error and if enabled, send to GCP Error Reporting API.
25
26 Args:
27 message(str): The user defined message to log.
28 trace(str): The error traceback message to log.
29 """
30 from turbinia import config
31
32 log.error(message)
33 log.error(trace)
34 # If GCP Error Reporting is enabled.
35 config.LoadConfig()
36 if config.STACKDRIVER_TRACEBACK:
37 # Only load google_cloud if needed
38 from turbinia.lib import google_cloud
39 client = google_cloud.setup_stackdriver_traceback(config.TURBINIA_PROJECT)
40 client.report_exception()
41
42
43 class TurbiniaException(Exception):
44 """Turbinia Exception class."""
45 pass
46
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -44,7 +44,6 @@
requirements = f.read().splitlines()
setup(
name='turbinia',
- version=turbinia.__version__,
description='Automation and Scaling of Digital Forensics Tools',
long_description=turbinia_description,
license='Apache License, Version 2.0',
@@ -66,5 +65,7 @@
'dev': ['mock', 'nose', 'yapf', 'celery~=4.1', 'coverage'],
'local': ['celery~=4.1', 'kombu~=4.1', 'redis~=3.0'],
'worker': ['docker-explorer>=20191104', 'plaso>=20200430', 'pyhindsight>=20200607']
- }
+ },
+ use_scm_version=True,
+ setup_requires=['setuptools_scm']
)
diff --git a/turbinia/__init__.py b/turbinia/__init__.py
--- a/turbinia/__init__.py
+++ b/turbinia/__init__.py
@@ -14,11 +14,15 @@
# limitations under the License.
"""Main Turbinia application."""
-__version__ = '20190819'
-
import logging
log = logging.getLogger('turbinia')
+from pkg_resources import get_distribution, DistributionNotFound
+try:
+ __version__ = get_distribution(__name__).version
+except DistributionNotFound:
+ __version__ = "unknown"
+
def log_and_report(message, trace):
"""Log an error and if enabled, send to GCP Error Reporting API.
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -44,7 +44,6 @@\n requirements = f.read().splitlines()\n setup(\n name='turbinia',\n- version=turbinia.__version__,\n description='Automation and Scaling of Digital Forensics Tools',\n long_description=turbinia_description,\n license='Apache License, Version 2.0',\n@@ -66,5 +65,7 @@\n 'dev': ['mock', 'nose', 'yapf', 'celery~=4.1', 'coverage'],\n 'local': ['celery~=4.1', 'kombu~=4.1', 'redis~=3.0'],\n 'worker': ['docker-explorer>=20191104', 'plaso>=20200430', 'pyhindsight>=20200607']\n- }\n+ },\n+ use_scm_version=True,\n+ setup_requires=['setuptools_scm']\n )\ndiff --git a/turbinia/__init__.py b/turbinia/__init__.py\n--- a/turbinia/__init__.py\n+++ b/turbinia/__init__.py\n@@ -14,11 +14,15 @@\n # limitations under the License.\n \"\"\"Main Turbinia application.\"\"\"\n \n-__version__ = '20190819'\n-\n import logging\n log = logging.getLogger('turbinia')\n \n+from pkg_resources import get_distribution, DistributionNotFound\n+try:\n+ __version__ = get_distribution(__name__).version\n+except DistributionNotFound:\n+ __version__ = \"unknown\"\n+\n \n def log_and_report(message, trace):\n \"\"\"Log an error and if enabled, send to GCP Error Reporting API.\n", "issue": "Generate version data from tags and commits\nToday we have a hard-coded value in `turbinia/__init__.py`, but it would be nice to generate the version number from the current TAG for releases and from the git commit id when there is no TAG (ie. when running from master or a different branch).\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This is the setup file for the project.\"\"\"\n\n# yapf: disable\n\nfrom __future__ import unicode_literals\n\nimport sys\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\n# make sure turbinia is in path\nsys.path.insert(0, '.')\n\nimport turbinia # pylint: disable=wrong-import-position\n\nturbinia_description = (\n 'Turbinia is an open-source framework for deploying, managing, and running'\n 'forensic workloads on cloud platforms. It is intended to automate running '\n 'of common forensic processing tools (i.e. 
Plaso, TSK, strings, etc) to '\n 'help with processing evidence in the Cloud, scaling the processing of '\n 'large amounts of evidence, and decreasing response time by parallelizing'\n 'processing where possible.')\n\nrequirements = []\nwith open('requirements.txt','r') as f:\n requirements = f.read().splitlines()\nsetup(\n name='turbinia',\n version=turbinia.__version__,\n description='Automation and Scaling of Digital Forensics Tools',\n long_description=turbinia_description,\n license='Apache License, Version 2.0',\n url='http://turbinia.plumbing/',\n maintainer='Turbinia development team',\n maintainer_email='[email protected]',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n ],\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n entry_points={'console_scripts': ['turbiniactl=turbinia.turbiniactl:main']},\n install_requires=requirements,\n extras_require={\n 'dev': ['mock', 'nose', 'yapf', 'celery~=4.1', 'coverage'],\n 'local': ['celery~=4.1', 'kombu~=4.1', 'redis~=3.0'],\n 'worker': ['docker-explorer>=20191104', 'plaso>=20200430', 'pyhindsight>=20200607']\n }\n)\n", "path": "setup.py"}, {"content": "# -*- coding: utf-8 -*-\n# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Main Turbinia application.\"\"\"\n\n__version__ = '20190819'\n\nimport logging\nlog = logging.getLogger('turbinia')\n\n\ndef log_and_report(message, trace):\n \"\"\"Log an error and if enabled, send to GCP Error Reporting API.\n\n Args:\n message(str): The user defined message to log.\n trace(str): The error traceback message to log.\n \"\"\"\n from turbinia import config\n\n log.error(message)\n log.error(trace)\n # If GCP Error Reporting is enabled.\n config.LoadConfig()\n if config.STACKDRIVER_TRACEBACK:\n # Only load google_cloud if needed\n from turbinia.lib import google_cloud\n client = google_cloud.setup_stackdriver_traceback(config.TURBINIA_PROJECT)\n client.report_exception()\n\n\nclass TurbiniaException(Exception):\n \"\"\"Turbinia Exception class.\"\"\"\n pass\n", "path": "turbinia/__init__.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This is the setup file for the project.\"\"\"\n\n# yapf: disable\n\nfrom __future__ import unicode_literals\n\nimport sys\n\nfrom setuptools import find_packages\nfrom setuptools 
import setup\n\n\n# make sure turbinia is in path\nsys.path.insert(0, '.')\n\nimport turbinia # pylint: disable=wrong-import-position\n\nturbinia_description = (\n 'Turbinia is an open-source framework for deploying, managing, and running'\n 'forensic workloads on cloud platforms. It is intended to automate running '\n 'of common forensic processing tools (i.e. Plaso, TSK, strings, etc) to '\n 'help with processing evidence in the Cloud, scaling the processing of '\n 'large amounts of evidence, and decreasing response time by parallelizing'\n 'processing where possible.')\n\nrequirements = []\nwith open('requirements.txt','r') as f:\n requirements = f.read().splitlines()\nsetup(\n name='turbinia',\n description='Automation and Scaling of Digital Forensics Tools',\n long_description=turbinia_description,\n license='Apache License, Version 2.0',\n url='http://turbinia.plumbing/',\n maintainer='Turbinia development team',\n maintainer_email='[email protected]',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n ],\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n entry_points={'console_scripts': ['turbiniactl=turbinia.turbiniactl:main']},\n install_requires=requirements,\n extras_require={\n 'dev': ['mock', 'nose', 'yapf', 'celery~=4.1', 'coverage'],\n 'local': ['celery~=4.1', 'kombu~=4.1', 'redis~=3.0'],\n 'worker': ['docker-explorer>=20191104', 'plaso>=20200430', 'pyhindsight>=20200607']\n },\n use_scm_version=True,\n setup_requires=['setuptools_scm']\n)\n", "path": "setup.py"}, {"content": "# -*- coding: utf-8 -*-\n# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Main Turbinia application.\"\"\"\n\nimport logging\nlog = logging.getLogger('turbinia')\n\nfrom pkg_resources import get_distribution, DistributionNotFound\ntry:\n __version__ = get_distribution(__name__).version\nexcept DistributionNotFound:\n __version__ = \"unknown\"\n\n\ndef log_and_report(message, trace):\n \"\"\"Log an error and if enabled, send to GCP Error Reporting API.\n\n Args:\n message(str): The user defined message to log.\n trace(str): The error traceback message to log.\n \"\"\"\n from turbinia import config\n\n log.error(message)\n log.error(trace)\n # If GCP Error Reporting is enabled.\n config.LoadConfig()\n if config.STACKDRIVER_TRACEBACK:\n # Only load google_cloud if needed\n from turbinia.lib import google_cloud\n client = google_cloud.setup_stackdriver_traceback(config.TURBINIA_PROJECT)\n client.report_exception()\n\n\nclass TurbiniaException(Exception):\n \"\"\"Turbinia Exception class.\"\"\"\n pass\n", "path": "turbinia/__init__.py"}]} | 1,513 | 400 |
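A side note on the runtime half of the turbinia patch above, where `turbinia/__init__.py` reads the version back via `pkg_resources.get_distribution()`: that works, but `pkg_resources` is comparatively slow to import. On Python 3.8 or newer the standard-library `importlib.metadata` performs the same lookup; the variant below is only an alternative sketch under that assumption, not what the patch ships.

```python
# Alternative runtime version lookup (assumes Python 3.8+).
from importlib.metadata import version, PackageNotFoundError

try:
    __version__ = version('turbinia')
except PackageNotFoundError:
    # e.g. a source checkout that was never pip-installed
    __version__ = 'unknown'
```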
gh_patches_debug_7116 | rasdani/github-patches | git_diff | kivy__kivy-2814 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
codeinput.py example missing Pro.ttf
I was trying the various kivy examples, and one of them, `codeinput.py`, failed with the following error:
```
sinclair@blastocyst $ kivy /Users/sinclair/kivy/examples/widgets/codeinput.py
[INFO ] Kivy v1.8.0
[INFO ] [Logger ] Record log in /Users/sinclair/.kivy/logs/kivy_14-12-27_20.txt
[INFO ] [Factory ] 157 symbols loaded
[DEBUG ] [Cache ] register <kv.lang> with limit=None, timeout=Nones
[DEBUG ] [Cache ] register <kv.image> with limit=None, timeout=60s
[DEBUG ] [Cache ] register <kv.atlas> with limit=None, timeout=Nones
[INFO ] [Image ] Providers: img_imageio, img_tex, img_dds, img_pygame, img_gif (img_pil ignored)
[DEBUG ] [Cache ] register <kv.texture> with limit=1000, timeout=60s
[DEBUG ] [Cache ] register <kv.shader> with limit=1000, timeout=3600s
[INFO ] [Text ] Provider: pygame
[DEBUG ] [Window ] Ignored <egl_rpi> (import error)
[INFO ] [Window ] Provider: pygame(['window_egl_rpi'] ignored)
[DEBUG ] [Window ] Display driver Quartz
[DEBUG ] [Window ] Actual window size: 800x600
[DEBUG ] [Window ] Actual color bits r8 g8 b8 a8
[DEBUG ] [Window ] Actual depth bits: 24
[DEBUG ] [Window ] Actual stencil bits: 8
[DEBUG ] [Window ] Actual multisampling samples: 4
[INFO ] [GL ] OpenGL version <2.1 INTEL-10.0.86>
[INFO ] [GL ] OpenGL vendor <Intel Inc.>
[INFO ] [GL ] OpenGL renderer <Intel HD Graphics 5000 OpenGL Engine>
[INFO ] [GL ] OpenGL parsed version: 2, 1
[INFO ] [GL ] Shading version <1.20>
[INFO ] [GL ] Texture max size <16384>
[INFO ] [GL ] Texture max units <16>
[DEBUG ] [Shader ] Fragment compiled successfully
[DEBUG ] [Shader ] Vertex compiled successfully
[DEBUG ] [ImageImageIO] Load </Applications/Kivy.app/Contents/Resources/kivy/kivy/data/glsl/default.png>
[INFO ] [GL ] BGRA texture support is available
[INFO ] [Window ] virtual keyboard not allowed, single mode, not docked
[DEBUG ] [Cache ] register <kv.loader> with limit=500, timeout=60s
[INFO ] [Loader ] using a thread pool of 2 workers
[DEBUG ] [Cache ] register <textinput.label> with limit=None, timeout=60.0s
[DEBUG ] [Cache ] register <textinput.width> with limit=None, timeout=60.0s
[DEBUG ] [App ] Loading kv </Users/sinclair/kivy/examples/widgets/codeinputtest.kv>
Traceback (most recent call last):
File "/Users/sinclair/kivy/examples/widgets/codeinput.py", line 179, in <module>
CodeInputTest().run()
File "/Applications/Kivy.app/Contents/Resources/kivy/kivy/app.py", line 766, in run
root = self.build()
File "/Users/sinclair/kivy/examples/widgets/codeinput.py", line 111, in build
values=sorted(map(str, fonts.get_fonts())))
File "/Applications/Kivy.app/Contents/Resources/kivy/kivy/uix/spinner.py", line 116, in __init__
self._build_dropdown()
File "/Applications/Kivy.app/Contents/Resources/kivy/kivy/uix/spinner.py", line 130, in _build_dropdown
self._update_dropdown()
File "/Applications/Kivy.app/Contents/Resources/kivy/kivy/uix/spinner.py", line 139, in _update_dropdown
item = cls(text=value)
File "/Applications/Kivy.app/Contents/Resources/kivy/kivy/uix/behaviors.py", line 72, in __init__
super(ButtonBehavior, self).__init__(**kwargs)
File "/Applications/Kivy.app/Contents/Resources/kivy/kivy/uix/label.py", line 141, in __init__
self._create_label()
File "/Applications/Kivy.app/Contents/Resources/kivy/kivy/uix/label.py", line 161, in _create_label
self._label = CoreLabel(**dkw)
File "/Applications/Kivy.app/Contents/Resources/kivy/kivy/core/text/__init__.py", line 136, in __init__
self.resolve_font_name()
File "/Applications/Kivy.app/Contents/Resources/kivy/kivy/core/text/__init__.py", line 196, in resolve_font_name
raise IOError('Label: File %r not found' % fontname)
IOError: Label: File u'/Library/Fonts/ Pro.ttf' not found
```
I am using OS X 10.10.1
I tried adding a font file called "Pro.ttf" at that location to make it happy, but the result was the same.
--- END ISSUE ---
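The traceback bottoms out in Kivy's font resolution: pygame's `font.get_fonts()` can report a system font whose matched path (here the mangled `/Library/Fonts/ Pro.ttf`) does not exist on disk, and Kivy raises `IOError` as soon as a label tries to render with it. The general defence is to check that the file returned by `match_font()` actually exists before handing it to Kivy. A sketch of that idea follows; the helper name is an invention for illustration, not part of the example or of the eventual fix.

```python
import os
from pygame import font as fonts

def usable_fonts():
    """Yield only font names whose matched file really exists on disk."""
    for name in sorted(map(str, fonts.get_fonts())):
        path = fonts.match_font(name)        # may be None or a bogus path
        if path and os.path.exists(path):    # skips mangled entries like ' Pro.ttf'
            yield name
```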
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/widgets/codeinput.py`
Content:
```
1 from kivy.app import App
2 from kivy.extras.highlight import KivyLexer
3 from kivy.uix.spinner import Spinner, SpinnerOption
4 from kivy.uix.boxlayout import BoxLayout
5 from kivy.uix.codeinput import CodeInput
6 from kivy.uix.popup import Popup
7 from kivy.properties import ListProperty
8 from kivy.core.window import Window
9 from pygments import lexers
10 from pygame import font as fonts
11 import codecs, os
12
13 example_text = '''
14 ---------------------Python----------------------------------
15 import kivy
16 kivy.require('1.0.6') # replace with your current kivy version !
17 from kivy.app import App
18 from kivy.uix.button import Button
19
20 class MyApp(App):
21 def build(self):
22 return Button(text='Hello World')
23
24 if __name__ == '__main__':
25 MyApp().run()
26 ----------------------Java-----------------------------------
27
28 public static byte toUnsignedByte(int intVal) {
29 byte byteVal;
30 return (byte)(intVal & 0xFF);
31 }
32 ---------------------kv lang---------------------------------
33 #:kivy 1.0
34
35 <YourWidget>:
36 canvas:
37 Color:
38 rgb: .5, .5, .5
39 Rectangle:
40 pos: self.pos
41 size: self.size
42 ---------------------HTML------------------------------------
43 <!-- Place this tag where you want the +1 button to render. -->
44 <div class="g-plusone" data-annotation="inline" data-width="300"></div>
45
46 <!-- Place this tag after the last +1 button tag. -->
47 <script type="text/javascript">
48 (function() {
49 var po = document.createElement('script');
50 po.type = 'text/javascript';
51 po.async = true;
52 po.src = 'https://apis.google.com/js/plusone.js';
53 var s = document.getElementsByTagName('script')[0];
54 s.parentNode.insertBefore(po, s);
55 })();
56 </script>
57 '''
58
59
60 class Fnt_SpinnerOption(SpinnerOption):
61 pass
62
63
64 class LoadDialog(Popup):
65
66 def load(self, path, selection):
67 self.choosen_file = [None, ]
68 self.choosen_file = selection
69 Window.title = selection[0][selection[0].rfind(os.sep)+1:]
70 self.dismiss()
71
72 def cancel(self):
73 self.dismiss()
74
75
76 class SaveDialog(Popup):
77
78 def save(self, path, selection):
79 _file = codecs.open(selection, 'w', encoding='utf8')
80 _file.write(self.text)
81 Window.title = selection[selection.rfind(os.sep)+1:]
82 _file.close()
83 self.dismiss()
84
85 def cancel(self):
86 self.dismiss()
87
88
89 class CodeInputTest(App):
90
91 files = ListProperty([None, ])
92
93 def build(self):
94 b = BoxLayout(orientation='vertical')
95 languages = Spinner(
96 text='language',
97 values=sorted(['KvLexer', ] + list(lexers.LEXERS.keys())))
98
99 languages.bind(text=self.change_lang)
100
101 menu = BoxLayout(
102 size_hint_y=None,
103 height='30pt')
104 fnt_size = Spinner(
105 text='12',
106 values=list(map(str, list(range(5, 40)))))
107 fnt_size.bind(text=self._update_size)
108 fnt_name = Spinner(
109 text='DroidSansMono',
110 option_cls=Fnt_SpinnerOption,
111 values=sorted(map(str, fonts.get_fonts())))
112 fnt_name.bind(text=self._update_font)
113 mnu_file = Spinner(
114 text='File',
115 values=('Open', 'SaveAs', 'Save', 'Close'))
116 mnu_file.bind(text=self._file_menu_selected)
117
118 menu.add_widget(mnu_file)
119 menu.add_widget(fnt_size)
120 menu.add_widget(fnt_name)
121 menu.add_widget(languages)
122 b.add_widget(menu)
123
124 self.codeinput = CodeInput(
125 lexer=KivyLexer(),
126 font_name='data/fonts/DroidSansMono.ttf', font_size=12,
127 text=example_text)
128
129 b.add_widget(self.codeinput)
130
131 return b
132
133 def _update_size(self, instance, size):
134 self.codeinput.font_size = float(size)
135
136 def _update_font(self, instance, fnt_name):
137 instance.font_name = self.codeinput.font_name =\
138 fonts.match_font(fnt_name)
139
140 def _file_menu_selected(self, instance, value):
141 if value == 'File':
142 return
143 instance.text = 'File'
144 if value == 'Open':
145 if not hasattr(self, 'load_dialog'):
146 self.load_dialog = LoadDialog()
147 self.load_dialog.open()
148 self.load_dialog.bind(choosen_file=self.setter('files'))
149 elif value == 'SaveAs':
150 if not hasattr(self, 'saveas_dialog'):
151 self.saveas_dialog = SaveDialog()
152 self.saveas_dialog.text = self.codeinput.text
153 self.saveas_dialog.open()
154 elif value == 'Save':
155 if self.files[0]:
156 _file = codecs.open(self.files[0], 'w', encoding='utf8')
157 _file.write(self.codeinput.text)
158 _file.close()
159 elif value == 'Close':
160 if self.files[0]:
161 self.codeinput.text = ''
162 Window.title = 'untitled'
163
164 def on_files(self, instance, values):
165 if not values[0]:
166 return
167 _file = codecs.open(values[0], 'r', encoding='utf8')
168 self.codeinput.text = _file.read()
169 _file.close()
170
171 def change_lang(self, instance, l):
172 if l == 'KvLexer':
173 lx = KivyLexer()
174 else:
175 lx = lexers.get_lexer_by_name(lexers.LEXERS[l][2][0])
176 self.codeinput.lexer = lx
177
178 if __name__ == '__main__':
179 CodeInputTest().run()
180
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/widgets/codeinput.py b/examples/widgets/codeinput.py
--- a/examples/widgets/codeinput.py
+++ b/examples/widgets/codeinput.py
@@ -134,8 +134,9 @@
self.codeinput.font_size = float(size)
def _update_font(self, instance, fnt_name):
- instance.font_name = self.codeinput.font_name =\
- fonts.match_font(fnt_name)
+ font_name = fonts.match_font(fnt_name)
+ if os.path.exists(font_name):
+ instance.font_name = self.codeinput.font_name = font_name
def _file_menu_selected(self, instance, value):
if value == 'File':
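One small caveat when reusing the pattern from the patch above: `fonts.match_font()` returns `None` when it finds no file for a name, and `None` is not a valid argument for `os.path.exists()`. A slightly more defensive variant of the same method, a sketch rather than part of the patch, guards for that as well and assumes the example's existing `os` and `fonts` imports:

```python
# Variant of CodeInputTest._update_font from the example above.
def _update_font(self, instance, fnt_name):
    font_name = fonts.match_font(fnt_name)
    # match_font() may return None for unknown names; guard before the existence check.
    if font_name and os.path.exists(font_name):
        instance.font_name = self.codeinput.font_name = font_name
```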
| {"golden_diff": "diff --git a/examples/widgets/codeinput.py b/examples/widgets/codeinput.py\n--- a/examples/widgets/codeinput.py\n+++ b/examples/widgets/codeinput.py\n@@ -134,8 +134,9 @@\n self.codeinput.font_size = float(size)\n \n def _update_font(self, instance, fnt_name):\n- instance.font_name = self.codeinput.font_name =\\\n- fonts.match_font(fnt_name)\n+ font_name = fonts.match_font(fnt_name)\n+ if os.path.exists(font_name):\n+ instance.font_name = self.codeinput.font_name = font_name\n \n def _file_menu_selected(self, instance, value):\n if value == 'File':\n", "issue": "codeinput.py example missing Pro.ttf\nI was trying the various kivy examples, and one of them failed, the `codeinput.py` file, with the following error:\n\n```\nsinclair@blastocyst $ kivy /Users/sinclair/kivy/examples/widgets/codeinput.py\n[INFO ] Kivy v1.8.0\n[INFO ] [Logger ] Record log in /Users/sinclair/.kivy/logs/kivy_14-12-27_20.txt\n[INFO ] [Factory ] 157 symbols loaded\n[DEBUG ] [Cache ] register <kv.lang> with limit=None, timeout=Nones\n[DEBUG ] [Cache ] register <kv.image> with limit=None, timeout=60s\n[DEBUG ] [Cache ] register <kv.atlas> with limit=None, timeout=Nones\n[INFO ] [Image ] Providers: img_imageio, img_tex, img_dds, img_pygame, img_gif (img_pil ignored)\n[DEBUG ] [Cache ] register <kv.texture> with limit=1000, timeout=60s\n[DEBUG ] [Cache ] register <kv.shader> with limit=1000, timeout=3600s\n[INFO ] [Text ] Provider: pygame\n[DEBUG ] [Window ] Ignored <egl_rpi> (import error)\n[INFO ] [Window ] Provider: pygame(['window_egl_rpi'] ignored)\n[DEBUG ] [Window ] Display driver Quartz\n[DEBUG ] [Window ] Actual window size: 800x600\n[DEBUG ] [Window ] Actual color bits r8 g8 b8 a8\n[DEBUG ] [Window ] Actual depth bits: 24\n[DEBUG ] [Window ] Actual stencil bits: 8\n[DEBUG ] [Window ] Actual multisampling samples: 4\n[INFO ] [GL ] OpenGL version <2.1 INTEL-10.0.86>\n[INFO ] [GL ] OpenGL vendor <Intel Inc.>\n[INFO ] [GL ] OpenGL renderer <Intel HD Graphics 5000 OpenGL Engine>\n[INFO ] [GL ] OpenGL parsed version: 2, 1\n[INFO ] [GL ] Shading version <1.20>\n[INFO ] [GL ] Texture max size <16384>\n[INFO ] [GL ] Texture max units <16>\n[DEBUG ] [Shader ] Fragment compiled successfully\n[DEBUG ] [Shader ] Vertex compiled successfully\n[DEBUG ] [ImageImageIO] Load </Applications/Kivy.app/Contents/Resources/kivy/kivy/data/glsl/default.png>\n[INFO ] [GL ] BGRA texture support is available\n[INFO ] [Window ] virtual keyboard not allowed, single mode, not docked\n[DEBUG ] [Cache ] register <kv.loader> with limit=500, timeout=60s\n[INFO ] [Loader ] using a thread pool of 2 workers\n[DEBUG ] [Cache ] register <textinput.label> with limit=None, timeout=60.0s\n[DEBUG ] [Cache ] register <textinput.width> with limit=None, timeout=60.0s\n[DEBUG ] [App ] Loading kv </Users/sinclair/kivy/examples/widgets/codeinputtest.kv>\n Traceback (most recent call last):\n File \"/Users/sinclair/kivy/examples/widgets/codeinput.py\", line 179, in <module>\n CodeInputTest().run()\n File \"/Applications/Kivy.app/Contents/Resources/kivy/kivy/app.py\", line 766, in run\n root = self.build()\n File \"/Users/sinclair/kivy/examples/widgets/codeinput.py\", line 111, in build\n values=sorted(map(str, fonts.get_fonts())))\n File \"/Applications/Kivy.app/Contents/Resources/kivy/kivy/uix/spinner.py\", line 116, in __init__\n self._build_dropdown()\n File \"/Applications/Kivy.app/Contents/Resources/kivy/kivy/uix/spinner.py\", line 130, in _build_dropdown\n self._update_dropdown()\n File 
\"/Applications/Kivy.app/Contents/Resources/kivy/kivy/uix/spinner.py\", line 139, in _update_dropdown\n item = cls(text=value)\n File \"/Applications/Kivy.app/Contents/Resources/kivy/kivy/uix/behaviors.py\", line 72, in __init__\n super(ButtonBehavior, self).__init__(**kwargs)\n File \"/Applications/Kivy.app/Contents/Resources/kivy/kivy/uix/label.py\", line 141, in __init__\n self._create_label()\n File \"/Applications/Kivy.app/Contents/Resources/kivy/kivy/uix/label.py\", line 161, in _create_label\n self._label = CoreLabel(**dkw)\n File \"/Applications/Kivy.app/Contents/Resources/kivy/kivy/core/text/__init__.py\", line 136, in __init__\n self.resolve_font_name()\n File \"/Applications/Kivy.app/Contents/Resources/kivy/kivy/core/text/__init__.py\", line 196, in resolve_font_name\n raise IOError('Label: File %r not found' % fontname)\n IOError: Label: File u'/Library/Fonts/ Pro.ttf' not found\n```\n\nI am using OS X 10.10.1\n\nI tried adding a font file called \"Pro.ttf\" at that location to make it happy, but the result was the same.\n\n", "before_files": [{"content": "from kivy.app import App\nfrom kivy.extras.highlight import KivyLexer\nfrom kivy.uix.spinner import Spinner, SpinnerOption\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.codeinput import CodeInput\nfrom kivy.uix.popup import Popup\nfrom kivy.properties import ListProperty\nfrom kivy.core.window import Window\nfrom pygments import lexers\nfrom pygame import font as fonts\nimport codecs, os\n\nexample_text = '''\n---------------------Python----------------------------------\nimport kivy\nkivy.require('1.0.6') # replace with your current kivy version !\nfrom kivy.app import App\nfrom kivy.uix.button import Button\n\nclass MyApp(App):\n def build(self):\n return Button(text='Hello World')\n\nif __name__ == '__main__':\n MyApp().run()\n----------------------Java-----------------------------------\n\npublic static byte toUnsignedByte(int intVal) {\n byte byteVal;\n return (byte)(intVal & 0xFF);\n}\n---------------------kv lang---------------------------------\n#:kivy 1.0\n\n<YourWidget>:\n canvas:\n Color:\n rgb: .5, .5, .5\n Rectangle:\n pos: self.pos\n size: self.size\n---------------------HTML------------------------------------\n<!-- Place this tag where you want the +1 button to render. -->\n<div class=\"g-plusone\" data-annotation=\"inline\" data-width=\"300\"></div>\n\n<!-- Place this tag after the last +1 button tag. 
-->\n<script type=\"text/javascript\">\n (function() {\n var po = document.createElement('script');\n po.type = 'text/javascript';\n po.async = true;\n po.src = 'https://apis.google.com/js/plusone.js';\n var s = document.getElementsByTagName('script')[0];\n s.parentNode.insertBefore(po, s);\n })();\n</script>\n'''\n\n\nclass Fnt_SpinnerOption(SpinnerOption):\n pass\n\n\nclass LoadDialog(Popup):\n\n def load(self, path, selection):\n self.choosen_file = [None, ]\n self.choosen_file = selection\n Window.title = selection[0][selection[0].rfind(os.sep)+1:]\n self.dismiss()\n\n def cancel(self):\n self.dismiss()\n\n\nclass SaveDialog(Popup):\n\n def save(self, path, selection):\n _file = codecs.open(selection, 'w', encoding='utf8')\n _file.write(self.text)\n Window.title = selection[selection.rfind(os.sep)+1:]\n _file.close()\n self.dismiss()\n\n def cancel(self):\n self.dismiss()\n\n\nclass CodeInputTest(App):\n\n files = ListProperty([None, ])\n\n def build(self):\n b = BoxLayout(orientation='vertical')\n languages = Spinner(\n text='language',\n values=sorted(['KvLexer', ] + list(lexers.LEXERS.keys())))\n\n languages.bind(text=self.change_lang)\n\n menu = BoxLayout(\n size_hint_y=None,\n height='30pt')\n fnt_size = Spinner(\n text='12',\n values=list(map(str, list(range(5, 40)))))\n fnt_size.bind(text=self._update_size)\n fnt_name = Spinner(\n text='DroidSansMono',\n option_cls=Fnt_SpinnerOption,\n values=sorted(map(str, fonts.get_fonts())))\n fnt_name.bind(text=self._update_font)\n mnu_file = Spinner(\n text='File',\n values=('Open', 'SaveAs', 'Save', 'Close'))\n mnu_file.bind(text=self._file_menu_selected)\n\n menu.add_widget(mnu_file)\n menu.add_widget(fnt_size)\n menu.add_widget(fnt_name)\n menu.add_widget(languages)\n b.add_widget(menu)\n\n self.codeinput = CodeInput(\n lexer=KivyLexer(),\n font_name='data/fonts/DroidSansMono.ttf', font_size=12,\n text=example_text)\n\n b.add_widget(self.codeinput)\n\n return b\n\n def _update_size(self, instance, size):\n self.codeinput.font_size = float(size)\n\n def _update_font(self, instance, fnt_name):\n instance.font_name = self.codeinput.font_name =\\\n fonts.match_font(fnt_name)\n\n def _file_menu_selected(self, instance, value):\n if value == 'File':\n return\n instance.text = 'File'\n if value == 'Open':\n if not hasattr(self, 'load_dialog'):\n self.load_dialog = LoadDialog()\n self.load_dialog.open()\n self.load_dialog.bind(choosen_file=self.setter('files'))\n elif value == 'SaveAs':\n if not hasattr(self, 'saveas_dialog'):\n self.saveas_dialog = SaveDialog()\n self.saveas_dialog.text = self.codeinput.text\n self.saveas_dialog.open()\n elif value == 'Save':\n if self.files[0]:\n _file = codecs.open(self.files[0], 'w', encoding='utf8')\n _file.write(self.codeinput.text)\n _file.close()\n elif value == 'Close':\n if self.files[0]:\n self.codeinput.text = ''\n Window.title = 'untitled'\n\n def on_files(self, instance, values):\n if not values[0]:\n return\n _file = codecs.open(values[0], 'r', encoding='utf8')\n self.codeinput.text = _file.read()\n _file.close()\n\n def change_lang(self, instance, l):\n if l == 'KvLexer':\n lx = KivyLexer()\n else:\n lx = lexers.get_lexer_by_name(lexers.LEXERS[l][2][0])\n self.codeinput.lexer = lx\n\nif __name__ == '__main__':\n CodeInputTest().run()\n", "path": "examples/widgets/codeinput.py"}], "after_files": [{"content": "from kivy.app import App\nfrom kivy.extras.highlight import KivyLexer\nfrom kivy.uix.spinner import Spinner, SpinnerOption\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.codeinput 
import CodeInput\nfrom kivy.uix.popup import Popup\nfrom kivy.properties import ListProperty\nfrom kivy.core.window import Window\nfrom pygments import lexers\nfrom pygame import font as fonts\nimport codecs, os\n\nexample_text = '''\n---------------------Python----------------------------------\nimport kivy\nkivy.require('1.0.6') # replace with your current kivy version !\nfrom kivy.app import App\nfrom kivy.uix.button import Button\n\nclass MyApp(App):\n def build(self):\n return Button(text='Hello World')\n\nif __name__ == '__main__':\n MyApp().run()\n----------------------Java-----------------------------------\n\npublic static byte toUnsignedByte(int intVal) {\n byte byteVal;\n return (byte)(intVal & 0xFF);\n}\n---------------------kv lang---------------------------------\n#:kivy 1.0\n\n<YourWidget>:\n canvas:\n Color:\n rgb: .5, .5, .5\n Rectangle:\n pos: self.pos\n size: self.size\n---------------------HTML------------------------------------\n<!-- Place this tag where you want the +1 button to render. -->\n<div class=\"g-plusone\" data-annotation=\"inline\" data-width=\"300\"></div>\n\n<!-- Place this tag after the last +1 button tag. -->\n<script type=\"text/javascript\">\n (function() {\n var po = document.createElement('script');\n po.type = 'text/javascript';\n po.async = true;\n po.src = 'https://apis.google.com/js/plusone.js';\n var s = document.getElementsByTagName('script')[0];\n s.parentNode.insertBefore(po, s);\n })();\n</script>\n'''\n\n\nclass Fnt_SpinnerOption(SpinnerOption):\n pass\n\n\nclass LoadDialog(Popup):\n\n def load(self, path, selection):\n self.choosen_file = [None, ]\n self.choosen_file = selection\n Window.title = selection[0][selection[0].rfind(os.sep)+1:]\n self.dismiss()\n\n def cancel(self):\n self.dismiss()\n\n\nclass SaveDialog(Popup):\n\n def save(self, path, selection):\n _file = codecs.open(selection, 'w', encoding='utf8')\n _file.write(self.text)\n Window.title = selection[selection.rfind(os.sep)+1:]\n _file.close()\n self.dismiss()\n\n def cancel(self):\n self.dismiss()\n\n\nclass CodeInputTest(App):\n\n files = ListProperty([None, ])\n\n def build(self):\n b = BoxLayout(orientation='vertical')\n languages = Spinner(\n text='language',\n values=sorted(['KvLexer', ] + list(lexers.LEXERS.keys())))\n\n languages.bind(text=self.change_lang)\n\n menu = BoxLayout(\n size_hint_y=None,\n height='30pt')\n fnt_size = Spinner(\n text='12',\n values=list(map(str, list(range(5, 40)))))\n fnt_size.bind(text=self._update_size)\n fnt_name = Spinner(\n text='DroidSansMono',\n option_cls=Fnt_SpinnerOption,\n values=sorted(map(str, fonts.get_fonts())))\n fnt_name.bind(text=self._update_font)\n mnu_file = Spinner(\n text='File',\n values=('Open', 'SaveAs', 'Save', 'Close'))\n mnu_file.bind(text=self._file_menu_selected)\n\n menu.add_widget(mnu_file)\n menu.add_widget(fnt_size)\n menu.add_widget(fnt_name)\n menu.add_widget(languages)\n b.add_widget(menu)\n\n self.codeinput = CodeInput(\n lexer=KivyLexer(),\n font_name='data/fonts/DroidSansMono.ttf', font_size=12,\n text=example_text)\n\n b.add_widget(self.codeinput)\n\n return b\n\n def _update_size(self, instance, size):\n self.codeinput.font_size = float(size)\n\n def _update_font(self, instance, fnt_name):\n font_name = fonts.match_font(fnt_name)\n if os.path.exists(font_name):\n instance.font_name = self.codeinput.font_name = font_name\n\n def _file_menu_selected(self, instance, value):\n if value == 'File':\n return\n instance.text = 'File'\n if value == 'Open':\n if not hasattr(self, 'load_dialog'):\n 
self.load_dialog = LoadDialog()\n self.load_dialog.open()\n self.load_dialog.bind(choosen_file=self.setter('files'))\n elif value == 'SaveAs':\n if not hasattr(self, 'saveas_dialog'):\n self.saveas_dialog = SaveDialog()\n self.saveas_dialog.text = self.codeinput.text\n self.saveas_dialog.open()\n elif value == 'Save':\n if self.files[0]:\n _file = codecs.open(self.files[0], 'w', encoding='utf8')\n _file.write(self.codeinput.text)\n _file.close()\n elif value == 'Close':\n if self.files[0]:\n self.codeinput.text = ''\n Window.title = 'untitled'\n\n def on_files(self, instance, values):\n if not values[0]:\n return\n _file = codecs.open(values[0], 'r', encoding='utf8')\n self.codeinput.text = _file.read()\n _file.close()\n\n def change_lang(self, instance, l):\n if l == 'KvLexer':\n lx = KivyLexer()\n else:\n lx = lexers.get_lexer_by_name(lexers.LEXERS[l][2][0])\n self.codeinput.lexer = lx\n\nif __name__ == '__main__':\n CodeInputTest().run()\n", "path": "examples/widgets/codeinput.py"}]} | 3,197 | 147 |
gh_patches_debug_2116 | rasdani/github-patches | git_diff | comic__grand-challenge.org-3383 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Viewer configuration does not show linking options
**Describe the bug**
The view and edit pages for viewer configurations no longer show options to set the linking configuration.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to https://grand-challenge.org/viewer-configurations/demo-rse/
2. Scroll down to 'Linking Configuration'
The options displayed are duplicates of the 'Plugin and Tools' section.
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Desktop (please complete the following information):**
- OS: [e.g. iOS]
- Browser [e.g. chrome, safari]
- Version [e.g. 22]
**Smartphone (please complete the following information):**
- Device: [e.g. iPhone6]
- OS: [e.g. iOS8.1]
- Browser [e.g. stock browser, safari]
- Version [e.g. 22]
**Additional context**
Add any other context about the problem here.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/grandchallenge/workstation_configs/forms.py`
Content:
```
1 from crispy_forms.helper import FormHelper
2 from crispy_forms.layout import Fieldset, Layout, Submit
3 from django.forms import ModelForm
4 from django_select2.forms import Select2MultipleWidget
5
6 from grandchallenge.core.forms import SaveFormInitMixin
7 from grandchallenge.core.widgets import ColorEditorWidget, JSONEditorWidget
8 from grandchallenge.workstation_configs.models import (
9 KEY_BINDINGS_SCHEMA,
10 OVERLAY_SEGMENTS_SCHEMA,
11 WorkstationConfig,
12 )
13
14 GENERAL_FIELDS = (
15 "title",
16 "description",
17 "image_context",
18 "window_presets",
19 "default_window_preset",
20 "default_slab_thickness_mm",
21 "default_slab_render_method",
22 "default_orientation",
23 "default_image_interpolation",
24 "default_limit_view_area_to_image_volume",
25 "default_overlay_alpha",
26 "ghosting_slice_depth",
27 "overlay_luts",
28 "default_overlay_lut",
29 "default_overlay_interpolation",
30 "overlay_segments",
31 "key_bindings",
32 "default_zoom_scale",
33 "default_brush_size",
34 "default_annotation_color",
35 "default_annotation_line_width",
36 "auto_jump_center_of_gravity",
37 "point_bounding_box_size_mm",
38 )
39 PLUGIN_FIELDS = (
40 "show_image_info_plugin",
41 "show_display_plugin",
42 "show_image_switcher_plugin",
43 "show_algorithm_output_plugin",
44 "show_overlay_plugin",
45 "show_annotation_statistics_plugin",
46 "show_swivel_tool",
47 "show_invert_tool",
48 "show_flip_tool",
49 "show_window_level_tool",
50 "show_reset_tool",
51 "show_overlay_selection_tool",
52 "show_lut_selection_tool",
53 "show_annotation_counter_tool",
54 "enable_contrast_enhancement",
55 )
56 LINKED_FIELDS = (
57 "link_images",
58 "link_panning",
59 "link_zooming",
60 "link_slicing",
61 "link_orienting",
62 "link_windowing",
63 "link_inverting",
64 "link_flipping",
65 )
66
67
68 class WorkstationConfigForm(SaveFormInitMixin, ModelForm):
69 def __init__(self, *args, read_only=False, **kwargs):
70 super().__init__(*args, **kwargs)
71
72 self.helper = FormHelper(self)
73 self.helper.layout = Layout(
74 Fieldset("", *GENERAL_FIELDS),
75 Fieldset(
76 "Plugins and Tools",
77 *PLUGIN_FIELDS,
78 css_class="border rounded px-2 my-4",
79 ),
80 Fieldset(
81 "Linking Configuration",
82 *PLUGIN_FIELDS,
83 css_class="border rounded px-2 my-4",
84 ),
85 )
86
87 if read_only:
88 for field in self.fields:
89 self.fields[field].disabled = True
90 else:
91 self.helper.layout.append(Submit("save", "Save"))
92
93 class Meta:
94 model = WorkstationConfig
95 fields = (
96 *GENERAL_FIELDS,
97 *PLUGIN_FIELDS,
98 *LINKED_FIELDS,
99 )
100
101 widgets = {
102 "overlay_segments": JSONEditorWidget(
103 schema=OVERLAY_SEGMENTS_SCHEMA
104 ),
105 "key_bindings": JSONEditorWidget(schema=KEY_BINDINGS_SCHEMA),
106 "default_annotation_color": ColorEditorWidget(format="hex"),
107 "window_presets": Select2MultipleWidget,
108 "overlay_luts": Select2MultipleWidget,
109 }
110 help_texts = {
111 "overlay_segments": (
112 model._meta.get_field("overlay_segments").help_text
113 + ". If an categorical overlay is shown, it is possible to show toggles "
114 "to change the visibility of the different overlay categories. To do "
115 "so, configure the categories that should be displayed. Data from the"
116 " algorithm's output.json can be added as an extra label to each "
117 "toggle using jinja templating. "
118 'For example: [{ "voxel_value": 0, "name": "Level 0", "visible": '
119 'false, "metric_template": "{{metrics.volumes[0]}} mm³"},]'
120 ),
121 "key_bindings": model._meta.get_field("key_bindings").help_text
122 + ". A copy and paste JSON can be obtained from the viewer.",
123 }
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/grandchallenge/workstation_configs/forms.py b/app/grandchallenge/workstation_configs/forms.py
--- a/app/grandchallenge/workstation_configs/forms.py
+++ b/app/grandchallenge/workstation_configs/forms.py
@@ -79,7 +79,7 @@
),
Fieldset(
"Linking Configuration",
- *PLUGIN_FIELDS,
+ *LINKED_FIELDS,
css_class="border rounded px-2 my-4",
),
)
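
As an aside, the fix is a one-word change: the "Linking Configuration" `Fieldset` was being built from `*PLUGIN_FIELDS` instead of `*LINKED_FIELDS`, a copy-paste slip. A regression test along the following lines could pin that down. This is only an illustrative sketch, not part of the patch: the test name and structure are invented, it assumes a Django test context where the form can be instantiated, and it assumes crispy-forms keeps a `Fieldset`'s legend and field names on its `legend` and `fields` attributes.

```python
# Hypothetical regression test for the copy-paste slip (illustrative only).
from grandchallenge.workstation_configs.forms import (
    LINKED_FIELDS,
    WorkstationConfigForm,
)


def test_linking_fieldset_lists_the_linking_fields():
    form = WorkstationConfigForm()
    # Layout order in __init__: general fields, "Plugins and Tools",
    # then "Linking Configuration".
    linking_fieldset = form.helper.layout.fields[2]
    assert linking_fieldset.legend == "Linking Configuration"
    assert list(linking_fieldset.fields) == list(LINKED_FIELDS)
```
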
| {"golden_diff": "diff --git a/app/grandchallenge/workstation_configs/forms.py b/app/grandchallenge/workstation_configs/forms.py\n--- a/app/grandchallenge/workstation_configs/forms.py\n+++ b/app/grandchallenge/workstation_configs/forms.py\n@@ -79,7 +79,7 @@\n ),\n Fieldset(\n \"Linking Configuration\",\n- *PLUGIN_FIELDS,\n+ *LINKED_FIELDS,\n css_class=\"border rounded px-2 my-4\",\n ),\n )\n", "issue": "Viewer configuration does not show linking options\n**Describe the bug**\r\nThe view and edit pages for viewer configurations no longer show options to set the linking configuration.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to https://grand-challenge.org/viewer-configurations/demo-rse/\r\n2. Scroll down to 'Linking Configuration'\r\nThe options displayed are duplicates of the 'Plugin and Tools' section.\r\n\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: [e.g. iOS]\r\n - Browser [e.g. chrome, safari]\r\n - Version [e.g. 22]\r\n\r\n**Smartphone (please complete the following information):**\r\n - Device: [e.g. iPhone6]\r\n - OS: [e.g. iOS8.1]\r\n - Browser [e.g. stock browser, safari]\r\n - Version [e.g. 22]\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "from crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Fieldset, Layout, Submit\nfrom django.forms import ModelForm\nfrom django_select2.forms import Select2MultipleWidget\n\nfrom grandchallenge.core.forms import SaveFormInitMixin\nfrom grandchallenge.core.widgets import ColorEditorWidget, JSONEditorWidget\nfrom grandchallenge.workstation_configs.models import (\n KEY_BINDINGS_SCHEMA,\n OVERLAY_SEGMENTS_SCHEMA,\n WorkstationConfig,\n)\n\nGENERAL_FIELDS = (\n \"title\",\n \"description\",\n \"image_context\",\n \"window_presets\",\n \"default_window_preset\",\n \"default_slab_thickness_mm\",\n \"default_slab_render_method\",\n \"default_orientation\",\n \"default_image_interpolation\",\n \"default_limit_view_area_to_image_volume\",\n \"default_overlay_alpha\",\n \"ghosting_slice_depth\",\n \"overlay_luts\",\n \"default_overlay_lut\",\n \"default_overlay_interpolation\",\n \"overlay_segments\",\n \"key_bindings\",\n \"default_zoom_scale\",\n \"default_brush_size\",\n \"default_annotation_color\",\n \"default_annotation_line_width\",\n \"auto_jump_center_of_gravity\",\n \"point_bounding_box_size_mm\",\n)\nPLUGIN_FIELDS = (\n \"show_image_info_plugin\",\n \"show_display_plugin\",\n \"show_image_switcher_plugin\",\n \"show_algorithm_output_plugin\",\n \"show_overlay_plugin\",\n \"show_annotation_statistics_plugin\",\n \"show_swivel_tool\",\n \"show_invert_tool\",\n \"show_flip_tool\",\n \"show_window_level_tool\",\n \"show_reset_tool\",\n \"show_overlay_selection_tool\",\n \"show_lut_selection_tool\",\n \"show_annotation_counter_tool\",\n \"enable_contrast_enhancement\",\n)\nLINKED_FIELDS = (\n \"link_images\",\n \"link_panning\",\n \"link_zooming\",\n \"link_slicing\",\n \"link_orienting\",\n \"link_windowing\",\n \"link_inverting\",\n \"link_flipping\",\n)\n\n\nclass WorkstationConfigForm(SaveFormInitMixin, ModelForm):\n def __init__(self, *args, read_only=False, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.helper = FormHelper(self)\n self.helper.layout = Layout(\n Fieldset(\"\", *GENERAL_FIELDS),\n Fieldset(\n 
\"Plugins and Tools\",\n *PLUGIN_FIELDS,\n css_class=\"border rounded px-2 my-4\",\n ),\n Fieldset(\n \"Linking Configuration\",\n *PLUGIN_FIELDS,\n css_class=\"border rounded px-2 my-4\",\n ),\n )\n\n if read_only:\n for field in self.fields:\n self.fields[field].disabled = True\n else:\n self.helper.layout.append(Submit(\"save\", \"Save\"))\n\n class Meta:\n model = WorkstationConfig\n fields = (\n *GENERAL_FIELDS,\n *PLUGIN_FIELDS,\n *LINKED_FIELDS,\n )\n\n widgets = {\n \"overlay_segments\": JSONEditorWidget(\n schema=OVERLAY_SEGMENTS_SCHEMA\n ),\n \"key_bindings\": JSONEditorWidget(schema=KEY_BINDINGS_SCHEMA),\n \"default_annotation_color\": ColorEditorWidget(format=\"hex\"),\n \"window_presets\": Select2MultipleWidget,\n \"overlay_luts\": Select2MultipleWidget,\n }\n help_texts = {\n \"overlay_segments\": (\n model._meta.get_field(\"overlay_segments\").help_text\n + \". If an categorical overlay is shown, it is possible to show toggles \"\n \"to change the visibility of the different overlay categories. To do \"\n \"so, configure the categories that should be displayed. Data from the\"\n \" algorithm's output.json can be added as an extra label to each \"\n \"toggle using jinja templating. \"\n 'For example: [{ \"voxel_value\": 0, \"name\": \"Level 0\", \"visible\": '\n 'false, \"metric_template\": \"{{metrics.volumes[0]}} mm\u00b3\"},]'\n ),\n \"key_bindings\": model._meta.get_field(\"key_bindings\").help_text\n + \". A copy and paste JSON can be obtained from the viewer.\",\n }\n", "path": "app/grandchallenge/workstation_configs/forms.py"}], "after_files": [{"content": "from crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Fieldset, Layout, Submit\nfrom django.forms import ModelForm\nfrom django_select2.forms import Select2MultipleWidget\n\nfrom grandchallenge.core.forms import SaveFormInitMixin\nfrom grandchallenge.core.widgets import ColorEditorWidget, JSONEditorWidget\nfrom grandchallenge.workstation_configs.models import (\n KEY_BINDINGS_SCHEMA,\n OVERLAY_SEGMENTS_SCHEMA,\n WorkstationConfig,\n)\n\nGENERAL_FIELDS = (\n \"title\",\n \"description\",\n \"image_context\",\n \"window_presets\",\n \"default_window_preset\",\n \"default_slab_thickness_mm\",\n \"default_slab_render_method\",\n \"default_orientation\",\n \"default_image_interpolation\",\n \"default_limit_view_area_to_image_volume\",\n \"default_overlay_alpha\",\n \"ghosting_slice_depth\",\n \"overlay_luts\",\n \"default_overlay_lut\",\n \"default_overlay_interpolation\",\n \"overlay_segments\",\n \"key_bindings\",\n \"default_zoom_scale\",\n \"default_brush_size\",\n \"default_annotation_color\",\n \"default_annotation_line_width\",\n \"auto_jump_center_of_gravity\",\n \"point_bounding_box_size_mm\",\n)\nPLUGIN_FIELDS = (\n \"show_image_info_plugin\",\n \"show_display_plugin\",\n \"show_image_switcher_plugin\",\n \"show_algorithm_output_plugin\",\n \"show_overlay_plugin\",\n \"show_annotation_statistics_plugin\",\n \"show_swivel_tool\",\n \"show_invert_tool\",\n \"show_flip_tool\",\n \"show_window_level_tool\",\n \"show_reset_tool\",\n \"show_overlay_selection_tool\",\n \"show_lut_selection_tool\",\n \"show_annotation_counter_tool\",\n \"enable_contrast_enhancement\",\n)\nLINKED_FIELDS = (\n \"link_images\",\n \"link_panning\",\n \"link_zooming\",\n \"link_slicing\",\n \"link_orienting\",\n \"link_windowing\",\n \"link_inverting\",\n \"link_flipping\",\n)\n\n\nclass WorkstationConfigForm(SaveFormInitMixin, ModelForm):\n def __init__(self, *args, read_only=False, **kwargs):\n 
super().__init__(*args, **kwargs)\n\n self.helper = FormHelper(self)\n self.helper.layout = Layout(\n Fieldset(\"\", *GENERAL_FIELDS),\n Fieldset(\n \"Plugins and Tools\",\n *PLUGIN_FIELDS,\n css_class=\"border rounded px-2 my-4\",\n ),\n Fieldset(\n \"Linking Configuration\",\n *LINKED_FIELDS,\n css_class=\"border rounded px-2 my-4\",\n ),\n )\n\n if read_only:\n for field in self.fields:\n self.fields[field].disabled = True\n else:\n self.helper.layout.append(Submit(\"save\", \"Save\"))\n\n class Meta:\n model = WorkstationConfig\n fields = (\n *GENERAL_FIELDS,\n *PLUGIN_FIELDS,\n *LINKED_FIELDS,\n )\n\n widgets = {\n \"overlay_segments\": JSONEditorWidget(\n schema=OVERLAY_SEGMENTS_SCHEMA\n ),\n \"key_bindings\": JSONEditorWidget(schema=KEY_BINDINGS_SCHEMA),\n \"default_annotation_color\": ColorEditorWidget(format=\"hex\"),\n \"window_presets\": Select2MultipleWidget,\n \"overlay_luts\": Select2MultipleWidget,\n }\n help_texts = {\n \"overlay_segments\": (\n model._meta.get_field(\"overlay_segments\").help_text\n + \". If an categorical overlay is shown, it is possible to show toggles \"\n \"to change the visibility of the different overlay categories. To do \"\n \"so, configure the categories that should be displayed. Data from the\"\n \" algorithm's output.json can be added as an extra label to each \"\n \"toggle using jinja templating. \"\n 'For example: [{ \"voxel_value\": 0, \"name\": \"Level 0\", \"visible\": '\n 'false, \"metric_template\": \"{{metrics.volumes[0]}} mm\u00b3\"},]'\n ),\n \"key_bindings\": model._meta.get_field(\"key_bindings\").help_text\n + \". A copy and paste JSON can be obtained from the viewer.\",\n }\n", "path": "app/grandchallenge/workstation_configs/forms.py"}]} | 1,621 | 102 |
gh_patches_debug_6050 | rasdani/github-patches | git_diff | learningequality__kolibri-3563 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Very long, CPU-intensive process after asking to import selected Khan Academy resources
### Observed behavior
Need more info on this, but this is the best I can produce right now. Observe the 566 minutes of accumulated CPU time (the TIME+ column) in the `top` output below:
```
top - 01:39:58 up 2 days, 11:30, 1 user, load average: 2.10, 2.17, 2.17
Tasks: 287 total, 1 running, 286 sleeping, 0 stopped, 0 zombie
%Cpu(s): 45.6 us, 12.9 sy, 0.0 ni, 41.5 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
KiB Mem : 8054696 total, 904492 free, 5504828 used, 1645376 buff/cache
KiB Swap: 16381948 total, 14482384 free, 1899564 used. 1499508 avail Mem
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
15123 kolibri 20 0 3474360 1.112g 10440 S 201.0 14.5 566:35.82 kolibri
```
### Steps to reproduce
Not sure, but I have asked to download a selected subset of the KA resources, and my laptop has been stuck for hours.
### Context
Kolibri 0.9.0, debian installation source, Ubuntu 16.04
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/content/management/commands/importcontent.py`
Content:
```
1 import logging as logger
2 import os
3
4 from django.conf import settings
5 from django.core.management.base import CommandError
6 from requests.exceptions import HTTPError
7
8 from ...utils import annotation
9 from ...utils import import_export_content
10 from ...utils import paths
11 from ...utils import transfer
12 from kolibri.tasks.management.commands.base import AsyncCommand
13
14 # constants to specify the transfer method to be used
15 DOWNLOAD_METHOD = "download"
16 COPY_METHOD = "copy"
17
18 logging = logger.getLogger(__name__)
19
20
21 class Command(AsyncCommand):
22
23 def add_arguments(self, parser):
24 # let's save the parser in case we need to print a help statement
25 self._parser = parser
26
27 # we want two groups of arguments. One group is when the
28 # 'importcontent disk' command is given, where we'll expect a file
29 # directory to be given. Another is the 'importcontent network'
30 # command to be given, where we'll expect a channel.
31
32 # However, some optional arguments apply to both groups. Add them here!
33 node_ids_help_text = """
34 Specify one or more node IDs to import. Only the files associated to those node IDs will be imported.
35
36 e.g.
37
38 kolibri manage importcontent --node_ids <id1>,<id2>, [<ids>,...] {network, disk} <channel id>
39 """
40 parser.add_argument(
41 "--node_ids", "-n",
42 # Split the comma separated string we get, into a list of strings
43 type=lambda x: x.split(","),
44 default=[],
45 required=False,
46 dest="node_ids",
47 help=node_ids_help_text,
48 )
49
50 exclude_node_ids_help_text = """
51 Specify one or more node IDs to exclude. Files associated to those node IDs will be not be imported.
52
53 e.g.
54
55 kolibri manage importcontent --exclude_node_ids <id1>,<id2>, [<ids>,...] {network, disk} <channel id>
56 """
57 parser.add_argument(
58 "--exclude_node_ids",
59 # Split the comma separated string we get, into a list of string
60 type=lambda x: x.split(","),
61 default=[],
62 required=False,
63 dest="exclude_node_ids",
64 help=exclude_node_ids_help_text
65 )
66
67 parser.add_argument(
68 "--include-unrenderable-content",
69 action='store_false',
70 default=True,
71 dest="renderable_only",
72 help="Import all content, not just that which this Kolibri instance can render"
73 )
74
75 # to implement these two groups of commands and their corresponding
76 # arguments, we'll need argparse.subparsers.
77 subparsers = parser.add_subparsers(dest='command', help="The following subcommands are available.")
78
79 # the network command has a channel id required positional argument,
80 # and some optional content_id arguments.
81
82 # TODO: implement a --content-domain parameter, for optionally
83 # specifying the domain for the curation server.
84
85 # Note: cmd should be the management command instance, as though the
86 # interface for adding arguments is argparse, Django overrides the
87 # parser object with its own thing, hence why we need to add cmd. See
88 # http://stackoverflow.com/questions/36706220/is-it-possible-to-create-subparsers-in-a-django-management-command
89 network_subparser = subparsers.add_parser(
90 name='network',
91 cmd=self,
92 help="Download the given channel through the network.",
93 )
94 network_subparser.add_argument('channel_id', type=str)
95
96 default_studio_url = settings.CENTRAL_CONTENT_DOWNLOAD_BASE_URL
97 network_subparser.add_argument(
98 "--baseurl",
99 type=str,
100 default=default_studio_url,
101 dest="baseurl",
102 )
103
104 disk_subparser = subparsers.add_parser(
105 name='disk',
106 cmd=self,
107 help='Copy the content from the given folder.'
108 )
109 disk_subparser.add_argument('channel_id', type=str)
110 disk_subparser.add_argument('directory', type=str)
111
112 def download_content(self, channel_id, node_ids=None, exclude_node_ids=None, baseurl=None, renderable_only=True):
113 self._transfer(DOWNLOAD_METHOD, channel_id, node_ids=node_ids, exclude_node_ids=exclude_node_ids, baseurl=baseurl, renderable_only=renderable_only)
114
115 def copy_content(self, channel_id, path, node_ids=None, exclude_node_ids=None, renderable_only=True):
116 self._transfer(COPY_METHOD, channel_id, path=path, node_ids=node_ids, exclude_node_ids=exclude_node_ids, renderable_only=renderable_only)
117
118 def _transfer(self, method, channel_id, path=None, node_ids=None, exclude_node_ids=None, baseurl=None, renderable_only=True): # noqa: max-complexity=16
119
120 files_to_download, total_bytes_to_transfer = import_export_content.get_files_to_transfer(
121 channel_id, node_ids, exclude_node_ids, False, renderable_only=renderable_only)
122
123 number_of_skipped_files = 0
124 file_checksums_to_annotate = []
125
126 with self.start_progress(total=total_bytes_to_transfer) as overall_progress_update:
127
128 for f in files_to_download:
129
130 if self.is_cancelled():
131 break
132
133 filename = f.get_filename()
134 dest = paths.get_content_storage_file_path(filename)
135
136 # if the file already exists, add its size to our overall progress, and skip
137 if os.path.isfile(dest) and os.path.getsize(dest) == f.file_size:
138 overall_progress_update(f.file_size)
139 file_checksums_to_annotate.append(f.id)
140 continue
141
142 # determine where we're downloading/copying from, and create appropriate transfer object
143 if method == DOWNLOAD_METHOD:
144 url = paths.get_content_storage_remote_url(filename, baseurl=baseurl)
145 filetransfer = transfer.FileDownload(url, dest)
146 elif method == COPY_METHOD:
147 srcpath = paths.get_content_storage_file_path(filename, datafolder=path)
148 filetransfer = transfer.FileCopy(srcpath, dest)
149
150 try:
151
152 with filetransfer:
153
154 with self.start_progress(total=filetransfer.total_size) as file_dl_progress_update:
155
156 for chunk in filetransfer:
157 if self.is_cancelled():
158 filetransfer.cancel()
159 break
160 length = len(chunk)
161 overall_progress_update(length)
162 file_dl_progress_update(length)
163
164 file_checksums_to_annotate.append(f.id)
165
166 except HTTPError:
167 overall_progress_update(f.file_size)
168
169 except OSError:
170 number_of_skipped_files += 1
171 overall_progress_update(f.file_size)
172
173 annotation.set_availability(channel_id, file_checksums_to_annotate)
174
175 if number_of_skipped_files > 0:
176 logging.warning(
177 "{} files are skipped, because they are not found in the given external drive.".format(
178 number_of_skipped_files))
179
180 if self.is_cancelled():
181 self.cancel()
182
183 def handle_async(self, *args, **options):
184 if options['command'] == 'network':
185 self.download_content(options["channel_id"],
186 node_ids=options["node_ids"],
187 exclude_node_ids=options['exclude_node_ids'],
188 baseurl=options["baseurl"],
189 renderable_only=options["renderable_only"])
190 elif options['command'] == 'disk':
191 self.copy_content(options["channel_id"],
192 options["directory"],
193 node_ids=options["node_ids"],
194 exclude_node_ids=options["exclude_node_ids"],
195 renderable_only=options["renderable_only"])
196 else:
197 self._parser.print_help()
198 raise CommandError("Please give a valid subcommand. You gave: {}".format(options["command"]))
199
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kolibri/content/management/commands/importcontent.py b/kolibri/content/management/commands/importcontent.py
--- a/kolibri/content/management/commands/importcontent.py
+++ b/kolibri/content/management/commands/importcontent.py
@@ -67,7 +67,7 @@
parser.add_argument(
"--include-unrenderable-content",
action='store_false',
- default=True,
+ default=False,
dest="renderable_only",
help="Import all content, not just that which this Kolibri instance can render"
)
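
The patch itself only moves the argparse default: with `action='store_false'`, the parser stores `default` when the flag is absent and forces the destination to `False` whenever `--include-unrenderable-content` is passed. Flipping `default=True` to `default=False` therefore means a plain `importcontent` run no longer requests renderable-only filtering. The snippet below is a standalone illustration using plain `argparse` rather than Django's management-command wrapper; the helper function is invented and only the flag and `dest` names mirror the command above.

```python
# Standalone illustration of the store_false default change (not Kolibri code).
import argparse


def renderable_only(default, argv):
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--include-unrenderable-content",
        action="store_false",
        default=default,  # value used when the flag is not given
        dest="renderable_only",
    )
    return parser.parse_args(argv).renderable_only


# Before the patch (default=True): filtering is on unless the flag is passed.
assert renderable_only(True, []) is True
assert renderable_only(True, ["--include-unrenderable-content"]) is False

# After the patch (default=False): filtering stays off either way.
assert renderable_only(False, []) is False
assert renderable_only(False, ["--include-unrenderable-content"]) is False
```
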
| {"golden_diff": "diff --git a/kolibri/content/management/commands/importcontent.py b/kolibri/content/management/commands/importcontent.py\n--- a/kolibri/content/management/commands/importcontent.py\n+++ b/kolibri/content/management/commands/importcontent.py\n@@ -67,7 +67,7 @@\n parser.add_argument(\n \"--include-unrenderable-content\",\n action='store_false',\n- default=True,\n+ default=False,\n dest=\"renderable_only\",\n help=\"Import all content, not just that which this Kolibri instance can render\"\n )\n", "issue": "Very long, CPU-intensive process after asking to import selected Khan Academy resources\n### Observed behavior\r\n\r\nNeed more info on this, but this is the best I can produce right now.. observe the 566m of CPU usage:\r\n\r\n```\r\ntop - 01:39:58 up 2 days, 11:30, 1 user, load average: 2.10, 2.17, 2.17\r\nTasks: 287 total, 1 running, 286 sleeping, 0 stopped, 0 zombie\r\n%Cpu(s): 45.6 us, 12.9 sy, 0.0 ni, 41.5 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st\r\nKiB Mem : 8054696 total, 904492 free, 5504828 used, 1645376 buff/cache\r\nKiB Swap: 16381948 total, 14482384 free, 1899564 used. 1499508 avail Mem \r\n\r\n PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND \r\n15123 kolibri 20 0 3474360 1.112g 10440 S 201.0 14.5 566:35.82 kolibri \r\n```\r\n\r\n### Steps to reproduce\r\n\r\nNot sure, but I have asked to download a select sub-part of the KA resources, and my laptop is stuck for hours.\r\n\r\n\r\n### Context\r\n\r\nKolibri 0.9.0, debian installation source, Ubuntu 16.04\r\n\n", "before_files": [{"content": "import logging as logger\nimport os\n\nfrom django.conf import settings\nfrom django.core.management.base import CommandError\nfrom requests.exceptions import HTTPError\n\nfrom ...utils import annotation\nfrom ...utils import import_export_content\nfrom ...utils import paths\nfrom ...utils import transfer\nfrom kolibri.tasks.management.commands.base import AsyncCommand\n\n# constants to specify the transfer method to be used\nDOWNLOAD_METHOD = \"download\"\nCOPY_METHOD = \"copy\"\n\nlogging = logger.getLogger(__name__)\n\n\nclass Command(AsyncCommand):\n\n def add_arguments(self, parser):\n # let's save the parser in case we need to print a help statement\n self._parser = parser\n\n # we want two groups of arguments. One group is when the\n # 'importcontent disk' command is given, where we'll expect a file\n # directory to be given. Another is the 'importcontent network'\n # command to be given, where we'll expect a channel.\n\n # However, some optional arguments apply to both groups. Add them here!\n node_ids_help_text = \"\"\"\n Specify one or more node IDs to import. Only the files associated to those node IDs will be imported.\n\n e.g.\n\n kolibri manage importcontent --node_ids <id1>,<id2>, [<ids>,...] {network, disk} <channel id>\n \"\"\"\n parser.add_argument(\n \"--node_ids\", \"-n\",\n # Split the comma separated string we get, into a list of strings\n type=lambda x: x.split(\",\"),\n default=[],\n required=False,\n dest=\"node_ids\",\n help=node_ids_help_text,\n )\n\n exclude_node_ids_help_text = \"\"\"\n Specify one or more node IDs to exclude. Files associated to those node IDs will be not be imported.\n\n e.g.\n\n kolibri manage importcontent --exclude_node_ids <id1>,<id2>, [<ids>,...] 
{network, disk} <channel id>\n \"\"\"\n parser.add_argument(\n \"--exclude_node_ids\",\n # Split the comma separated string we get, into a list of string\n type=lambda x: x.split(\",\"),\n default=[],\n required=False,\n dest=\"exclude_node_ids\",\n help=exclude_node_ids_help_text\n )\n\n parser.add_argument(\n \"--include-unrenderable-content\",\n action='store_false',\n default=True,\n dest=\"renderable_only\",\n help=\"Import all content, not just that which this Kolibri instance can render\"\n )\n\n # to implement these two groups of commands and their corresponding\n # arguments, we'll need argparse.subparsers.\n subparsers = parser.add_subparsers(dest='command', help=\"The following subcommands are available.\")\n\n # the network command has a channel id required positional argument,\n # and some optional content_id arguments.\n\n # TODO: implement a --content-domain parameter, for optionally\n # specifying the domain for the curation server.\n\n # Note: cmd should be the management command instance, as though the\n # interface for adding arguments is argparse, Django overrides the\n # parser object with its own thing, hence why we need to add cmd. See\n # http://stackoverflow.com/questions/36706220/is-it-possible-to-create-subparsers-in-a-django-management-command\n network_subparser = subparsers.add_parser(\n name='network',\n cmd=self,\n help=\"Download the given channel through the network.\",\n )\n network_subparser.add_argument('channel_id', type=str)\n\n default_studio_url = settings.CENTRAL_CONTENT_DOWNLOAD_BASE_URL\n network_subparser.add_argument(\n \"--baseurl\",\n type=str,\n default=default_studio_url,\n dest=\"baseurl\",\n )\n\n disk_subparser = subparsers.add_parser(\n name='disk',\n cmd=self,\n help='Copy the content from the given folder.'\n )\n disk_subparser.add_argument('channel_id', type=str)\n disk_subparser.add_argument('directory', type=str)\n\n def download_content(self, channel_id, node_ids=None, exclude_node_ids=None, baseurl=None, renderable_only=True):\n self._transfer(DOWNLOAD_METHOD, channel_id, node_ids=node_ids, exclude_node_ids=exclude_node_ids, baseurl=baseurl, renderable_only=renderable_only)\n\n def copy_content(self, channel_id, path, node_ids=None, exclude_node_ids=None, renderable_only=True):\n self._transfer(COPY_METHOD, channel_id, path=path, node_ids=node_ids, exclude_node_ids=exclude_node_ids, renderable_only=renderable_only)\n\n def _transfer(self, method, channel_id, path=None, node_ids=None, exclude_node_ids=None, baseurl=None, renderable_only=True): # noqa: max-complexity=16\n\n files_to_download, total_bytes_to_transfer = import_export_content.get_files_to_transfer(\n channel_id, node_ids, exclude_node_ids, False, renderable_only=renderable_only)\n\n number_of_skipped_files = 0\n file_checksums_to_annotate = []\n\n with self.start_progress(total=total_bytes_to_transfer) as overall_progress_update:\n\n for f in files_to_download:\n\n if self.is_cancelled():\n break\n\n filename = f.get_filename()\n dest = paths.get_content_storage_file_path(filename)\n\n # if the file already exists, add its size to our overall progress, and skip\n if os.path.isfile(dest) and os.path.getsize(dest) == f.file_size:\n overall_progress_update(f.file_size)\n file_checksums_to_annotate.append(f.id)\n continue\n\n # determine where we're downloading/copying from, and create appropriate transfer object\n if method == DOWNLOAD_METHOD:\n url = paths.get_content_storage_remote_url(filename, baseurl=baseurl)\n filetransfer = transfer.FileDownload(url, dest)\n elif 
method == COPY_METHOD:\n srcpath = paths.get_content_storage_file_path(filename, datafolder=path)\n filetransfer = transfer.FileCopy(srcpath, dest)\n\n try:\n\n with filetransfer:\n\n with self.start_progress(total=filetransfer.total_size) as file_dl_progress_update:\n\n for chunk in filetransfer:\n if self.is_cancelled():\n filetransfer.cancel()\n break\n length = len(chunk)\n overall_progress_update(length)\n file_dl_progress_update(length)\n\n file_checksums_to_annotate.append(f.id)\n\n except HTTPError:\n overall_progress_update(f.file_size)\n\n except OSError:\n number_of_skipped_files += 1\n overall_progress_update(f.file_size)\n\n annotation.set_availability(channel_id, file_checksums_to_annotate)\n\n if number_of_skipped_files > 0:\n logging.warning(\n \"{} files are skipped, because they are not found in the given external drive.\".format(\n number_of_skipped_files))\n\n if self.is_cancelled():\n self.cancel()\n\n def handle_async(self, *args, **options):\n if options['command'] == 'network':\n self.download_content(options[\"channel_id\"],\n node_ids=options[\"node_ids\"],\n exclude_node_ids=options['exclude_node_ids'],\n baseurl=options[\"baseurl\"],\n renderable_only=options[\"renderable_only\"])\n elif options['command'] == 'disk':\n self.copy_content(options[\"channel_id\"],\n options[\"directory\"],\n node_ids=options[\"node_ids\"],\n exclude_node_ids=options[\"exclude_node_ids\"],\n renderable_only=options[\"renderable_only\"])\n else:\n self._parser.print_help()\n raise CommandError(\"Please give a valid subcommand. You gave: {}\".format(options[\"command\"]))\n", "path": "kolibri/content/management/commands/importcontent.py"}], "after_files": [{"content": "import logging as logger\nimport os\n\nfrom django.conf import settings\nfrom django.core.management.base import CommandError\nfrom requests.exceptions import HTTPError\n\nfrom ...utils import annotation\nfrom ...utils import import_export_content\nfrom ...utils import paths\nfrom ...utils import transfer\nfrom kolibri.tasks.management.commands.base import AsyncCommand\n\n# constants to specify the transfer method to be used\nDOWNLOAD_METHOD = \"download\"\nCOPY_METHOD = \"copy\"\n\nlogging = logger.getLogger(__name__)\n\n\nclass Command(AsyncCommand):\n\n def add_arguments(self, parser):\n # let's save the parser in case we need to print a help statement\n self._parser = parser\n\n # we want two groups of arguments. One group is when the\n # 'importcontent disk' command is given, where we'll expect a file\n # directory to be given. Another is the 'importcontent network'\n # command to be given, where we'll expect a channel.\n\n # However, some optional arguments apply to both groups. Add them here!\n node_ids_help_text = \"\"\"\n Specify one or more node IDs to import. Only the files associated to those node IDs will be imported.\n\n e.g.\n\n kolibri manage importcontent --node_ids <id1>,<id2>, [<ids>,...] {network, disk} <channel id>\n \"\"\"\n parser.add_argument(\n \"--node_ids\", \"-n\",\n # Split the comma separated string we get, into a list of strings\n type=lambda x: x.split(\",\"),\n default=[],\n required=False,\n dest=\"node_ids\",\n help=node_ids_help_text,\n )\n\n exclude_node_ids_help_text = \"\"\"\n Specify one or more node IDs to exclude. Files associated to those node IDs will be not be imported.\n\n e.g.\n\n kolibri manage importcontent --exclude_node_ids <id1>,<id2>, [<ids>,...] 
{network, disk} <channel id>\n \"\"\"\n parser.add_argument(\n \"--exclude_node_ids\",\n # Split the comma separated string we get, into a list of string\n type=lambda x: x.split(\",\"),\n default=[],\n required=False,\n dest=\"exclude_node_ids\",\n help=exclude_node_ids_help_text\n )\n\n parser.add_argument(\n \"--include-unrenderable-content\",\n action='store_false',\n default=False,\n dest=\"renderable_only\",\n help=\"Import all content, not just that which this Kolibri instance can render\"\n )\n\n # to implement these two groups of commands and their corresponding\n # arguments, we'll need argparse.subparsers.\n subparsers = parser.add_subparsers(dest='command', help=\"The following subcommands are available.\")\n\n # the network command has a channel id required positional argument,\n # and some optional content_id arguments.\n\n # TODO: implement a --content-domain parameter, for optionally\n # specifying the domain for the curation server.\n\n # Note: cmd should be the management command instance, as though the\n # interface for adding arguments is argparse, Django overrides the\n # parser object with its own thing, hence why we need to add cmd. See\n # http://stackoverflow.com/questions/36706220/is-it-possible-to-create-subparsers-in-a-django-management-command\n network_subparser = subparsers.add_parser(\n name='network',\n cmd=self,\n help=\"Download the given channel through the network.\",\n )\n network_subparser.add_argument('channel_id', type=str)\n\n default_studio_url = settings.CENTRAL_CONTENT_DOWNLOAD_BASE_URL\n network_subparser.add_argument(\n \"--baseurl\",\n type=str,\n default=default_studio_url,\n dest=\"baseurl\",\n )\n\n disk_subparser = subparsers.add_parser(\n name='disk',\n cmd=self,\n help='Copy the content from the given folder.'\n )\n disk_subparser.add_argument('channel_id', type=str)\n disk_subparser.add_argument('directory', type=str)\n\n def download_content(self, channel_id, node_ids=None, exclude_node_ids=None, baseurl=None, renderable_only=True):\n self._transfer(DOWNLOAD_METHOD, channel_id, node_ids=node_ids, exclude_node_ids=exclude_node_ids, baseurl=baseurl, renderable_only=renderable_only)\n\n def copy_content(self, channel_id, path, node_ids=None, exclude_node_ids=None, renderable_only=True):\n self._transfer(COPY_METHOD, channel_id, path=path, node_ids=node_ids, exclude_node_ids=exclude_node_ids, renderable_only=renderable_only)\n\n def _transfer(self, method, channel_id, path=None, node_ids=None, exclude_node_ids=None, baseurl=None, renderable_only=True): # noqa: max-complexity=16\n\n files_to_download, total_bytes_to_transfer = import_export_content.get_files_to_transfer(\n channel_id, node_ids, exclude_node_ids, False, renderable_only=renderable_only)\n\n number_of_skipped_files = 0\n file_checksums_to_annotate = []\n\n with self.start_progress(total=total_bytes_to_transfer) as overall_progress_update:\n\n for f in files_to_download:\n\n if self.is_cancelled():\n break\n\n filename = f.get_filename()\n dest = paths.get_content_storage_file_path(filename)\n\n # if the file already exists, add its size to our overall progress, and skip\n if os.path.isfile(dest) and os.path.getsize(dest) == f.file_size:\n overall_progress_update(f.file_size)\n file_checksums_to_annotate.append(f.id)\n continue\n\n # determine where we're downloading/copying from, and create appropriate transfer object\n if method == DOWNLOAD_METHOD:\n url = paths.get_content_storage_remote_url(filename, baseurl=baseurl)\n filetransfer = transfer.FileDownload(url, dest)\n elif 
method == COPY_METHOD:\n srcpath = paths.get_content_storage_file_path(filename, datafolder=path)\n filetransfer = transfer.FileCopy(srcpath, dest)\n\n try:\n\n with filetransfer:\n\n with self.start_progress(total=filetransfer.total_size) as file_dl_progress_update:\n\n for chunk in filetransfer:\n if self.is_cancelled():\n filetransfer.cancel()\n break\n length = len(chunk)\n overall_progress_update(length)\n file_dl_progress_update(length)\n\n file_checksums_to_annotate.append(f.id)\n\n except HTTPError:\n overall_progress_update(f.file_size)\n\n except OSError:\n number_of_skipped_files += 1\n overall_progress_update(f.file_size)\n\n annotation.set_availability(channel_id, file_checksums_to_annotate)\n\n if number_of_skipped_files > 0:\n logging.warning(\n \"{} files are skipped, because they are not found in the given external drive.\".format(\n number_of_skipped_files))\n\n if self.is_cancelled():\n self.cancel()\n\n def handle_async(self, *args, **options):\n if options['command'] == 'network':\n self.download_content(options[\"channel_id\"],\n node_ids=options[\"node_ids\"],\n exclude_node_ids=options['exclude_node_ids'],\n baseurl=options[\"baseurl\"],\n renderable_only=options[\"renderable_only\"])\n elif options['command'] == 'disk':\n self.copy_content(options[\"channel_id\"],\n options[\"directory\"],\n node_ids=options[\"node_ids\"],\n exclude_node_ids=options[\"exclude_node_ids\"],\n renderable_only=options[\"renderable_only\"])\n else:\n self._parser.print_help()\n raise CommandError(\"Please give a valid subcommand. You gave: {}\".format(options[\"command\"]))\n", "path": "kolibri/content/management/commands/importcontent.py"}]} | 2,825 | 124 |
gh_patches_debug_36194 | rasdani/github-patches | git_diff | rlworkgroup__garage-625 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
FireReset is firing warnings in the CI
from recent test runs:
```sh
UserWarning: WARN: <class 'garage.envs.wrappers.fire_reset.FireReset'> doesn't implement 'step' method, which is required for wrappers derived directly from Wrapper. Deprecated default implementation is used.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `garage/envs/wrappers/noop.py`
Content:
```
1 """Noop wrapper for gym.Env."""
2 import gym
3 import numpy as np
4
5
6 class Noop(gym.Wrapper):
7 """
8 Noop wrapper for gym.Env.
9
10 It samples initial states by taking random number of no-ops on reset.
11 No-op is assumed to be action 0.
12
13 Args:
14 env: The environment to be wrapped.
15 noop_max: Maximum number no-op to be performed on reset.
16 """
17
18 def __init__(self, env, noop_max=30):
19 super().__init__(env)
20 self._noop_max = noop_max
21 self._noop_action = 0
22 assert noop_max > 0, "noop_max should be larger than 0!"
23 assert env.unwrapped.get_action_meanings()[0] == 'NOOP', (
24 "No-op should be the 0-th action but it's not in {}!".format(env))
25
26 def reset(self, **kwargs):
27 """gym.Env reset function."""
28 obs = self.env.reset(**kwargs)
29 noops = np.random.randint(1, self._noop_max + 1)
30 for _ in range(noops):
31 obs, _, done, _ = self.env.step(self._noop_action)
32 if done:
33 obs = self.env.reset(**kwargs)
34 return obs
35
```
Path: `garage/envs/wrappers/fire_reset.py`
Content:
```
1 """Fire reset wrapper for gym.Env."""
2 import gym
3
4
5 class FireReset(gym.Wrapper):
6 """
7 Fire reset wrapper for gym.Env.
8
9 Take action "fire" on reset.
10
11 Args:
12 env: The environment to be wrapped.
13 """
14
15 def __init__(self, env):
16 super().__init__(env)
17 assert env.unwrapped.get_action_meanings()[1] == 'FIRE', (
18 "Only use fire reset wrapper for suitable environment!")
19 assert len(env.unwrapped.get_action_meanings()) >= 3, (
20 "Only use fire reset wrapper for suitable environment!")
21
22 def reset(self, **kwargs):
23 """gym.Env reset function."""
24 self.env.reset(**kwargs)
25 obs, _, done, _ = self.env.step(1)
26 if done:
27 obs = self.env.reset(**kwargs)
28 return obs
29
```
Path: `garage/envs/wrappers/clip_reward.py`
Content:
```
1 """Clip reward for gym.Env."""
2 import gym
3 import numpy as np
4
5
6 class ClipReward(gym.Wrapper):
7 """Clip the reward by its sign."""
8
9 def step(self, ac):
10 """gym.Env step function."""
11 obs, reward, done, info = self.env.step(ac)
12 return obs, np.sign(reward), done, info
13
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/garage/envs/wrappers/clip_reward.py b/garage/envs/wrappers/clip_reward.py
--- a/garage/envs/wrappers/clip_reward.py
+++ b/garage/envs/wrappers/clip_reward.py
@@ -10,3 +10,7 @@
"""gym.Env step function."""
obs, reward, done, info = self.env.step(ac)
return obs, np.sign(reward), done, info
+
+ def reset(self):
+ """gym.Env reset."""
+ return self.env.reset()
diff --git a/garage/envs/wrappers/fire_reset.py b/garage/envs/wrappers/fire_reset.py
--- a/garage/envs/wrappers/fire_reset.py
+++ b/garage/envs/wrappers/fire_reset.py
@@ -15,9 +15,9 @@
def __init__(self, env):
super().__init__(env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE', (
- "Only use fire reset wrapper for suitable environment!")
+ 'Only use fire reset wrapper for suitable environment!')
assert len(env.unwrapped.get_action_meanings()) >= 3, (
- "Only use fire reset wrapper for suitable environment!")
+ 'Only use fire reset wrapper for suitable environment!')
def reset(self, **kwargs):
"""gym.Env reset function."""
@@ -26,3 +26,7 @@
if done:
obs = self.env.reset(**kwargs)
return obs
+
+ def step(self, action):
+ """gym.Env step function."""
+ return self.env.step(action)
diff --git a/garage/envs/wrappers/noop.py b/garage/envs/wrappers/noop.py
--- a/garage/envs/wrappers/noop.py
+++ b/garage/envs/wrappers/noop.py
@@ -19,7 +19,7 @@
super().__init__(env)
self._noop_max = noop_max
self._noop_action = 0
- assert noop_max > 0, "noop_max should be larger than 0!"
+ assert noop_max > 0, 'noop_max should be larger than 0!'
assert env.unwrapped.get_action_meanings()[0] == 'NOOP', (
"No-op should be the 0-th action but it's not in {}!".format(env))
@@ -28,7 +28,11 @@
obs = self.env.reset(**kwargs)
noops = np.random.randint(1, self._noop_max + 1)
for _ in range(noops):
- obs, _, done, _ = self.env.step(self._noop_action)
+ obs, _, done, _ = self.step(self._noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
+
+ def step(self, action):
+ """gym.Env step function."""
+ return self.env.step(action)
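
The warning quoted in the issue is literal: the `gym` releases of that era warn when a class derived directly from `gym.Wrapper` relies on the deprecated default `step`/`reset` passthroughs, which is why the patch adds explicit delegating methods to `FireReset`, `Noop`, and `ClipReward`. The same pattern in isolation looks like the sketch below; the wrapper and the `CartPole-v1` environment are stand-ins rather than garage code, and it assumes the classic gym API in which `reset` returns an observation and `step` returns a 4-tuple.

```python
# Minimal wrapper that spells out step() and reset() instead of relying on
# gym.Wrapper's deprecated defaults (hypothetical example, classic gym API).
import gym


class PassthroughWrapper(gym.Wrapper):
    """A do-nothing wrapper that still defines both required methods."""

    def reset(self, **kwargs):
        return self.env.reset(**kwargs)

    def step(self, action):
        return self.env.step(action)


env = PassthroughWrapper(gym.make("CartPole-v1"))
obs = env.reset()
obs, reward, done, info = env.step(env.action_space.sample())
```
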
| {"golden_diff": "diff --git a/garage/envs/wrappers/clip_reward.py b/garage/envs/wrappers/clip_reward.py\n--- a/garage/envs/wrappers/clip_reward.py\n+++ b/garage/envs/wrappers/clip_reward.py\n@@ -10,3 +10,7 @@\n \"\"\"gym.Env step function.\"\"\"\n obs, reward, done, info = self.env.step(ac)\n return obs, np.sign(reward), done, info\n+\n+ def reset(self):\n+ \"\"\"gym.Env reset.\"\"\"\n+ return self.env.reset()\ndiff --git a/garage/envs/wrappers/fire_reset.py b/garage/envs/wrappers/fire_reset.py\n--- a/garage/envs/wrappers/fire_reset.py\n+++ b/garage/envs/wrappers/fire_reset.py\n@@ -15,9 +15,9 @@\n def __init__(self, env):\n super().__init__(env)\n assert env.unwrapped.get_action_meanings()[1] == 'FIRE', (\n- \"Only use fire reset wrapper for suitable environment!\")\n+ 'Only use fire reset wrapper for suitable environment!')\n assert len(env.unwrapped.get_action_meanings()) >= 3, (\n- \"Only use fire reset wrapper for suitable environment!\")\n+ 'Only use fire reset wrapper for suitable environment!')\n \n def reset(self, **kwargs):\n \"\"\"gym.Env reset function.\"\"\"\n@@ -26,3 +26,7 @@\n if done:\n obs = self.env.reset(**kwargs)\n return obs\n+\n+ def step(self, action):\n+ \"\"\"gym.Env step function.\"\"\"\n+ return self.env.step(action)\ndiff --git a/garage/envs/wrappers/noop.py b/garage/envs/wrappers/noop.py\n--- a/garage/envs/wrappers/noop.py\n+++ b/garage/envs/wrappers/noop.py\n@@ -19,7 +19,7 @@\n super().__init__(env)\n self._noop_max = noop_max\n self._noop_action = 0\n- assert noop_max > 0, \"noop_max should be larger than 0!\"\n+ assert noop_max > 0, 'noop_max should be larger than 0!'\n assert env.unwrapped.get_action_meanings()[0] == 'NOOP', (\n \"No-op should be the 0-th action but it's not in {}!\".format(env))\n \n@@ -28,7 +28,11 @@\n obs = self.env.reset(**kwargs)\n noops = np.random.randint(1, self._noop_max + 1)\n for _ in range(noops):\n- obs, _, done, _ = self.env.step(self._noop_action)\n+ obs, _, done, _ = self.step(self._noop_action)\n if done:\n obs = self.env.reset(**kwargs)\n return obs\n+\n+ def step(self, action):\n+ \"\"\"gym.Env step function.\"\"\"\n+ return self.env.step(action)\n", "issue": "FireReset is firing warnings in the CI\nfrom recent tests runs\r\n```sh\r\nUserWarning: WARN: <class 'garage.envs.wrappers.fire_reset.FireReset'> doesn't implement 'step' method, which is required for wrappers derived directly from Wrapper. 
Deprecated default implementation is used.\r\n```\n", "before_files": [{"content": "\"\"\"Noop wrapper for gym.Env.\"\"\"\nimport gym\nimport numpy as np\n\n\nclass Noop(gym.Wrapper):\n \"\"\"\n Noop wrapper for gym.Env.\n\n It samples initial states by taking random number of no-ops on reset.\n No-op is assumed to be action 0.\n\n Args:\n env: The environment to be wrapped.\n noop_max: Maximum number no-op to be performed on reset.\n \"\"\"\n\n def __init__(self, env, noop_max=30):\n super().__init__(env)\n self._noop_max = noop_max\n self._noop_action = 0\n assert noop_max > 0, \"noop_max should be larger than 0!\"\n assert env.unwrapped.get_action_meanings()[0] == 'NOOP', (\n \"No-op should be the 0-th action but it's not in {}!\".format(env))\n\n def reset(self, **kwargs):\n \"\"\"gym.Env reset function.\"\"\"\n obs = self.env.reset(**kwargs)\n noops = np.random.randint(1, self._noop_max + 1)\n for _ in range(noops):\n obs, _, done, _ = self.env.step(self._noop_action)\n if done:\n obs = self.env.reset(**kwargs)\n return obs\n", "path": "garage/envs/wrappers/noop.py"}, {"content": "\"\"\"Fire reset wrapper for gym.Env.\"\"\"\nimport gym\n\n\nclass FireReset(gym.Wrapper):\n \"\"\"\n Fire reset wrapper for gym.Env.\n\n Take action \"fire\" on reset.\n\n Args:\n env: The environment to be wrapped.\n \"\"\"\n\n def __init__(self, env):\n super().__init__(env)\n assert env.unwrapped.get_action_meanings()[1] == 'FIRE', (\n \"Only use fire reset wrapper for suitable environment!\")\n assert len(env.unwrapped.get_action_meanings()) >= 3, (\n \"Only use fire reset wrapper for suitable environment!\")\n\n def reset(self, **kwargs):\n \"\"\"gym.Env reset function.\"\"\"\n self.env.reset(**kwargs)\n obs, _, done, _ = self.env.step(1)\n if done:\n obs = self.env.reset(**kwargs)\n return obs\n", "path": "garage/envs/wrappers/fire_reset.py"}, {"content": "\"\"\"Clip reward for gym.Env.\"\"\"\nimport gym\nimport numpy as np\n\n\nclass ClipReward(gym.Wrapper):\n \"\"\"Clip the reward by its sign.\"\"\"\n\n def step(self, ac):\n \"\"\"gym.Env step function.\"\"\"\n obs, reward, done, info = self.env.step(ac)\n return obs, np.sign(reward), done, info\n", "path": "garage/envs/wrappers/clip_reward.py"}], "after_files": [{"content": "\"\"\"Noop wrapper for gym.Env.\"\"\"\nimport gym\nimport numpy as np\n\n\nclass Noop(gym.Wrapper):\n \"\"\"\n Noop wrapper for gym.Env.\n\n It samples initial states by taking random number of no-ops on reset.\n No-op is assumed to be action 0.\n\n Args:\n env: The environment to be wrapped.\n noop_max: Maximum number no-op to be performed on reset.\n \"\"\"\n\n def __init__(self, env, noop_max=30):\n super().__init__(env)\n self._noop_max = noop_max\n self._noop_action = 0\n assert noop_max > 0, 'noop_max should be larger than 0!'\n assert env.unwrapped.get_action_meanings()[0] == 'NOOP', (\n \"No-op should be the 0-th action but it's not in {}!\".format(env))\n\n def reset(self, **kwargs):\n \"\"\"gym.Env reset function.\"\"\"\n obs = self.env.reset(**kwargs)\n noops = np.random.randint(1, self._noop_max + 1)\n for _ in range(noops):\n obs, _, done, _ = self.step(self._noop_action)\n if done:\n obs = self.env.reset(**kwargs)\n return obs\n\n def step(self, action):\n \"\"\"gym.Env step function.\"\"\"\n return self.env.step(action)\n", "path": "garage/envs/wrappers/noop.py"}, {"content": "\"\"\"Fire reset wrapper for gym.Env.\"\"\"\nimport gym\n\n\nclass FireReset(gym.Wrapper):\n \"\"\"\n Fire reset wrapper for gym.Env.\n\n Take action \"fire\" on reset.\n\n Args:\n env: The 
environment to be wrapped.\n \"\"\"\n\n def __init__(self, env):\n super().__init__(env)\n assert env.unwrapped.get_action_meanings()[1] == 'FIRE', (\n 'Only use fire reset wrapper for suitable environment!')\n assert len(env.unwrapped.get_action_meanings()) >= 3, (\n 'Only use fire reset wrapper for suitable environment!')\n\n def reset(self, **kwargs):\n \"\"\"gym.Env reset function.\"\"\"\n self.env.reset(**kwargs)\n obs, _, done, _ = self.env.step(1)\n if done:\n obs = self.env.reset(**kwargs)\n return obs\n\n def step(self, action):\n \"\"\"gym.Env step function.\"\"\"\n return self.env.step(action)\n", "path": "garage/envs/wrappers/fire_reset.py"}, {"content": "\"\"\"Clip reward for gym.Env.\"\"\"\nimport gym\nimport numpy as np\n\n\nclass ClipReward(gym.Wrapper):\n \"\"\"Clip the reward by its sign.\"\"\"\n\n def step(self, ac):\n \"\"\"gym.Env step function.\"\"\"\n obs, reward, done, info = self.env.step(ac)\n return obs, np.sign(reward), done, info\n\n def reset(self):\n \"\"\"gym.Env reset.\"\"\"\n return self.env.reset()\n", "path": "garage/envs/wrappers/clip_reward.py"}]} | 1,033 | 662 |
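A note on the fix captured in the record above: `gym.Wrapper` subclasses that override `reset` without also defining `step` fall back on gym's deprecated default delegation, which is exactly what the CI warning quoted in the issue complains about. The snippet below is a minimal, self-contained sketch of the delegation pattern the golden diff applies to `FireReset`, `Noop`, and `ClipReward`; the `PassthroughWrapper` name and the use of `CartPole-v1` are illustrative choices, not code from the garage repository, and the example assumes the pre-0.26 gym API that garage targeted at the time.

```python
import gym


class PassthroughWrapper(gym.Wrapper):
    """Wrapper that forwards reset and step explicitly to the inner env."""

    def reset(self, **kwargs):
        # Explicit delegation instead of relying on gym.Wrapper defaults.
        return self.env.reset(**kwargs)

    def step(self, action):
        # Defining step() alongside reset() avoids the deprecation warning.
        return self.env.step(action)


if __name__ == "__main__":
    env = PassthroughWrapper(gym.make("CartPole-v1"))
    obs = env.reset()
    obs, reward, done, info = env.step(env.action_space.sample())
```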
gh_patches_debug_660 | rasdani/github-patches | git_diff | pex-tool__pex-2153 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.137
On the docket:
+ [x] A locked requirement with mixed artifact types fails to lock. #2150
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.136"
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.136"
+__version__ = "2.1.137"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.136\"\n+__version__ = \"2.1.137\"\n", "issue": "Release 2.1.137\nOn the docket:\r\n+ [x] A locked requirement with mixed artifact types fails to lock. #2150\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.136\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.137\"\n", "path": "pex/version.py"}]} | 344 | 98 |
gh_patches_debug_19672 | rasdani/github-patches | git_diff | NVIDIA__apex-620 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
_amp_state determines whether running in distributed at import
It looks like the `_amp_state.py` module determines whether pytorch is running in distributed mode at the import level. The `distributed` only seems to be used in `maybe_print`. See code snippet:
https://github.com/NVIDIA/apex/blob/37cdaf4ad57ab4e7dd9ef13dbed7b29aa939d061/apex/amp/_amp_state.py#L38-L52
This causes a couple issues:
1. It will only support the `env://` initialization of torch distributed
2. It will fail if amp is imported before launching the distributed training
Neither of these is an issue for most, since most people launch via `torch.distributed.launch`. However, it can be an issue if you define your own distributed launch function or use `torch.multiprocessing.spawn`. I can't see a good reason to do it this way anyway, as it appears this variable is only used in the `maybe_print` function. I'll submit a pull request to fix this. Let me know if I'm missing something though.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apex/amp/_amp_state.py`
Content:
```
1 # This is a "header object" that allows different amp modules to communicate.
2 # I'm a C++ guy, not a python guy. I decided this approach because it seemed most C++-like.
3 # But apparently it's ok:
4 # http://effbot.org/pyfaq/how-do-i-share-global-variables-across-modules.htm
5 import os
6 import torch
7
8 TORCH_MAJOR = int(torch.__version__.split('.')[0])
9 TORCH_MINOR = int(torch.__version__.split('.')[1])
10
11 if TORCH_MAJOR == 0:
12 import collections.abc as container_abcs
13 else:
14 from torch._six import container_abcs
15
16
17 class AmpState(object):
18 def __init__(self):
19 self.hard_override=False
20 self.allow_incoming_model_not_fp32 = False
21 self.verbosity=1
22
23
24 # Attribute stash. Could also just stash things as global module attributes.
25 _amp_state = AmpState()
26
27
28 def warn_or_err(msg):
29 if _amp_state.hard_override:
30 print("Warning: " + msg)
31 else:
32 raise RuntimeError(msg)
33 # I'm not sure if allowing hard_override is a good idea.
34 # + " If you're sure you know what you're doing, supply " +
35 # "hard_override=True to amp.initialize.")
36
37
38 distributed = False
39 if 'WORLD_SIZE' in os.environ:
40 distributed = int(os.environ['WORLD_SIZE']) > 1
41
42
43 def maybe_print(msg, rank0=False):
44 if _amp_state.verbosity > 0:
45 if rank0:
46 if distributed:
47 if torch.distributed.get_rank() == 0:
48 print(msg)
49 else:
50 print(msg)
51 else:
52 print(msg)
53
54
55 # def iter_params(param_groups):
56 # for group in param_groups:
57 # for p in group['params']:
58 # yield p
59
60
61 def master_params(optimizer):
62 """
63 Generator expression that iterates over the params owned by ``optimizer``.
64
65 Args:
66 optimizer: An optimizer previously returned from ``amp.initialize``.
67 """
68 for group in optimizer.param_groups:
69 for p in group['params']:
70 yield p
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/apex/amp/_amp_state.py b/apex/amp/_amp_state.py
--- a/apex/amp/_amp_state.py
+++ b/apex/amp/_amp_state.py
@@ -1,5 +1,5 @@
# This is a "header object" that allows different amp modules to communicate.
-# I'm a C++ guy, not a python guy. I decided this approach because it seemed most C++-like.
+# I'm a C++ guy, not a python guy. I decided this approach because it seemed most C++-like.
# But apparently it's ok:
# http://effbot.org/pyfaq/how-do-i-share-global-variables-across-modules.htm
import os
@@ -35,12 +35,9 @@
# "hard_override=True to amp.initialize.")
-distributed = False
-if 'WORLD_SIZE' in os.environ:
- distributed = int(os.environ['WORLD_SIZE']) > 1
-
-
def maybe_print(msg, rank0=False):
+ distributed = torch.distributed.is_initialized() and \
+ torch.distributed.get_world_size() > 1
if _amp_state.verbosity > 0:
if rank0:
if distributed:
| {"golden_diff": "diff --git a/apex/amp/_amp_state.py b/apex/amp/_amp_state.py\n--- a/apex/amp/_amp_state.py\n+++ b/apex/amp/_amp_state.py\n@@ -1,5 +1,5 @@\n # This is a \"header object\" that allows different amp modules to communicate.\n-# I'm a C++ guy, not a python guy. I decided this approach because it seemed most C++-like. \n+# I'm a C++ guy, not a python guy. I decided this approach because it seemed most C++-like.\n # But apparently it's ok:\n # http://effbot.org/pyfaq/how-do-i-share-global-variables-across-modules.htm\n import os\n@@ -35,12 +35,9 @@\n # \"hard_override=True to amp.initialize.\")\n \n \n-distributed = False\n-if 'WORLD_SIZE' in os.environ:\n- distributed = int(os.environ['WORLD_SIZE']) > 1\n-\n-\n def maybe_print(msg, rank0=False):\n+ distributed = torch.distributed.is_initialized() and \\\n+ torch.distributed.get_world_size() > 1\n if _amp_state.verbosity > 0:\n if rank0:\n if distributed:\n", "issue": "_amp_state determines whether running in distributed at import\nIt looks like the `_amp_state.py` module determines whether pytorch is running in distributed mode at the import level. The `distributed` only seems to be used in `maybe_print`. See code snippet:\r\n\r\nhttps://github.com/NVIDIA/apex/blob/37cdaf4ad57ab4e7dd9ef13dbed7b29aa939d061/apex/amp/_amp_state.py#L38-L52\r\n\r\nThis causes a couple issues:\r\n\r\n1. It will only support the `env://` initialization of torch distributed\r\n2. It will fail if amp is imported before launching the distributed training\r\n\r\nNeither of these is an issue for most, since most people launch via `torch.distributed.launch`. However, it can be an issue if you define your own distributed launch function or use `torch.multiprocessing.spawn`. I can't see a good reason to do it this way anyway, as it appears this variable is only used in the `maybe_print` function. I'll submit a pull request to fix this. Let me know if I'm missing something though.\n", "before_files": [{"content": "# This is a \"header object\" that allows different amp modules to communicate.\n# I'm a C++ guy, not a python guy. I decided this approach because it seemed most C++-like. \n# But apparently it's ok:\n# http://effbot.org/pyfaq/how-do-i-share-global-variables-across-modules.htm\nimport os\nimport torch\n\nTORCH_MAJOR = int(torch.__version__.split('.')[0])\nTORCH_MINOR = int(torch.__version__.split('.')[1])\n\nif TORCH_MAJOR == 0:\n import collections.abc as container_abcs\nelse:\n from torch._six import container_abcs\n\n\nclass AmpState(object):\n def __init__(self):\n self.hard_override=False\n self.allow_incoming_model_not_fp32 = False\n self.verbosity=1\n\n\n# Attribute stash. 
Could also just stash things as global module attributes.\n_amp_state = AmpState()\n\n\ndef warn_or_err(msg):\n if _amp_state.hard_override:\n print(\"Warning: \" + msg)\n else:\n raise RuntimeError(msg)\n # I'm not sure if allowing hard_override is a good idea.\n # + \" If you're sure you know what you're doing, supply \" +\n # \"hard_override=True to amp.initialize.\")\n\n\ndistributed = False\nif 'WORLD_SIZE' in os.environ:\n distributed = int(os.environ['WORLD_SIZE']) > 1\n\n\ndef maybe_print(msg, rank0=False):\n if _amp_state.verbosity > 0:\n if rank0:\n if distributed:\n if torch.distributed.get_rank() == 0:\n print(msg)\n else:\n print(msg)\n else:\n print(msg)\n\n\n# def iter_params(param_groups):\n# for group in param_groups:\n# for p in group['params']:\n# yield p\n\n\ndef master_params(optimizer):\n \"\"\"\n Generator expression that iterates over the params owned by ``optimizer``.\n\n Args:\n optimizer: An optimizer previously returned from ``amp.initialize``.\n \"\"\"\n for group in optimizer.param_groups:\n for p in group['params']:\n yield p\n", "path": "apex/amp/_amp_state.py"}], "after_files": [{"content": "# This is a \"header object\" that allows different amp modules to communicate.\n# I'm a C++ guy, not a python guy. I decided this approach because it seemed most C++-like.\n# But apparently it's ok:\n# http://effbot.org/pyfaq/how-do-i-share-global-variables-across-modules.htm\nimport os\nimport torch\n\nTORCH_MAJOR = int(torch.__version__.split('.')[0])\nTORCH_MINOR = int(torch.__version__.split('.')[1])\n\nif TORCH_MAJOR == 0:\n import collections.abc as container_abcs\nelse:\n from torch._six import container_abcs\n\n\nclass AmpState(object):\n def __init__(self):\n self.hard_override=False\n self.allow_incoming_model_not_fp32 = False\n self.verbosity=1\n\n\n# Attribute stash. Could also just stash things as global module attributes.\n_amp_state = AmpState()\n\n\ndef warn_or_err(msg):\n if _amp_state.hard_override:\n print(\"Warning: \" + msg)\n else:\n raise RuntimeError(msg)\n # I'm not sure if allowing hard_override is a good idea.\n # + \" If you're sure you know what you're doing, supply \" +\n # \"hard_override=True to amp.initialize.\")\n\n\ndef maybe_print(msg, rank0=False):\n distributed = torch.distributed.is_initialized() and \\\n torch.distributed.get_world_size() > 1\n if _amp_state.verbosity > 0:\n if rank0:\n if distributed:\n if torch.distributed.get_rank() == 0:\n print(msg)\n else:\n print(msg)\n else:\n print(msg)\n\n\n# def iter_params(param_groups):\n# for group in param_groups:\n# for p in group['params']:\n# yield p\n\n\ndef master_params(optimizer):\n \"\"\"\n Generator expression that iterates over the params owned by ``optimizer``.\n\n Args:\n optimizer: An optimizer previously returned from ``amp.initialize``.\n \"\"\"\n for group in optimizer.param_groups:\n for p in group['params']:\n yield p\n", "path": "apex/amp/_amp_state.py"}]} | 1,101 | 268 |
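Before the next record, a short illustration of the pattern the apex patch above relies on: the distributed check moves from import time into the call path, so it works with any `torch.distributed` init method and regardless of import order. This sketch is a hedged paraphrase rather than apex code — the `is_distributed`/`print_rank0` names are invented here, and the extra `is_available()` guard (useful on CPU-only builds) is my addition, not part of the original diff.

```python
import torch


def is_distributed() -> bool:
    """Evaluate the distributed state lazily, at call time.

    Reading WORLD_SIZE once at import only works for env:// initialization
    and breaks when the process group is created after the import, which is
    the failure mode described in the issue.
    """
    return (
        torch.distributed.is_available()
        and torch.distributed.is_initialized()
        and torch.distributed.get_world_size() > 1
    )


def print_rank0(msg: str) -> None:
    """Print on rank 0 only when distributed, and unconditionally otherwise."""
    if is_distributed():
        if torch.distributed.get_rank() == 0:
            print(msg)
    else:
        print(msg)
```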
gh_patches_debug_9775 | rasdani/github-patches | git_diff | tensorflow__tfx-635 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`transform_output` not truly optional in 0.14.0 release
`transform_output` not truly optional for Trainer in 0.14.0 release. Culprit code is [here](https://github.com/tensorflow/tfx/blob/r0.14/tfx/components/trainer/executor.py#L148).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tfx/components/trainer/executor.py`
Content:
```
1 # Copyright 2019 Google LLC. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """TFX local trainer executor."""
15
16 from __future__ import absolute_import
17 from __future__ import division
18 from __future__ import print_function
19
20 import os
21 import tensorflow as tf
22 import tensorflow_model_analysis as tfma
23 from typing import Any, Dict, List, Text
24
25 from tensorflow_metadata.proto.v0 import schema_pb2
26 from tfx import types
27 from tfx.components.base import base_executor
28 from tfx.extensions.google_cloud_ai_platform import runner
29 from tfx.proto import trainer_pb2
30 from tfx.types import artifact_utils
31 from tfx.utils import import_utils
32 from tfx.utils import io_utils
33 from tfx.utils import path_utils
34 from google.protobuf import json_format
35
36
37 def _all_files_pattern(file_pattern: Text) -> Text:
38 return '{}*'.format(file_pattern)
39
40
41 class Executor(base_executor.BaseExecutor):
42 """Local trainer used by the TFX Trainer component.
43
44 The Trainer executor supplements TensorFlow training with a component to
45 enable warm-start training of any user-specified tf.estimator. The Trainer is
46 a library built on top of TensorFlow that is expected to be integrated into a
47 custom user-specified binary.
48
49 To include Trainer in a TFX pipeline, configure your pipeline similar to
50 https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_pipeline_simple.py#L104.
51
52 For more details on the Trainer component itself, please refer to
53 https://tensorflow.org/tfx/guide/trainer. For a tutorial on TF Estimator,
54 please refer to https://www.tensorflow.org/extend/estimators.
55
56 How to create a trainer callback function to be used by this Trainer executor:
57 An estimator can be executed by TFX by first creating a trainer_fn callback
58 method that returns an estimator and some additional parameters, similar to
59 https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py#L285.
60 This becomes the basis of the new Executor for Trainer. This Executor will
61 then train and evaluate this estimator using the
62 tf.estimator.train_and_evaluate API to train locally.
63 """
64
65 # Name of subdirectory which contains checkpoints from prior runs
66 _CHECKPOINT_FILE_NAME = 'checkpoint'
67
68 def _GetTrainerFn(self, exec_properties: Dict[Text, Any]) -> Any:
69 """Loads and returns user-defined trainer_fn."""
70
71 has_module_file = bool(exec_properties.get('module_file'))
72 has_trainer_fn = bool(exec_properties.get('trainer_fn'))
73
74 if has_module_file == has_trainer_fn:
75 raise ValueError(
76 "Neither or both of 'module_file' 'trainer_fn' have been supplied in "
77 "'exec_properties'.")
78
79 if has_module_file:
80 return import_utils.import_func_from_source(
81 exec_properties['module_file'], 'trainer_fn')
82
83 trainer_fn_path_split = exec_properties['trainer_fn'].split('.')
84 return import_utils.import_func_from_module(
85 '.'.join(trainer_fn_path_split[0:-1]), trainer_fn_path_split[-1])
86
87 def Do(self, input_dict: Dict[Text, List[types.Artifact]],
88 output_dict: Dict[Text, List[types.Artifact]],
89 exec_properties: Dict[Text, Any]) -> None:
90 """Uses a user-supplied tf.estimator to train a TensorFlow model locally.
91
92 The Trainer Executor invokes a training_fn callback function provided by
93 the user via the module_file parameter. With the tf.estimator returned by
94 this function, the Trainer Executor then builds a TensorFlow model using the
95 user-provided tf.estimator.
96
97 Args:
98 input_dict: Input dict from input key to a list of ML-Metadata Artifacts.
99 - examples: Examples used for training, must include 'train' and 'eval'
100 splits.
101 - transform_output: Optional input transform graph.
102 - schema: Schema of the data.
103 output_dict: Output dict from output key to a list of Artifacts.
104 - output: Exported model.
105 exec_properties: A dict of execution properties.
106 - train_args: JSON string of trainer_pb2.TrainArgs instance, providing
107 args for training.
108 - eval_args: JSON string of trainer_pb2.EvalArgs instance, providing
109 args for eval.
110 - module_file: Python module file containing UDF model definition.
111 - warm_starting: Whether or not we need to do warm starting.
112 - warm_start_from: Optional. If warm_starting is True, this is the
113 directory to find previous model to warm start on.
114
115 Returns:
116 None
117
118 Raises:
119 ValueError: When neither or both of 'module_file' and 'trainer_fn'
120 are present in 'exec_properties'.
121 """
122 self._log_startup(input_dict, output_dict, exec_properties)
123
124 # TODO(zhitaoli): Deprecate this in a future version.
125 if exec_properties.get('custom_config', None):
126 cmle_args = exec_properties.get('custom_config',
127 {}).get('cmle_training_args')
128 if cmle_args:
129 executor_class_path = '.'.join([Executor.__module__, Executor.__name__])
130 tf.logging.warn(
131 'Passing \'cmle_training_args\' to trainer directly is deprecated, '
132 'please use extension executor at '
133 'tfx.extensions.google_cloud_ai_platform.trainer.executor instead')
134
135 return runner.start_cmle_training(input_dict, output_dict,
136 exec_properties, executor_class_path,
137 cmle_args)
138
139 trainer_fn = self._GetTrainerFn(exec_properties)
140
141 # Set up training parameters
142 train_files = [
143 _all_files_pattern(
144 artifact_utils.get_split_uri(input_dict['examples'], 'train'))
145 ]
146 transform_output = artifact_utils.get_single_uri(
147 input_dict['transform_output']
148 ) if input_dict['transform_output'] else None
149 eval_files = [
150 _all_files_pattern(
151 artifact_utils.get_split_uri(input_dict['examples'], 'eval'))
152 ]
153 schema_file = io_utils.get_only_uri_in_dir(
154 artifact_utils.get_single_uri(input_dict['schema']))
155
156 train_args = trainer_pb2.TrainArgs()
157 eval_args = trainer_pb2.EvalArgs()
158 json_format.Parse(exec_properties['train_args'], train_args)
159 json_format.Parse(exec_properties['eval_args'], eval_args)
160
161 # https://github.com/tensorflow/tfx/issues/45: Replace num_steps=0 with
162 # num_steps=None. Conversion of the proto to python will set the default
163 # value of an int as 0 so modify the value here. Tensorflow will raise an
164 # error if num_steps <= 0.
165 train_steps = train_args.num_steps or None
166 eval_steps = eval_args.num_steps or None
167
168 output_path = artifact_utils.get_single_uri(output_dict['output'])
169 serving_model_dir = path_utils.serving_model_dir(output_path)
170 eval_model_dir = path_utils.eval_model_dir(output_path)
171
172 # Assemble warm start path if needed.
173 warm_start_from = None
174 if exec_properties.get('warm_starting') and exec_properties.get(
175 'warm_start_from'):
176 previous_model_dir = os.path.join(exec_properties['warm_start_from'],
177 path_utils.SERVING_MODEL_DIR)
178 if previous_model_dir and tf.gfile.Exists(
179 os.path.join(previous_model_dir, self._CHECKPOINT_FILE_NAME)):
180 warm_start_from = previous_model_dir
181
182 # TODO(b/126242806) Use PipelineInputs when it is available in third_party.
183 hparams = tf.contrib.training.HParams(
184 # A list of uris for train files.
185 train_files=train_files,
186 # An optional single uri for transform graph produced by TFT. Will be
187 # None if not specified.
188 transform_output=transform_output,
189 # A single uri for the output directory of the serving model.
190 serving_model_dir=serving_model_dir,
191 # A list of uris for eval files.
192 eval_files=eval_files,
193 # A single uri for schema file.
194 schema_file=schema_file,
195 # Number of train steps.
196 train_steps=train_steps,
197 # Number of eval steps.
198 eval_steps=eval_steps,
199 # A single uri for the model directory to warm start from.
200 warm_start_from=warm_start_from)
201
202 schema = io_utils.parse_pbtxt_file(schema_file, schema_pb2.Schema())
203
204 training_spec = trainer_fn(hparams, schema)
205
206 # Train the model
207 tf.logging.info('Training model.')
208 tf.estimator.train_and_evaluate(training_spec['estimator'],
209 training_spec['train_spec'],
210 training_spec['eval_spec'])
211 tf.logging.info('Training complete. Model written to %s',
212 serving_model_dir)
213
214 # Export an eval savedmodel for TFMA
215 tf.logging.info('Exporting eval_savedmodel for TFMA.')
216 tfma.export.export_eval_savedmodel(
217 estimator=training_spec['estimator'],
218 export_dir_base=eval_model_dir,
219 eval_input_receiver_fn=training_spec['eval_input_receiver_fn'])
220
221 tf.logging.info('Exported eval_savedmodel to %s.', eval_model_dir)
222
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tfx/components/trainer/executor.py b/tfx/components/trainer/executor.py
--- a/tfx/components/trainer/executor.py
+++ b/tfx/components/trainer/executor.py
@@ -144,8 +144,8 @@
artifact_utils.get_split_uri(input_dict['examples'], 'train'))
]
transform_output = artifact_utils.get_single_uri(
- input_dict['transform_output']
- ) if input_dict['transform_output'] else None
+ input_dict['transform_output']) if input_dict.get(
+ 'transform_output', None) else None
eval_files = [
_all_files_pattern(
artifact_utils.get_split_uri(input_dict['examples'], 'eval'))
| {"golden_diff": "diff --git a/tfx/components/trainer/executor.py b/tfx/components/trainer/executor.py\n--- a/tfx/components/trainer/executor.py\n+++ b/tfx/components/trainer/executor.py\n@@ -144,8 +144,8 @@\n artifact_utils.get_split_uri(input_dict['examples'], 'train'))\n ]\n transform_output = artifact_utils.get_single_uri(\n- input_dict['transform_output']\n- ) if input_dict['transform_output'] else None\n+ input_dict['transform_output']) if input_dict.get(\n+ 'transform_output', None) else None\n eval_files = [\n _all_files_pattern(\n artifact_utils.get_split_uri(input_dict['examples'], 'eval'))\n", "issue": "`transform_output` not truely optional in 0.14.0 release\n`transform_output` not truely optional for Trainer in 0.14.0 release. Culprit code is [here](https://github.com/tensorflow/tfx/blob/r0.14/tfx/components/trainer/executor.py#L148).\n", "before_files": [{"content": "# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"TFX local trainer executor.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tensorflow as tf\nimport tensorflow_model_analysis as tfma\nfrom typing import Any, Dict, List, Text\n\nfrom tensorflow_metadata.proto.v0 import schema_pb2\nfrom tfx import types\nfrom tfx.components.base import base_executor\nfrom tfx.extensions.google_cloud_ai_platform import runner\nfrom tfx.proto import trainer_pb2\nfrom tfx.types import artifact_utils\nfrom tfx.utils import import_utils\nfrom tfx.utils import io_utils\nfrom tfx.utils import path_utils\nfrom google.protobuf import json_format\n\n\ndef _all_files_pattern(file_pattern: Text) -> Text:\n return '{}*'.format(file_pattern)\n\n\nclass Executor(base_executor.BaseExecutor):\n \"\"\"Local trainer used by the TFX Trainer component.\n\n The Trainer executor supplements TensorFlow training with a component to\n enable warm-start training of any user-specified tf.estimator. The Trainer is\n a library built on top of TensorFlow that is expected to be integrated into a\n custom user-specified binary.\n\n To include Trainer in a TFX pipeline, configure your pipeline similar to\n https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_pipeline_simple.py#L104.\n\n For more details on the Trainer component itself, please refer to\n https://tensorflow.org/tfx/guide/trainer. For a tutorial on TF Estimator,\n please refer to https://www.tensorflow.org/extend/estimators.\n\n How to create a trainer callback function to be used by this Trainer executor:\n An estimator can be executed by TFX by first creating a trainer_fn callback\n method that returns an estimator and some additional parameters, similar to\n https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py#L285.\n This becomes the basis of the new Executor for Trainer. 
This Executor will\n then train and evaluate this estimator using the\n tf.estimator.train_and_evaluate API to train locally.\n \"\"\"\n\n # Name of subdirectory which contains checkpoints from prior runs\n _CHECKPOINT_FILE_NAME = 'checkpoint'\n\n def _GetTrainerFn(self, exec_properties: Dict[Text, Any]) -> Any:\n \"\"\"Loads and returns user-defined trainer_fn.\"\"\"\n\n has_module_file = bool(exec_properties.get('module_file'))\n has_trainer_fn = bool(exec_properties.get('trainer_fn'))\n\n if has_module_file == has_trainer_fn:\n raise ValueError(\n \"Neither or both of 'module_file' 'trainer_fn' have been supplied in \"\n \"'exec_properties'.\")\n\n if has_module_file:\n return import_utils.import_func_from_source(\n exec_properties['module_file'], 'trainer_fn')\n\n trainer_fn_path_split = exec_properties['trainer_fn'].split('.')\n return import_utils.import_func_from_module(\n '.'.join(trainer_fn_path_split[0:-1]), trainer_fn_path_split[-1])\n\n def Do(self, input_dict: Dict[Text, List[types.Artifact]],\n output_dict: Dict[Text, List[types.Artifact]],\n exec_properties: Dict[Text, Any]) -> None:\n \"\"\"Uses a user-supplied tf.estimator to train a TensorFlow model locally.\n\n The Trainer Executor invokes a training_fn callback function provided by\n the user via the module_file parameter. With the tf.estimator returned by\n this function, the Trainer Executor then builds a TensorFlow model using the\n user-provided tf.estimator.\n\n Args:\n input_dict: Input dict from input key to a list of ML-Metadata Artifacts.\n - examples: Examples used for training, must include 'train' and 'eval'\n splits.\n - transform_output: Optional input transform graph.\n - schema: Schema of the data.\n output_dict: Output dict from output key to a list of Artifacts.\n - output: Exported model.\n exec_properties: A dict of execution properties.\n - train_args: JSON string of trainer_pb2.TrainArgs instance, providing\n args for training.\n - eval_args: JSON string of trainer_pb2.EvalArgs instance, providing\n args for eval.\n - module_file: Python module file containing UDF model definition.\n - warm_starting: Whether or not we need to do warm starting.\n - warm_start_from: Optional. 
If warm_starting is True, this is the\n directory to find previous model to warm start on.\n\n Returns:\n None\n\n Raises:\n ValueError: When neither or both of 'module_file' and 'trainer_fn'\n are present in 'exec_properties'.\n \"\"\"\n self._log_startup(input_dict, output_dict, exec_properties)\n\n # TODO(zhitaoli): Deprecate this in a future version.\n if exec_properties.get('custom_config', None):\n cmle_args = exec_properties.get('custom_config',\n {}).get('cmle_training_args')\n if cmle_args:\n executor_class_path = '.'.join([Executor.__module__, Executor.__name__])\n tf.logging.warn(\n 'Passing \\'cmle_training_args\\' to trainer directly is deprecated, '\n 'please use extension executor at '\n 'tfx.extensions.google_cloud_ai_platform.trainer.executor instead')\n\n return runner.start_cmle_training(input_dict, output_dict,\n exec_properties, executor_class_path,\n cmle_args)\n\n trainer_fn = self._GetTrainerFn(exec_properties)\n\n # Set up training parameters\n train_files = [\n _all_files_pattern(\n artifact_utils.get_split_uri(input_dict['examples'], 'train'))\n ]\n transform_output = artifact_utils.get_single_uri(\n input_dict['transform_output']\n ) if input_dict['transform_output'] else None\n eval_files = [\n _all_files_pattern(\n artifact_utils.get_split_uri(input_dict['examples'], 'eval'))\n ]\n schema_file = io_utils.get_only_uri_in_dir(\n artifact_utils.get_single_uri(input_dict['schema']))\n\n train_args = trainer_pb2.TrainArgs()\n eval_args = trainer_pb2.EvalArgs()\n json_format.Parse(exec_properties['train_args'], train_args)\n json_format.Parse(exec_properties['eval_args'], eval_args)\n\n # https://github.com/tensorflow/tfx/issues/45: Replace num_steps=0 with\n # num_steps=None. Conversion of the proto to python will set the default\n # value of an int as 0 so modify the value here. Tensorflow will raise an\n # error if num_steps <= 0.\n train_steps = train_args.num_steps or None\n eval_steps = eval_args.num_steps or None\n\n output_path = artifact_utils.get_single_uri(output_dict['output'])\n serving_model_dir = path_utils.serving_model_dir(output_path)\n eval_model_dir = path_utils.eval_model_dir(output_path)\n\n # Assemble warm start path if needed.\n warm_start_from = None\n if exec_properties.get('warm_starting') and exec_properties.get(\n 'warm_start_from'):\n previous_model_dir = os.path.join(exec_properties['warm_start_from'],\n path_utils.SERVING_MODEL_DIR)\n if previous_model_dir and tf.gfile.Exists(\n os.path.join(previous_model_dir, self._CHECKPOINT_FILE_NAME)):\n warm_start_from = previous_model_dir\n\n # TODO(b/126242806) Use PipelineInputs when it is available in third_party.\n hparams = tf.contrib.training.HParams(\n # A list of uris for train files.\n train_files=train_files,\n # An optional single uri for transform graph produced by TFT. 
Will be\n # None if not specified.\n transform_output=transform_output,\n # A single uri for the output directory of the serving model.\n serving_model_dir=serving_model_dir,\n # A list of uris for eval files.\n eval_files=eval_files,\n # A single uri for schema file.\n schema_file=schema_file,\n # Number of train steps.\n train_steps=train_steps,\n # Number of eval steps.\n eval_steps=eval_steps,\n # A single uri for the model directory to warm start from.\n warm_start_from=warm_start_from)\n\n schema = io_utils.parse_pbtxt_file(schema_file, schema_pb2.Schema())\n\n training_spec = trainer_fn(hparams, schema)\n\n # Train the model\n tf.logging.info('Training model.')\n tf.estimator.train_and_evaluate(training_spec['estimator'],\n training_spec['train_spec'],\n training_spec['eval_spec'])\n tf.logging.info('Training complete. Model written to %s',\n serving_model_dir)\n\n # Export an eval savedmodel for TFMA\n tf.logging.info('Exporting eval_savedmodel for TFMA.')\n tfma.export.export_eval_savedmodel(\n estimator=training_spec['estimator'],\n export_dir_base=eval_model_dir,\n eval_input_receiver_fn=training_spec['eval_input_receiver_fn'])\n\n tf.logging.info('Exported eval_savedmodel to %s.', eval_model_dir)\n", "path": "tfx/components/trainer/executor.py"}], "after_files": [{"content": "# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"TFX local trainer executor.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tensorflow as tf\nimport tensorflow_model_analysis as tfma\nfrom typing import Any, Dict, List, Text\n\nfrom tensorflow_metadata.proto.v0 import schema_pb2\nfrom tfx import types\nfrom tfx.components.base import base_executor\nfrom tfx.extensions.google_cloud_ai_platform import runner\nfrom tfx.proto import trainer_pb2\nfrom tfx.types import artifact_utils\nfrom tfx.utils import import_utils\nfrom tfx.utils import io_utils\nfrom tfx.utils import path_utils\nfrom google.protobuf import json_format\n\n\ndef _all_files_pattern(file_pattern: Text) -> Text:\n return '{}*'.format(file_pattern)\n\n\nclass Executor(base_executor.BaseExecutor):\n \"\"\"Local trainer used by the TFX Trainer component.\n\n The Trainer executor supplements TensorFlow training with a component to\n enable warm-start training of any user-specified tf.estimator. The Trainer is\n a library built on top of TensorFlow that is expected to be integrated into a\n custom user-specified binary.\n\n To include Trainer in a TFX pipeline, configure your pipeline similar to\n https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_pipeline_simple.py#L104.\n\n For more details on the Trainer component itself, please refer to\n https://tensorflow.org/tfx/guide/trainer. 
For a tutorial on TF Estimator,\n please refer to https://www.tensorflow.org/extend/estimators.\n\n How to create a trainer callback function to be used by this Trainer executor:\n An estimator can be executed by TFX by first creating a trainer_fn callback\n method that returns an estimator and some additional parameters, similar to\n https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py#L285.\n This becomes the basis of the new Executor for Trainer. This Executor will\n then train and evaluate this estimator using the\n tf.estimator.train_and_evaluate API to train locally.\n \"\"\"\n\n # Name of subdirectory which contains checkpoints from prior runs\n _CHECKPOINT_FILE_NAME = 'checkpoint'\n\n def _GetTrainerFn(self, exec_properties: Dict[Text, Any]) -> Any:\n \"\"\"Loads and returns user-defined trainer_fn.\"\"\"\n\n has_module_file = bool(exec_properties.get('module_file'))\n has_trainer_fn = bool(exec_properties.get('trainer_fn'))\n\n if has_module_file == has_trainer_fn:\n raise ValueError(\n \"Neither or both of 'module_file' 'trainer_fn' have been supplied in \"\n \"'exec_properties'.\")\n\n if has_module_file:\n return import_utils.import_func_from_source(\n exec_properties['module_file'], 'trainer_fn')\n\n trainer_fn_path_split = exec_properties['trainer_fn'].split('.')\n return import_utils.import_func_from_module(\n '.'.join(trainer_fn_path_split[0:-1]), trainer_fn_path_split[-1])\n\n def Do(self, input_dict: Dict[Text, List[types.Artifact]],\n output_dict: Dict[Text, List[types.Artifact]],\n exec_properties: Dict[Text, Any]) -> None:\n \"\"\"Uses a user-supplied tf.estimator to train a TensorFlow model locally.\n\n The Trainer Executor invokes a training_fn callback function provided by\n the user via the module_file parameter. With the tf.estimator returned by\n this function, the Trainer Executor then builds a TensorFlow model using the\n user-provided tf.estimator.\n\n Args:\n input_dict: Input dict from input key to a list of ML-Metadata Artifacts.\n - examples: Examples used for training, must include 'train' and 'eval'\n splits.\n - transform_output: Optional input transform graph.\n - schema: Schema of the data.\n output_dict: Output dict from output key to a list of Artifacts.\n - output: Exported model.\n exec_properties: A dict of execution properties.\n - train_args: JSON string of trainer_pb2.TrainArgs instance, providing\n args for training.\n - eval_args: JSON string of trainer_pb2.EvalArgs instance, providing\n args for eval.\n - module_file: Python module file containing UDF model definition.\n - warm_starting: Whether or not we need to do warm starting.\n - warm_start_from: Optional. 
If warm_starting is True, this is the\n directory to find previous model to warm start on.\n\n Returns:\n None\n\n Raises:\n ValueError: When neither or both of 'module_file' and 'trainer_fn'\n are present in 'exec_properties'.\n \"\"\"\n self._log_startup(input_dict, output_dict, exec_properties)\n\n # TODO(zhitaoli): Deprecate this in a future version.\n if exec_properties.get('custom_config', None):\n cmle_args = exec_properties.get('custom_config',\n {}).get('cmle_training_args')\n if cmle_args:\n executor_class_path = '.'.join([Executor.__module__, Executor.__name__])\n tf.logging.warn(\n 'Passing \\'cmle_training_args\\' to trainer directly is deprecated, '\n 'please use extension executor at '\n 'tfx.extensions.google_cloud_ai_platform.trainer.executor instead')\n\n return runner.start_cmle_training(input_dict, output_dict,\n exec_properties, executor_class_path,\n cmle_args)\n\n trainer_fn = self._GetTrainerFn(exec_properties)\n\n # Set up training parameters\n train_files = [\n _all_files_pattern(\n artifact_utils.get_split_uri(input_dict['examples'], 'train'))\n ]\n transform_output = artifact_utils.get_single_uri(\n input_dict['transform_output']) if input_dict.get(\n 'transform_output', None) else None\n eval_files = [\n _all_files_pattern(\n artifact_utils.get_split_uri(input_dict['examples'], 'eval'))\n ]\n schema_file = io_utils.get_only_uri_in_dir(\n artifact_utils.get_single_uri(input_dict['schema']))\n\n train_args = trainer_pb2.TrainArgs()\n eval_args = trainer_pb2.EvalArgs()\n json_format.Parse(exec_properties['train_args'], train_args)\n json_format.Parse(exec_properties['eval_args'], eval_args)\n\n # https://github.com/tensorflow/tfx/issues/45: Replace num_steps=0 with\n # num_steps=None. Conversion of the proto to python will set the default\n # value of an int as 0 so modify the value here. Tensorflow will raise an\n # error if num_steps <= 0.\n train_steps = train_args.num_steps or None\n eval_steps = eval_args.num_steps or None\n\n output_path = artifact_utils.get_single_uri(output_dict['output'])\n serving_model_dir = path_utils.serving_model_dir(output_path)\n eval_model_dir = path_utils.eval_model_dir(output_path)\n\n # Assemble warm start path if needed.\n warm_start_from = None\n if exec_properties.get('warm_starting') and exec_properties.get(\n 'warm_start_from'):\n previous_model_dir = os.path.join(exec_properties['warm_start_from'],\n path_utils.SERVING_MODEL_DIR)\n if previous_model_dir and tf.gfile.Exists(\n os.path.join(previous_model_dir, self._CHECKPOINT_FILE_NAME)):\n warm_start_from = previous_model_dir\n\n # TODO(b/126242806) Use PipelineInputs when it is available in third_party.\n hparams = tf.contrib.training.HParams(\n # A list of uris for train files.\n train_files=train_files,\n # An optional single uri for transform graph produced by TFT. 
Will be\n # None if not specified.\n transform_output=transform_output,\n # A single uri for the output directory of the serving model.\n serving_model_dir=serving_model_dir,\n # A list of uris for eval files.\n eval_files=eval_files,\n # A single uri for schema file.\n schema_file=schema_file,\n # Number of train steps.\n train_steps=train_steps,\n # Number of eval steps.\n eval_steps=eval_steps,\n # A single uri for the model directory to warm start from.\n warm_start_from=warm_start_from)\n\n schema = io_utils.parse_pbtxt_file(schema_file, schema_pb2.Schema())\n\n training_spec = trainer_fn(hparams, schema)\n\n # Train the model\n tf.logging.info('Training model.')\n tf.estimator.train_and_evaluate(training_spec['estimator'],\n training_spec['train_spec'],\n training_spec['eval_spec'])\n tf.logging.info('Training complete. Model written to %s',\n serving_model_dir)\n\n # Export an eval savedmodel for TFMA\n tf.logging.info('Exporting eval_savedmodel for TFMA.')\n tfma.export.export_eval_savedmodel(\n estimator=training_spec['estimator'],\n export_dir_base=eval_model_dir,\n eval_input_receiver_fn=training_spec['eval_input_receiver_fn'])\n\n tf.logging.info('Exported eval_savedmodel to %s.', eval_model_dir)\n", "path": "tfx/components/trainer/executor.py"}]} | 2,992 | 154 |
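The TFX fix above comes down to how an optional input is read: `input_dict['transform_output']` raises `KeyError` whenever the Trainer runs without a Transform component, whereas `input_dict.get('transform_output', None)` degrades gracefully. The following sketch restates that with plain dictionaries; `resolve_transform_uri` is a made-up helper for illustration and does not exist in TFX.

```python
from typing import Dict, List, Optional


def resolve_transform_uri(input_dict: Dict[str, List[str]]) -> Optional[str]:
    """Return the transform graph URI if one was supplied, else None.

    Indexing the dict directly would raise KeyError when the key is absent,
    which is why the patch switches to .get() with a None default.
    """
    artifacts = input_dict.get('transform_output', None)
    return artifacts[0] if artifacts else None


# The optional input may be missing, empty, or populated.
assert resolve_transform_uri({}) is None
assert resolve_transform_uri({'transform_output': []}) is None
assert resolve_transform_uri({'transform_output': ['/tmp/tft']}) == '/tmp/tft'
```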
gh_patches_debug_14679 | rasdani/github-patches | git_diff | deepset-ai__haystack-7972 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Distribution-based rank fusion in JoinDocuments
**Is your feature request related to a problem? Please describe.**
Add [Distribution-based rank fusion](https://medium.com/plain-simple-software/distribution-based-score-fusion-dbsf-a-new-approach-to-vector-search-ranking-f87c37488b18) in JoinDocuments
**Describe the solution you'd like**
```
def _distribution_based_rank_fusion(self, document_lists):
"""
Merge multiple lists of Documents and assign scores based on Distribution-Based Score Fusion.
(https://medium.com/plain-simple-software/distribution-based-score-fusion-dbsf-a-new-approach-to-vector-search-ranking-f87c37488b18)
        If a Document is in more than one retriever, the one with the highest score is used.
"""
for documents in document_lists:
scores_list = []
for doc in documents:
scores_list.append(doc.score)
mean_score = sum(scores_list) / len(scores_list)
std_dev = (
sum((x - mean_score) ** 2 for x in scores_list) / len(scores_list)
) ** 0.5
min_score = mean_score - 3 * std_dev
max_score = mean_score + 3 * std_dev
for doc in documents:
doc.score = (doc.score - min_score) / (max_score - min_score)
output = self._concatenate(document_lists=document_lists)
return output
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `haystack/components/joiners/document_joiner.py`
Content:
```
1 # SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]>
2 #
3 # SPDX-License-Identifier: Apache-2.0
4
5 import itertools
6 from collections import defaultdict
7 from math import inf
8 from typing import List, Optional
9
10 from haystack import Document, component, logging
11 from haystack.core.component.types import Variadic
12
13 logger = logging.getLogger(__name__)
14
15
16 @component
17 class DocumentJoiner:
18 """
19 A component that joins multiple list of Documents into a single list.
20
21 It supports different joins modes:
22 - concatenate: Keeps the highest scored Document in case of duplicates.
23 - merge: Merge a calculate a weighted sum of the scores of duplicate Documents.
24 - reciprocal_rank_fusion: Merge and assign scores based on reciprocal rank fusion.
25 - distribution_based_rank_fusion: Merge and assign scores based on scores distribution in each retriever
26
27 Usage example:
28 ```python
29 document_store = InMemoryDocumentStore()
30 p = Pipeline()
31 p.add_component(instance=InMemoryBM25Retriever(document_store=document_store), name="bm25_retriever")
32 p.add_component(
33 instance=SentenceTransformersTextEmbedder(model="sentence-transformers/all-MiniLM-L6-v2"),
34 name="text_embedder",
35 )
36 p.add_component(instance=InMemoryEmbeddingRetriever(document_store=document_store), name="embedding_retriever")
37 p.add_component(instance=DocumentJoiner(), name="joiner")
38 p.connect("bm25_retriever", "joiner")
39 p.connect("embedding_retriever", "joiner")
40 p.connect("text_embedder", "embedding_retriever")
41 query = "What is the capital of France?"
42 p.run(data={"query": query})
43 ```
44 """
45
46 def __init__(
47 self,
48 join_mode: str = "concatenate",
49 weights: Optional[List[float]] = None,
50 top_k: Optional[int] = None,
51 sort_by_score: bool = True,
52 ):
53 """
54 Create an DocumentJoiner component.
55
56 :param join_mode:
57 Specifies the join mode to use. Available modes:
58 - `concatenate`
59 - `merge`
60 - `reciprocal_rank_fusion`
61 - `distribution_based_rank_fusion`
62 :param weights:
63 Weight for each list of Documents received, must have the same length as the number of inputs.
64 If `join_mode` is `concatenate` or `distribution_based_rank_fusion` this parameter is ignored.
65 :param top_k:
66 The maximum number of Documents to return.
67 :param sort_by_score:
68 If True sorts the Documents by score in descending order.
69 If a Document has no score, it is handled as if its score is -infinity.
70 """
71 if join_mode not in ["concatenate", "merge", "reciprocal_rank_fusion", "distribution_based_rank_fusion"]:
72 raise ValueError(f"DocumentJoiner component does not support '{join_mode}' join_mode.")
73 self.join_mode = join_mode
74 self.weights = [float(i) / sum(weights) for i in weights] if weights else None
75 self.top_k = top_k
76 self.sort_by_score = sort_by_score
77
78 @component.output_types(documents=List[Document])
79 def run(self, documents: Variadic[List[Document]], top_k: Optional[int] = None):
80 """
81 Joins multiple lists of Documents into a single list depending on the `join_mode` parameter.
82
83 :param documents:
84 List of list of Documents to be merged.
85 :param top_k:
86 The maximum number of Documents to return. Overrides the instance's `top_k` if provided.
87
88 :returns:
89 A dictionary with the following keys:
90 - `documents`: Merged list of Documents
91 """
92 output_documents = []
93
94 documents = list(documents)
95 if self.join_mode == "concatenate":
96 output_documents = self._concatenate(documents)
97 elif self.join_mode == "merge":
98 output_documents = self._merge(documents)
99 elif self.join_mode == "reciprocal_rank_fusion":
100 output_documents = self._reciprocal_rank_fusion(documents)
101 elif self.join_mode == "distribution_based_rank_fusion":
102 output_documents = self._distribution_based_rank_fusion(documents)
103
104 if self.sort_by_score:
105 output_documents = sorted(
106 output_documents, key=lambda doc: doc.score if doc.score is not None else -inf, reverse=True
107 )
108 if any(doc.score is None for doc in output_documents):
109 logger.info(
110 "Some of the Documents DocumentJoiner got have score=None. It was configured to sort Documents by "
111 "score, so those with score=None were sorted as if they had a score of -infinity."
112 )
113
114 if top_k:
115 output_documents = output_documents[:top_k]
116 elif self.top_k:
117 output_documents = output_documents[: self.top_k]
118
119 return {"documents": output_documents}
120
121 def _concatenate(self, document_lists: List[List[Document]]) -> List[Document]:
122 """
123 Concatenate multiple lists of Documents and return only the Document with the highest score for duplicates.
124 """
125 output = []
126 docs_per_id = defaultdict(list)
127 for doc in itertools.chain.from_iterable(document_lists):
128 docs_per_id[doc.id].append(doc)
129 for docs in docs_per_id.values():
130 doc_with_best_score = max(docs, key=lambda doc: doc.score if doc.score else -inf)
131 output.append(doc_with_best_score)
132 return output
133
134 def _merge(self, document_lists: List[List[Document]]) -> List[Document]:
135 """
136 Merge multiple lists of Documents and calculate a weighted sum of the scores of duplicate Documents.
137 """
138 scores_map: dict = defaultdict(int)
139 documents_map = {}
140 weights = self.weights if self.weights else [1 / len(document_lists)] * len(document_lists)
141
142 for documents, weight in zip(document_lists, weights):
143 for doc in documents:
144 scores_map[doc.id] += (doc.score if doc.score else 0) * weight
145 documents_map[doc.id] = doc
146
147 for doc in documents_map.values():
148 doc.score = scores_map[doc.id]
149
150 return list(documents_map.values())
151
152 def _reciprocal_rank_fusion(self, document_lists: List[List[Document]]) -> List[Document]:
153 """
154 Merge multiple lists of Documents and assign scores based on reciprocal rank fusion.
155
156 The constant k is set to 61 (60 was suggested by the original paper,
157 plus 1 as python lists are 0-based and the paper used 1-based ranking).
158 """
159 k = 61
160
161 scores_map: dict = defaultdict(int)
162 documents_map = {}
163 weights = self.weights if self.weights else [1 / len(document_lists)] * len(document_lists)
164
165 # Calculate weighted reciprocal rank fusion score
166 for documents, weight in zip(document_lists, weights):
167 for rank, doc in enumerate(documents):
168 scores_map[doc.id] += (weight * len(document_lists)) / (k + rank)
169 documents_map[doc.id] = doc
170
171 # Normalize scores. Note: len(results) / k is the maximum possible score,
172 # achieved by being ranked first in all doc lists with non-zero weight.
173 for _id in scores_map:
174 scores_map[_id] /= len(document_lists) / k
175
176 for doc in documents_map.values():
177 doc.score = scores_map[doc.id]
178
179 return list(documents_map.values())
180
181 def _distribution_based_rank_fusion(self, document_lists: List[List[Document]]) -> List[Document]:
182 """
183 Merge multiple lists of Documents and assign scores based on Distribution-Based Score Fusion.
184
185 (https://medium.com/plain-simple-software/distribution-based-score-fusion-dbsf-a-new-approach-to-vector-search-ranking-f87c37488b18)
186 If a Document is in more than one retriever, the one with the highest score is used.
187 """
188 for documents in document_lists:
189 scores_list = []
190
191 for doc in documents:
192 scores_list.append(doc.score if doc.score is not None else 0)
193
194 mean_score = sum(scores_list) / len(scores_list)
195 std_dev = (sum((x - mean_score) ** 2 for x in scores_list) / len(scores_list)) ** 0.5
196 min_score = mean_score - 3 * std_dev
197 max_score = mean_score + 3 * std_dev
198
199 for doc in documents:
200 doc.score = (doc.score - min_score) / (max_score - min_score)
201
202 output = self._concatenate(document_lists=document_lists)
203
204 return output
205
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/haystack/components/joiners/document_joiner.py b/haystack/components/joiners/document_joiner.py
--- a/haystack/components/joiners/document_joiner.py
+++ b/haystack/components/joiners/document_joiner.py
@@ -195,9 +195,11 @@
std_dev = (sum((x - mean_score) ** 2 for x in scores_list) / len(scores_list)) ** 0.5
min_score = mean_score - 3 * std_dev
max_score = mean_score + 3 * std_dev
+ delta_score = max_score - min_score
for doc in documents:
- doc.score = (doc.score - min_score) / (max_score - min_score)
+ doc.score = (doc.score - min_score) / delta_score if delta_score != 0.0 else 0.0
+ # if all docs have the same score delta_score is 0, the docs are uninformative for the query
output = self._concatenate(document_lists=document_lists)
| {"golden_diff": "diff --git a/haystack/components/joiners/document_joiner.py b/haystack/components/joiners/document_joiner.py\n--- a/haystack/components/joiners/document_joiner.py\n+++ b/haystack/components/joiners/document_joiner.py\n@@ -195,9 +195,11 @@\n std_dev = (sum((x - mean_score) ** 2 for x in scores_list) / len(scores_list)) ** 0.5\n min_score = mean_score - 3 * std_dev\n max_score = mean_score + 3 * std_dev\n+ delta_score = max_score - min_score\n \n for doc in documents:\n- doc.score = (doc.score - min_score) / (max_score - min_score)\n+ doc.score = (doc.score - min_score) / delta_score if delta_score != 0.0 else 0.0\n+ # if all docs have the same score delta_score is 0, the docs are uninformative for the query\n \n output = self._concatenate(document_lists=document_lists)\n", "issue": "Add Distribution-based rank fusion in JoinDocuments\n**Is your feature request related to a problem? Please describe.**\r\nAdd [Distribution-based rank fusion](https://medium.com/plain-simple-software/distribution-based-score-fusion-dbsf-a-new-approach-to-vector-search-ranking-f87c37488b18) in JoinDocuments\r\n\r\n**Describe the solution you'd like**\r\n```\r\ndef _distribution_based_rank_fusion(self, document_lists):\r\n \"\"\"\r\n Merge multiple lists of Documents and assign scores based on Distribution-Based Score Fusion.\r\n (https://medium.com/plain-simple-software/distribution-based-score-fusion-dbsf-a-new-approach-to-vector-search-ranking-f87c37488b18)\r\n\r\n If a Document is in more than one retriever, the sone with the highest score is used.\r\n \"\"\"\r\n for documents in document_lists:\r\n scores_list = []\r\n\r\n for doc in documents:\r\n scores_list.append(doc.score)\r\n\r\n mean_score = sum(scores_list) / len(scores_list)\r\n std_dev = (\r\n sum((x - mean_score) ** 2 for x in scores_list) / len(scores_list)\r\n ) ** 0.5\r\n min_score = mean_score - 3 * std_dev\r\n max_score = mean_score + 3 * std_dev\r\n\r\n for doc in documents:\r\n doc.score = (doc.score - min_score) / (max_score - min_score)\r\n\r\n output = self._concatenate(document_lists=document_lists)\r\n\r\n return output\r\n```\n", "before_files": [{"content": "# SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]>\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport itertools\nfrom collections import defaultdict\nfrom math import inf\nfrom typing import List, Optional\n\nfrom haystack import Document, component, logging\nfrom haystack.core.component.types import Variadic\n\nlogger = logging.getLogger(__name__)\n\n\n@component\nclass DocumentJoiner:\n \"\"\"\n A component that joins multiple list of Documents into a single list.\n\n It supports different joins modes:\n - concatenate: Keeps the highest scored Document in case of duplicates.\n - merge: Merge a calculate a weighted sum of the scores of duplicate Documents.\n - reciprocal_rank_fusion: Merge and assign scores based on reciprocal rank fusion.\n - distribution_based_rank_fusion: Merge and assign scores based on scores distribution in each retriever\n\n Usage example:\n ```python\n document_store = InMemoryDocumentStore()\n p = Pipeline()\n p.add_component(instance=InMemoryBM25Retriever(document_store=document_store), name=\"bm25_retriever\")\n p.add_component(\n instance=SentenceTransformersTextEmbedder(model=\"sentence-transformers/all-MiniLM-L6-v2\"),\n name=\"text_embedder\",\n )\n p.add_component(instance=InMemoryEmbeddingRetriever(document_store=document_store), name=\"embedding_retriever\")\n p.add_component(instance=DocumentJoiner(), 
name=\"joiner\")\n p.connect(\"bm25_retriever\", \"joiner\")\n p.connect(\"embedding_retriever\", \"joiner\")\n p.connect(\"text_embedder\", \"embedding_retriever\")\n query = \"What is the capital of France?\"\n p.run(data={\"query\": query})\n ```\n \"\"\"\n\n def __init__(\n self,\n join_mode: str = \"concatenate\",\n weights: Optional[List[float]] = None,\n top_k: Optional[int] = None,\n sort_by_score: bool = True,\n ):\n \"\"\"\n Create an DocumentJoiner component.\n\n :param join_mode:\n Specifies the join mode to use. Available modes:\n - `concatenate`\n - `merge`\n - `reciprocal_rank_fusion`\n - `distribution_based_rank_fusion`\n :param weights:\n Weight for each list of Documents received, must have the same length as the number of inputs.\n If `join_mode` is `concatenate` or `distribution_based_rank_fusion` this parameter is ignored.\n :param top_k:\n The maximum number of Documents to return.\n :param sort_by_score:\n If True sorts the Documents by score in descending order.\n If a Document has no score, it is handled as if its score is -infinity.\n \"\"\"\n if join_mode not in [\"concatenate\", \"merge\", \"reciprocal_rank_fusion\", \"distribution_based_rank_fusion\"]:\n raise ValueError(f\"DocumentJoiner component does not support '{join_mode}' join_mode.\")\n self.join_mode = join_mode\n self.weights = [float(i) / sum(weights) for i in weights] if weights else None\n self.top_k = top_k\n self.sort_by_score = sort_by_score\n\n @component.output_types(documents=List[Document])\n def run(self, documents: Variadic[List[Document]], top_k: Optional[int] = None):\n \"\"\"\n Joins multiple lists of Documents into a single list depending on the `join_mode` parameter.\n\n :param documents:\n List of list of Documents to be merged.\n :param top_k:\n The maximum number of Documents to return. Overrides the instance's `top_k` if provided.\n\n :returns:\n A dictionary with the following keys:\n - `documents`: Merged list of Documents\n \"\"\"\n output_documents = []\n\n documents = list(documents)\n if self.join_mode == \"concatenate\":\n output_documents = self._concatenate(documents)\n elif self.join_mode == \"merge\":\n output_documents = self._merge(documents)\n elif self.join_mode == \"reciprocal_rank_fusion\":\n output_documents = self._reciprocal_rank_fusion(documents)\n elif self.join_mode == \"distribution_based_rank_fusion\":\n output_documents = self._distribution_based_rank_fusion(documents)\n\n if self.sort_by_score:\n output_documents = sorted(\n output_documents, key=lambda doc: doc.score if doc.score is not None else -inf, reverse=True\n )\n if any(doc.score is None for doc in output_documents):\n logger.info(\n \"Some of the Documents DocumentJoiner got have score=None. 
It was configured to sort Documents by \"\n \"score, so those with score=None were sorted as if they had a score of -infinity.\"\n )\n\n if top_k:\n output_documents = output_documents[:top_k]\n elif self.top_k:\n output_documents = output_documents[: self.top_k]\n\n return {\"documents\": output_documents}\n\n def _concatenate(self, document_lists: List[List[Document]]) -> List[Document]:\n \"\"\"\n Concatenate multiple lists of Documents and return only the Document with the highest score for duplicates.\n \"\"\"\n output = []\n docs_per_id = defaultdict(list)\n for doc in itertools.chain.from_iterable(document_lists):\n docs_per_id[doc.id].append(doc)\n for docs in docs_per_id.values():\n doc_with_best_score = max(docs, key=lambda doc: doc.score if doc.score else -inf)\n output.append(doc_with_best_score)\n return output\n\n def _merge(self, document_lists: List[List[Document]]) -> List[Document]:\n \"\"\"\n Merge multiple lists of Documents and calculate a weighted sum of the scores of duplicate Documents.\n \"\"\"\n scores_map: dict = defaultdict(int)\n documents_map = {}\n weights = self.weights if self.weights else [1 / len(document_lists)] * len(document_lists)\n\n for documents, weight in zip(document_lists, weights):\n for doc in documents:\n scores_map[doc.id] += (doc.score if doc.score else 0) * weight\n documents_map[doc.id] = doc\n\n for doc in documents_map.values():\n doc.score = scores_map[doc.id]\n\n return list(documents_map.values())\n\n def _reciprocal_rank_fusion(self, document_lists: List[List[Document]]) -> List[Document]:\n \"\"\"\n Merge multiple lists of Documents and assign scores based on reciprocal rank fusion.\n\n The constant k is set to 61 (60 was suggested by the original paper,\n plus 1 as python lists are 0-based and the paper used 1-based ranking).\n \"\"\"\n k = 61\n\n scores_map: dict = defaultdict(int)\n documents_map = {}\n weights = self.weights if self.weights else [1 / len(document_lists)] * len(document_lists)\n\n # Calculate weighted reciprocal rank fusion score\n for documents, weight in zip(document_lists, weights):\n for rank, doc in enumerate(documents):\n scores_map[doc.id] += (weight * len(document_lists)) / (k + rank)\n documents_map[doc.id] = doc\n\n # Normalize scores. 
Note: len(results) / k is the maximum possible score,\n # achieved by being ranked first in all doc lists with non-zero weight.\n for _id in scores_map:\n scores_map[_id] /= len(document_lists) / k\n\n for doc in documents_map.values():\n doc.score = scores_map[doc.id]\n\n return list(documents_map.values())\n\n def _distribution_based_rank_fusion(self, document_lists: List[List[Document]]) -> List[Document]:\n \"\"\"\n Merge multiple lists of Documents and assign scores based on Distribution-Based Score Fusion.\n\n (https://medium.com/plain-simple-software/distribution-based-score-fusion-dbsf-a-new-approach-to-vector-search-ranking-f87c37488b18)\n If a Document is in more than one retriever, the one with the highest score is used.\n \"\"\"\n for documents in document_lists:\n scores_list = []\n\n for doc in documents:\n scores_list.append(doc.score if doc.score is not None else 0)\n\n mean_score = sum(scores_list) / len(scores_list)\n std_dev = (sum((x - mean_score) ** 2 for x in scores_list) / len(scores_list)) ** 0.5\n min_score = mean_score - 3 * std_dev\n max_score = mean_score + 3 * std_dev\n\n for doc in documents:\n doc.score = (doc.score - min_score) / (max_score - min_score)\n\n output = self._concatenate(document_lists=document_lists)\n\n return output\n", "path": "haystack/components/joiners/document_joiner.py"}], "after_files": [{"content": "# SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]>\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport itertools\nfrom collections import defaultdict\nfrom math import inf\nfrom typing import List, Optional\n\nfrom haystack import Document, component, logging\nfrom haystack.core.component.types import Variadic\n\nlogger = logging.getLogger(__name__)\n\n\n@component\nclass DocumentJoiner:\n \"\"\"\n A component that joins multiple list of Documents into a single list.\n\n It supports different joins modes:\n - concatenate: Keeps the highest scored Document in case of duplicates.\n - merge: Merge a calculate a weighted sum of the scores of duplicate Documents.\n - reciprocal_rank_fusion: Merge and assign scores based on reciprocal rank fusion.\n - distribution_based_rank_fusion: Merge and assign scores based on scores distribution in each retriever\n\n Usage example:\n ```python\n document_store = InMemoryDocumentStore()\n p = Pipeline()\n p.add_component(instance=InMemoryBM25Retriever(document_store=document_store), name=\"bm25_retriever\")\n p.add_component(\n instance=SentenceTransformersTextEmbedder(model=\"sentence-transformers/all-MiniLM-L6-v2\"),\n name=\"text_embedder\",\n )\n p.add_component(instance=InMemoryEmbeddingRetriever(document_store=document_store), name=\"embedding_retriever\")\n p.add_component(instance=DocumentJoiner(), name=\"joiner\")\n p.connect(\"bm25_retriever\", \"joiner\")\n p.connect(\"embedding_retriever\", \"joiner\")\n p.connect(\"text_embedder\", \"embedding_retriever\")\n query = \"What is the capital of France?\"\n p.run(data={\"query\": query})\n ```\n \"\"\"\n\n def __init__(\n self,\n join_mode: str = \"concatenate\",\n weights: Optional[List[float]] = None,\n top_k: Optional[int] = None,\n sort_by_score: bool = True,\n ):\n \"\"\"\n Create an DocumentJoiner component.\n\n :param join_mode:\n Specifies the join mode to use. 
Available modes:\n - `concatenate`\n - `merge`\n - `reciprocal_rank_fusion`\n - `distribution_based_rank_fusion`\n :param weights:\n Weight for each list of Documents received, must have the same length as the number of inputs.\n If `join_mode` is `concatenate` or `distribution_based_rank_fusion` this parameter is ignored.\n :param top_k:\n The maximum number of Documents to return.\n :param sort_by_score:\n If True sorts the Documents by score in descending order.\n If a Document has no score, it is handled as if its score is -infinity.\n \"\"\"\n if join_mode not in [\"concatenate\", \"merge\", \"reciprocal_rank_fusion\", \"distribution_based_rank_fusion\"]:\n raise ValueError(f\"DocumentJoiner component does not support '{join_mode}' join_mode.\")\n self.join_mode = join_mode\n self.weights = [float(i) / sum(weights) for i in weights] if weights else None\n self.top_k = top_k\n self.sort_by_score = sort_by_score\n\n @component.output_types(documents=List[Document])\n def run(self, documents: Variadic[List[Document]], top_k: Optional[int] = None):\n \"\"\"\n Joins multiple lists of Documents into a single list depending on the `join_mode` parameter.\n\n :param documents:\n List of list of Documents to be merged.\n :param top_k:\n The maximum number of Documents to return. Overrides the instance's `top_k` if provided.\n\n :returns:\n A dictionary with the following keys:\n - `documents`: Merged list of Documents\n \"\"\"\n output_documents = []\n\n documents = list(documents)\n if self.join_mode == \"concatenate\":\n output_documents = self._concatenate(documents)\n elif self.join_mode == \"merge\":\n output_documents = self._merge(documents)\n elif self.join_mode == \"reciprocal_rank_fusion\":\n output_documents = self._reciprocal_rank_fusion(documents)\n elif self.join_mode == \"distribution_based_rank_fusion\":\n output_documents = self._distribution_based_rank_fusion(documents)\n\n if self.sort_by_score:\n output_documents = sorted(\n output_documents, key=lambda doc: doc.score if doc.score is not None else -inf, reverse=True\n )\n if any(doc.score is None for doc in output_documents):\n logger.info(\n \"Some of the Documents DocumentJoiner got have score=None. 
It was configured to sort Documents by \"\n \"score, so those with score=None were sorted as if they had a score of -infinity.\"\n )\n\n if top_k:\n output_documents = output_documents[:top_k]\n elif self.top_k:\n output_documents = output_documents[: self.top_k]\n\n return {\"documents\": output_documents}\n\n def _concatenate(self, document_lists: List[List[Document]]) -> List[Document]:\n \"\"\"\n Concatenate multiple lists of Documents and return only the Document with the highest score for duplicates.\n \"\"\"\n output = []\n docs_per_id = defaultdict(list)\n for doc in itertools.chain.from_iterable(document_lists):\n docs_per_id[doc.id].append(doc)\n for docs in docs_per_id.values():\n doc_with_best_score = max(docs, key=lambda doc: doc.score if doc.score else -inf)\n output.append(doc_with_best_score)\n return output\n\n def _merge(self, document_lists: List[List[Document]]) -> List[Document]:\n \"\"\"\n Merge multiple lists of Documents and calculate a weighted sum of the scores of duplicate Documents.\n \"\"\"\n scores_map: dict = defaultdict(int)\n documents_map = {}\n weights = self.weights if self.weights else [1 / len(document_lists)] * len(document_lists)\n\n for documents, weight in zip(document_lists, weights):\n for doc in documents:\n scores_map[doc.id] += (doc.score if doc.score else 0) * weight\n documents_map[doc.id] = doc\n\n for doc in documents_map.values():\n doc.score = scores_map[doc.id]\n\n return list(documents_map.values())\n\n def _reciprocal_rank_fusion(self, document_lists: List[List[Document]]) -> List[Document]:\n \"\"\"\n Merge multiple lists of Documents and assign scores based on reciprocal rank fusion.\n\n The constant k is set to 61 (60 was suggested by the original paper,\n plus 1 as python lists are 0-based and the paper used 1-based ranking).\n \"\"\"\n k = 61\n\n scores_map: dict = defaultdict(int)\n documents_map = {}\n weights = self.weights if self.weights else [1 / len(document_lists)] * len(document_lists)\n\n # Calculate weighted reciprocal rank fusion score\n for documents, weight in zip(document_lists, weights):\n for rank, doc in enumerate(documents):\n scores_map[doc.id] += (weight * len(document_lists)) / (k + rank)\n documents_map[doc.id] = doc\n\n # Normalize scores. 
Note: len(results) / k is the maximum possible score,\n # achieved by being ranked first in all doc lists with non-zero weight.\n for _id in scores_map:\n scores_map[_id] /= len(document_lists) / k\n\n for doc in documents_map.values():\n doc.score = scores_map[doc.id]\n\n return list(documents_map.values())\n\n def _distribution_based_rank_fusion(self, document_lists: List[List[Document]]) -> List[Document]:\n \"\"\"\n Merge multiple lists of Documents and assign scores based on Distribution-Based Score Fusion.\n\n (https://medium.com/plain-simple-software/distribution-based-score-fusion-dbsf-a-new-approach-to-vector-search-ranking-f87c37488b18)\n If a Document is in more than one retriever, the one with the highest score is used.\n \"\"\"\n for documents in document_lists:\n scores_list = []\n\n for doc in documents:\n scores_list.append(doc.score if doc.score is not None else 0)\n\n mean_score = sum(scores_list) / len(scores_list)\n std_dev = (sum((x - mean_score) ** 2 for x in scores_list) / len(scores_list)) ** 0.5\n min_score = mean_score - 3 * std_dev\n max_score = mean_score + 3 * std_dev\n delta_score = max_score - min_score\n\n for doc in documents:\n doc.score = (doc.score - min_score) / delta_score if delta_score != 0.0 else 0.0\n # if all docs have the same score delta_score is 0, the docs are uninformative for the query\n\n output = self._concatenate(document_lists=document_lists)\n\n return output\n", "path": "haystack/components/joiners/document_joiner.py"}]} | 2,966 | 233 |
gh_patches_debug_24424 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-2112 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Domain request table: Capture the "CISA region" a `domain request` is in
### Issue description
_As an analyst
I want to know which [CISA region](https://www.cisa.gov/about/regions) a request is in
so that if my research doesn't determine a request is authentic/eligible, I can know which region I need to reach out to_
Each US state/territory is in a "CISA region". When we need additional assistance in verification, we can reach out to regional contacts. Having regional designations in-registrar means we don't have to look it up, saving analyst time.
### Acceptance criteria
- [ ] For **non-federal requests**, the domain request detail page shows the CISA Region number based on the state abbreviation code of the organization address in the request. (See markup in "Additional Context")
- [ ] For **federal requests**, the domain request detail page shows "N/A" for the CISA region number, and no table lookup is necessary.
- [ ] The CISA region is not displayed on any user-facing views.
### Additional context
Please make this reusable by domain information as well. Consider a domain helper or other utility that takes in the org address state/territory abbreviation and returns the number for the region. Regions and state abbreviations haven't changed in a long time and aren't expected to change in the future, so they do not need to be stored in a table; a simple dictionary lookup (while ugly) is probably the simplest solution.
Based on the official two-letter state/territory abbreviation from the organization address, use the data in the following table to determine the region number:
| Region | Locales |
| ------- | ----- |
| 1 |Connecticut, Maine, Massachusetts, New Hampshire, Rhode Island, Vermont|
| 2 |New Jersey, New York, Puerto Rico, U.S. Virgin Islands|
|3| Delaware, District of Columbia, Maryland, Pennsylvania, Virginia, West Virginia|
|4| Alabama, Florida, Georgia, Kentucky, Mississippi, North Carolina, South Carolina, Tennessee|
|5| Illinois, Indiana, Michigan, Minnesota, Ohio, Wisconsin|
|6| Arkansas, Louisiana, New Mexico, Oklahoma, Texas|
|7|Iowa, Kansas, Missouri, Nebraska|
|8|Colorado, Montana, North Dakota, South Dakota, Utah, Wyoming|
|9|Arizona, California, Hawaii, Nevada, Guam, American Samoa, Commonwealth of the Northern Mariana Islands|
|10|Alaska, Idaho, Oregon, Washington|
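
As a rough sketch of that dictionary-lookup idea (illustrative only — the helper name `get_cisa_region` and its placement are assumptions, not the project's actual code), the table above boils down to a flat mapping:

```python
# Illustrative sketch only (not the project's implementation): build a flat
# {state_abbreviation: region_number} lookup from the table above.
_CISA_REGIONS = {
    1: ("CT", "ME", "MA", "NH", "RI", "VT"),
    2: ("NJ", "NY", "PR", "VI"),
    3: ("DE", "DC", "MD", "PA", "VA", "WV"),
    4: ("AL", "FL", "GA", "KY", "MS", "NC", "SC", "TN"),
    5: ("IL", "IN", "MI", "MN", "OH", "WI"),
    6: ("AR", "LA", "NM", "OK", "TX"),
    7: ("IA", "KS", "MO", "NE"),
    8: ("CO", "MT", "ND", "SD", "UT", "WY"),
    9: ("AZ", "CA", "HI", "NV", "GU", "AS", "MP"),
    10: ("AK", "ID", "OR", "WA"),
}
_STATE_TO_REGION = {state: region for region, states in _CISA_REGIONS.items() for state in states}


def get_cisa_region(state_abbreviation):
    """Return the CISA region number for a two-letter code, or "N/A" if the code is unknown."""
    if not state_abbreviation or not isinstance(state_abbreviation, str):
        return None
    return _STATE_TO_REGION.get(state_abbreviation.upper(), "N/A")
```

Federal requests would bypass the lookup entirely and render "N/A" directly, per the acceptance criteria.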
Click Image to open Miro:
[](https://miro.com/app/board/uXjVKNvtde0=/?moveToWidget=3458764587423408611&cot=14)
### Links to other issues
Blocks #2095
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/registrar/templatetags/custom_filters.py`
Content:
```
1 import logging
2 from django import template
3 import re
4 from registrar.models.domain_request import DomainRequest
5
6 register = template.Library()
7 logger = logging.getLogger(__name__)
8
9
10 @register.filter(name="extract_value")
11 def extract_value(html_input):
12 match = re.search(r'value="([^"]*)"', html_input)
13 if match:
14 return match.group(1)
15 return ""
16
17
18 @register.filter
19 def extract_a_text(value):
20 # Use regex to extract the text within the <a> tag
21 pattern = r"<a\b[^>]*>(.*?)</a>"
22 match = re.search(pattern, value)
23 if match:
24 extracted_text = match.group(1)
25 else:
26 extracted_text = ""
27
28 return extracted_text
29
30
31 @register.filter
32 def find_index(haystack, needle):
33 try:
34 return haystack.index(needle)
35 except ValueError:
36 return -1
37
38
39 @register.filter
40 def slice_after(value, substring):
41 index = value.find(substring)
42 if index != -1:
43 result = value[index + len(substring) :]
44 return result
45 return value
46
47
48 @register.filter
49 def contains_checkbox(html_list):
50 for html_string in html_list:
51 if re.search(r'<input[^>]*type="checkbox"', html_string):
52 return True
53 return False
54
55
56 @register.filter
57 def get_organization_long_name(generic_org_type):
58 organization_choices_dict = dict(DomainRequest.OrganizationChoicesVerbose.choices)
59 long_form_type = organization_choices_dict[generic_org_type]
60 if long_form_type is None:
61 logger.error("Organization type error, triggered by a template's custom filter")
62 return "Error"
63
64 return long_form_type
65
66
67 @register.filter(name="has_permission")
68 def has_permission(user, permission):
69 return user.has_perm(permission)
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/registrar/templatetags/custom_filters.py b/src/registrar/templatetags/custom_filters.py
--- a/src/registrar/templatetags/custom_filters.py
+++ b/src/registrar/templatetags/custom_filters.py
@@ -67,3 +67,69 @@
@register.filter(name="has_permission")
def has_permission(user, permission):
return user.has_perm(permission)
+
+
[email protected]
+def get_region(state):
+ if state and isinstance(state, str):
+ regions = {
+ "CT": 1,
+ "ME": 1,
+ "MA": 1,
+ "NH": 1,
+ "RI": 1,
+ "VT": 1,
+ "NJ": 2,
+ "NY": 2,
+ "PR": 2,
+ "VI": 2,
+ "DE": 3,
+ "DC": 3,
+ "MD": 3,
+ "PA": 3,
+ "VA": 3,
+ "WV": 3,
+ "AL": 4,
+ "FL": 4,
+ "GA": 4,
+ "KY": 4,
+ "MS": 4,
+ "NC": 4,
+ "SC": 4,
+ "TN": 4,
+ "IL": 5,
+ "IN": 5,
+ "MI": 5,
+ "MN": 5,
+ "OH": 5,
+ "WI": 5,
+ "AR": 6,
+ "LA": 6,
+ "NM": 6,
+ "OK": 6,
+ "TX": 6,
+ "IA": 7,
+ "KS": 7,
+ "MO": 7,
+ "NE": 7,
+ "CO": 8,
+ "MT": 8,
+ "ND": 8,
+ "SD": 8,
+ "UT": 8,
+ "WY": 8,
+ "AZ": 9,
+ "CA": 9,
+ "HI": 9,
+ "NV": 9,
+ "GU": 9,
+ "AS": 9,
+ "MP": 9,
+ "AK": 10,
+ "ID": 10,
+ "OR": 10,
+ "WA": 10,
+ }
+ return regions.get(state.upper(), "N/A")
+ else:
+ return None
| {"golden_diff": "diff --git a/src/registrar/templatetags/custom_filters.py b/src/registrar/templatetags/custom_filters.py\n--- a/src/registrar/templatetags/custom_filters.py\n+++ b/src/registrar/templatetags/custom_filters.py\n@@ -67,3 +67,69 @@\n @register.filter(name=\"has_permission\")\n def has_permission(user, permission):\n return user.has_perm(permission)\n+\n+\[email protected]\n+def get_region(state):\n+ if state and isinstance(state, str):\n+ regions = {\n+ \"CT\": 1,\n+ \"ME\": 1,\n+ \"MA\": 1,\n+ \"NH\": 1,\n+ \"RI\": 1,\n+ \"VT\": 1,\n+ \"NJ\": 2,\n+ \"NY\": 2,\n+ \"PR\": 2,\n+ \"VI\": 2,\n+ \"DE\": 3,\n+ \"DC\": 3,\n+ \"MD\": 3,\n+ \"PA\": 3,\n+ \"VA\": 3,\n+ \"WV\": 3,\n+ \"AL\": 4,\n+ \"FL\": 4,\n+ \"GA\": 4,\n+ \"KY\": 4,\n+ \"MS\": 4,\n+ \"NC\": 4,\n+ \"SC\": 4,\n+ \"TN\": 4,\n+ \"IL\": 5,\n+ \"IN\": 5,\n+ \"MI\": 5,\n+ \"MN\": 5,\n+ \"OH\": 5,\n+ \"WI\": 5,\n+ \"AR\": 6,\n+ \"LA\": 6,\n+ \"NM\": 6,\n+ \"OK\": 6,\n+ \"TX\": 6,\n+ \"IA\": 7,\n+ \"KS\": 7,\n+ \"MO\": 7,\n+ \"NE\": 7,\n+ \"CO\": 8,\n+ \"MT\": 8,\n+ \"ND\": 8,\n+ \"SD\": 8,\n+ \"UT\": 8,\n+ \"WY\": 8,\n+ \"AZ\": 9,\n+ \"CA\": 9,\n+ \"HI\": 9,\n+ \"NV\": 9,\n+ \"GU\": 9,\n+ \"AS\": 9,\n+ \"MP\": 9,\n+ \"AK\": 10,\n+ \"ID\": 10,\n+ \"OR\": 10,\n+ \"WA\": 10,\n+ }\n+ return regions.get(state.upper(), \"N/A\")\n+ else:\n+ return None\n", "issue": "Domain request table: Capture the \"CISA region\" a `domain request` is in\n### Issue description\n\n_As an analyst\nI want to know which [CISA region](https://www.cisa.gov/about/regions) a request is in \nso that if my research doesn't determine a request is authentic/eligible, I can know which region I need to reach out to_\n\nEach US state/territory is in a \"CISA region\". When we need additional assistance in verification, we can reach out to regional contacts. Having regional designations in-registrar means we don't have to look it up, saving analyst time.\n\n### Acceptance criteria\n\n- [ ] For **non-federal requests**, the domain request detail page shows the CISA Region number based on the state abbreviation code of the organization address in the request. (See markup in \"Additional Context\")\n- [ ] For **federal requests**, the domain request detail page shows \"N/A\" for the CISA region number, and no table lookup is necessary.\n- [ ] The CISA region is not displayed on any user-facing views.\n\n### Additional context\nPlease make this reusable by domain information as well. consider domain helper or other utility that can then take in the org address state/territory abbreviation and returns the number for the region. Regions and state abbreviations haven't changed in a long time and aren't expected to change in the future, they do not need to be in a table and a simple dictionary lookup (while ugly) is probably the simplest solution.\n\nBased on the official two-letter state/territory abbreviation from the organization address, use the data in the following table to determine the region number:\n\n| Region | Locales |\n| ------- | ----- |\n| 1 |Connecticut, Maine, Massachusetts, New Hampshire, Rhode Island, Vermont|\n| 2 |New Jersey, New York, Puerto Rico, U.S. 
Virgin Islands|\n|3| Delaware, District of Columbia, Maryland, Pennsylvania, Virginia, West Virginia|\n|4| Alabama, Florida, Georgia, Kentucky, Mississippi, North Carolina, South Carolina, Tennessee|\n|5| Illinois, Indiana, Michigan, Minnesota, Ohio, Wisconsin|\n|6| Arkansas, Louisiana, New Mexico, Oklahoma, Texas|\n|7|Iowa, Kansas, Missouri, Nebraska|\n|8|Colorado, Montana, North Dakota, South Dakota, Utah, Wyoming|\n|9|Arizona, California, Hawaii, Nevada, Guam, American Samoa, Commonwealth of the Northern Mariana Islands|\n|10|Alaska, Idaho, Oregon, Washington|\n\nClick Image to open Miro:\n[](https://miro.com/app/board/uXjVKNvtde0=/?moveToWidget=3458764587423408611&cot=14)\n\n### Links to other issues\n\nBlocks #2095 \n", "before_files": [{"content": "import logging\nfrom django import template\nimport re\nfrom registrar.models.domain_request import DomainRequest\n\nregister = template.Library()\nlogger = logging.getLogger(__name__)\n\n\[email protected](name=\"extract_value\")\ndef extract_value(html_input):\n match = re.search(r'value=\"([^\"]*)\"', html_input)\n if match:\n return match.group(1)\n return \"\"\n\n\[email protected]\ndef extract_a_text(value):\n # Use regex to extract the text within the <a> tag\n pattern = r\"<a\\b[^>]*>(.*?)</a>\"\n match = re.search(pattern, value)\n if match:\n extracted_text = match.group(1)\n else:\n extracted_text = \"\"\n\n return extracted_text\n\n\[email protected]\ndef find_index(haystack, needle):\n try:\n return haystack.index(needle)\n except ValueError:\n return -1\n\n\[email protected]\ndef slice_after(value, substring):\n index = value.find(substring)\n if index != -1:\n result = value[index + len(substring) :]\n return result\n return value\n\n\[email protected]\ndef contains_checkbox(html_list):\n for html_string in html_list:\n if re.search(r'<input[^>]*type=\"checkbox\"', html_string):\n return True\n return False\n\n\[email protected]\ndef get_organization_long_name(generic_org_type):\n organization_choices_dict = dict(DomainRequest.OrganizationChoicesVerbose.choices)\n long_form_type = organization_choices_dict[generic_org_type]\n if long_form_type is None:\n logger.error(\"Organization type error, triggered by a template's custom filter\")\n return \"Error\"\n\n return long_form_type\n\n\[email protected](name=\"has_permission\")\ndef has_permission(user, permission):\n return user.has_perm(permission)\n", "path": "src/registrar/templatetags/custom_filters.py"}], "after_files": [{"content": "import logging\nfrom django import template\nimport re\nfrom registrar.models.domain_request import DomainRequest\n\nregister = template.Library()\nlogger = logging.getLogger(__name__)\n\n\[email protected](name=\"extract_value\")\ndef extract_value(html_input):\n match = re.search(r'value=\"([^\"]*)\"', html_input)\n if match:\n return match.group(1)\n return \"\"\n\n\[email protected]\ndef extract_a_text(value):\n # Use regex to extract the text within the <a> tag\n pattern = r\"<a\\b[^>]*>(.*?)</a>\"\n match = re.search(pattern, value)\n if match:\n extracted_text = match.group(1)\n else:\n extracted_text = \"\"\n\n return extracted_text\n\n\[email protected]\ndef find_index(haystack, needle):\n try:\n return haystack.index(needle)\n except ValueError:\n return -1\n\n\[email protected]\ndef slice_after(value, substring):\n index = value.find(substring)\n if index != -1:\n result = value[index + len(substring) :]\n return result\n return value\n\n\[email protected]\ndef contains_checkbox(html_list):\n for html_string in html_list:\n if 
re.search(r'<input[^>]*type=\"checkbox\"', html_string):\n return True\n return False\n\n\[email protected]\ndef get_organization_long_name(generic_org_type):\n organization_choices_dict = dict(DomainRequest.OrganizationChoicesVerbose.choices)\n long_form_type = organization_choices_dict[generic_org_type]\n if long_form_type is None:\n logger.error(\"Organization type error, triggered by a template's custom filter\")\n return \"Error\"\n\n return long_form_type\n\n\[email protected](name=\"has_permission\")\ndef has_permission(user, permission):\n return user.has_perm(permission)\n\n\[email protected]\ndef get_region(state):\n if state and isinstance(state, str):\n regions = {\n \"CT\": 1,\n \"ME\": 1,\n \"MA\": 1,\n \"NH\": 1,\n \"RI\": 1,\n \"VT\": 1,\n \"NJ\": 2,\n \"NY\": 2,\n \"PR\": 2,\n \"VI\": 2,\n \"DE\": 3,\n \"DC\": 3,\n \"MD\": 3,\n \"PA\": 3,\n \"VA\": 3,\n \"WV\": 3,\n \"AL\": 4,\n \"FL\": 4,\n \"GA\": 4,\n \"KY\": 4,\n \"MS\": 4,\n \"NC\": 4,\n \"SC\": 4,\n \"TN\": 4,\n \"IL\": 5,\n \"IN\": 5,\n \"MI\": 5,\n \"MN\": 5,\n \"OH\": 5,\n \"WI\": 5,\n \"AR\": 6,\n \"LA\": 6,\n \"NM\": 6,\n \"OK\": 6,\n \"TX\": 6,\n \"IA\": 7,\n \"KS\": 7,\n \"MO\": 7,\n \"NE\": 7,\n \"CO\": 8,\n \"MT\": 8,\n \"ND\": 8,\n \"SD\": 8,\n \"UT\": 8,\n \"WY\": 8,\n \"AZ\": 9,\n \"CA\": 9,\n \"HI\": 9,\n \"NV\": 9,\n \"GU\": 9,\n \"AS\": 9,\n \"MP\": 9,\n \"AK\": 10,\n \"ID\": 10,\n \"OR\": 10,\n \"WA\": 10,\n }\n return regions.get(state.upper(), \"N/A\")\n else:\n return None\n", "path": "src/registrar/templatetags/custom_filters.py"}]} | 1,430 | 597 |
gh_patches_debug_4893 | rasdani/github-patches | git_diff | Rapptz__discord.py-6511 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Context.channel
### Summary
The type for [Context.channel](https://discordpy.readthedocs.io/en/latest/api.html?highlight=message%20channel#discord.Message.channel) seems to be incorrect in the doc
### Reproduction Steps
See [Context.channel](https://discordpy.readthedocs.io/en/latest/ext/commands/api.html?highlight=context%20channel#discord.ext.commands.Context.channel)
The type shown here is `TextChannel`, although this could also be a `DMChannel`.
### Expected Results
The type for Context.channel should be the same as [Message.channel](https://discordpy.readthedocs.io/en/latest/api.html?highlight=message%20channel#discord.Message.channel) which is `Union[abc.Messageable]`
### Checklist
- [X] I have searched the open issues for duplicates.
- [X] I have shown the entire traceback, if possible.
- [X] I have removed my token from display, if visible.
I was unsure which template to use, so I picked this one.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `discord/ext/commands/context.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """
4 The MIT License (MIT)
5
6 Copyright (c) 2015-present Rapptz
7
8 Permission is hereby granted, free of charge, to any person obtaining a
9 copy of this software and associated documentation files (the "Software"),
10 to deal in the Software without restriction, including without limitation
11 the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 and/or sell copies of the Software, and to permit persons to whom the
13 Software is furnished to do so, subject to the following conditions:
14
15 The above copyright notice and this permission notice shall be included in
16 all copies or substantial portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23 FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
24 DEALINGS IN THE SOFTWARE.
25 """
26
27 import discord.abc
28 import discord.utils
29
30 class Context(discord.abc.Messageable):
31 r"""Represents the context in which a command is being invoked under.
32
33 This class contains a lot of meta data to help you understand more about
34 the invocation context. This class is not created manually and is instead
35 passed around to commands as the first parameter.
36
37 This class implements the :class:`~discord.abc.Messageable` ABC.
38
39 Attributes
40 -----------
41 message: :class:`.Message`
42 The message that triggered the command being executed.
43 bot: :class:`.Bot`
44 The bot that contains the command being executed.
45 args: :class:`list`
46 The list of transformed arguments that were passed into the command.
47 If this is accessed during the :func:`on_command_error` event
48 then this list could be incomplete.
49 kwargs: :class:`dict`
50 A dictionary of transformed arguments that were passed into the command.
51 Similar to :attr:`args`\, if this is accessed in the
52 :func:`on_command_error` event then this dict could be incomplete.
53 prefix: :class:`str`
54 The prefix that was used to invoke the command.
55 command: :class:`Command`
56 The command that is being invoked currently.
57 invoked_with: :class:`str`
58 The command name that triggered this invocation. Useful for finding out
59 which alias called the command.
60 invoked_parents: List[:class:`str`]
61 The command names of the parents that triggered this invocation. Useful for
62 finding out which aliases called the command.
63
64 For example in commands ``?a b c test``, the invoked parents are ``['a', 'b', 'c']``.
65
66 .. versionadded:: 1.7
67
68 invoked_subcommand: :class:`Command`
69 The subcommand that was invoked.
70 If no valid subcommand was invoked then this is equal to ``None``.
71 subcommand_passed: Optional[:class:`str`]
72 The string that was attempted to call a subcommand. This does not have
73 to point to a valid registered subcommand and could just point to a
74 nonsense string. If nothing was passed to attempt a call to a
75 subcommand then this is set to ``None``.
76 command_failed: :class:`bool`
77 A boolean that indicates if the command failed to be parsed, checked,
78 or invoked.
79 """
80
81 def __init__(self, **attrs):
82 self.message = attrs.pop('message', None)
83 self.bot = attrs.pop('bot', None)
84 self.args = attrs.pop('args', [])
85 self.kwargs = attrs.pop('kwargs', {})
86 self.prefix = attrs.pop('prefix')
87 self.command = attrs.pop('command', None)
88 self.view = attrs.pop('view', None)
89 self.invoked_with = attrs.pop('invoked_with', None)
90 self.invoked_parents = attrs.pop('invoked_parents', [])
91 self.invoked_subcommand = attrs.pop('invoked_subcommand', None)
92 self.subcommand_passed = attrs.pop('subcommand_passed', None)
93 self.command_failed = attrs.pop('command_failed', False)
94 self._state = self.message._state
95
96 async def invoke(self, *args, **kwargs):
97 r"""|coro|
98
99 Calls a command with the arguments given.
100
101 This is useful if you want to just call the callback that a
102 :class:`.Command` holds internally.
103
104 .. note::
105
106 This does not handle converters, checks, cooldowns, pre-invoke,
107 or after-invoke hooks in any matter. It calls the internal callback
108 directly as-if it was a regular function.
109
110 You must take care in passing the proper arguments when
111 using this function.
112
113 .. warning::
114
115 The first parameter passed **must** be the command being invoked.
116
117 Parameters
118 -----------
119 command: :class:`.Command`
120 The command that is going to be called.
121 \*args
122 The arguments to to use.
123 \*\*kwargs
124 The keyword arguments to use.
125
126 Raises
127 -------
128 TypeError
129 The command argument to invoke is missing.
130 """
131
132 try:
133 command = args[0]
134 except IndexError:
135 raise TypeError('Missing command to invoke.') from None
136
137 arguments = []
138 if command.cog is not None:
139 arguments.append(command.cog)
140
141 arguments.append(self)
142 arguments.extend(args[1:])
143
144 ret = await command.callback(*arguments, **kwargs)
145 return ret
146
147 async def reinvoke(self, *, call_hooks=False, restart=True):
148 """|coro|
149
150 Calls the command again.
151
152 This is similar to :meth:`~.Context.invoke` except that it bypasses
153 checks, cooldowns, and error handlers.
154
155 .. note::
156
157 If you want to bypass :exc:`.UserInputError` derived exceptions,
158 it is recommended to use the regular :meth:`~.Context.invoke`
159 as it will work more naturally. After all, this will end up
160 using the old arguments the user has used and will thus just
161 fail again.
162
163 Parameters
164 ------------
165 call_hooks: :class:`bool`
166 Whether to call the before and after invoke hooks.
167 restart: :class:`bool`
168 Whether to start the call chain from the very beginning
169 or where we left off (i.e. the command that caused the error).
170 The default is to start where we left off.
171
172 Raises
173 -------
174 ValueError
175 The context to reinvoke is not valid.
176 """
177 cmd = self.command
178 view = self.view
179 if cmd is None:
180 raise ValueError('This context is not valid.')
181
182 # some state to revert to when we're done
183 index, previous = view.index, view.previous
184 invoked_with = self.invoked_with
185 invoked_subcommand = self.invoked_subcommand
186 invoked_parents = self.invoked_parents
187 subcommand_passed = self.subcommand_passed
188
189 if restart:
190 to_call = cmd.root_parent or cmd
191 view.index = len(self.prefix)
192 view.previous = 0
193 self.invoked_parents = []
194 self.invoked_with = view.get_word() # advance to get the root command
195 else:
196 to_call = cmd
197
198 try:
199 await to_call.reinvoke(self, call_hooks=call_hooks)
200 finally:
201 self.command = cmd
202 view.index = index
203 view.previous = previous
204 self.invoked_with = invoked_with
205 self.invoked_subcommand = invoked_subcommand
206 self.invoked_parents = invoked_parents
207 self.subcommand_passed = subcommand_passed
208
209 @property
210 def valid(self):
211 """:class:`bool`: Checks if the invocation context is valid to be invoked with."""
212 return self.prefix is not None and self.command is not None
213
214 async def _get_channel(self):
215 return self.channel
216
217 @property
218 def cog(self):
219 """Optional[:class:`.Cog`]: Returns the cog associated with this context's command. None if it does not exist."""
220
221 if self.command is None:
222 return None
223 return self.command.cog
224
225 @discord.utils.cached_property
226 def guild(self):
227 """Optional[:class:`.Guild`]: Returns the guild associated with this context's command. None if not available."""
228 return self.message.guild
229
230 @discord.utils.cached_property
231 def channel(self):
232 """:class:`.TextChannel`:
233 Returns the channel associated with this context's command. Shorthand for :attr:`.Message.channel`.
234 """
235 return self.message.channel
236
237 @discord.utils.cached_property
238 def author(self):
239 """Union[:class:`~discord.User`, :class:`.Member`]:
240 Returns the author associated with this context's command. Shorthand for :attr:`.Message.author`
241 """
242 return self.message.author
243
244 @discord.utils.cached_property
245 def me(self):
246 """Union[:class:`.Member`, :class:`.ClientUser`]:
247 Similar to :attr:`.Guild.me` except it may return the :class:`.ClientUser` in private message contexts.
248 """
249 return self.guild.me if self.guild is not None else self.bot.user
250
251 @property
252 def voice_client(self):
253 r"""Optional[:class:`.VoiceProtocol`]: A shortcut to :attr:`.Guild.voice_client`\, if applicable."""
254 g = self.guild
255 return g.voice_client if g else None
256
257 async def send_help(self, *args):
258 """send_help(entity=<bot>)
259
260 |coro|
261
262 Shows the help command for the specified entity if given.
263 The entity can be a command or a cog.
264
265 If no entity is given, then it'll show help for the
266 entire bot.
267
268 If the entity is a string, then it looks up whether it's a
269 :class:`Cog` or a :class:`Command`.
270
271 .. note::
272
273 Due to the way this function works, instead of returning
274 something similar to :meth:`~.commands.HelpCommand.command_not_found`
275 this returns :class:`None` on bad input or no help command.
276
277 Parameters
278 ------------
279 entity: Optional[Union[:class:`Command`, :class:`Cog`, :class:`str`]]
280 The entity to show help for.
281
282 Returns
283 --------
284 Any
285 The result of the help command, if any.
286 """
287 from .core import Group, Command, wrap_callback
288 from .errors import CommandError
289
290 bot = self.bot
291 cmd = bot.help_command
292
293 if cmd is None:
294 return None
295
296 cmd = cmd.copy()
297 cmd.context = self
298 if len(args) == 0:
299 await cmd.prepare_help_command(self, None)
300 mapping = cmd.get_bot_mapping()
301 injected = wrap_callback(cmd.send_bot_help)
302 try:
303 return await injected(mapping)
304 except CommandError as e:
305 await cmd.on_help_command_error(self, e)
306 return None
307
308 entity = args[0]
309 if entity is None:
310 return None
311
312 if isinstance(entity, str):
313 entity = bot.get_cog(entity) or bot.get_command(entity)
314
315 try:
316 entity.qualified_name
317 except AttributeError:
318 # if we're here then it's not a cog, group, or command.
319 return None
320
321 await cmd.prepare_help_command(self, entity.qualified_name)
322
323 try:
324 if hasattr(entity, '__cog_commands__'):
325 injected = wrap_callback(cmd.send_cog_help)
326 return await injected(entity)
327 elif isinstance(entity, Group):
328 injected = wrap_callback(cmd.send_group_help)
329 return await injected(entity)
330 elif isinstance(entity, Command):
331 injected = wrap_callback(cmd.send_command_help)
332 return await injected(entity)
333 else:
334 return None
335 except CommandError as e:
336 await cmd.on_help_command_error(self, e)
337
338 async def reply(self, content=None, **kwargs):
339 return await self.message.reply(content, **kwargs)
340
341 reply.__doc__ = discord.Message.reply.__doc__
342
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/discord/ext/commands/context.py b/discord/ext/commands/context.py
--- a/discord/ext/commands/context.py
+++ b/discord/ext/commands/context.py
@@ -229,8 +229,8 @@
@discord.utils.cached_property
def channel(self):
- """:class:`.TextChannel`:
- Returns the channel associated with this context's command. Shorthand for :attr:`.Message.channel`.
+ """Union[:class:`.abc.Messageable`]: Returns the channel associated with this context's command.
+ Shorthand for :attr:`.Message.channel`.
"""
return self.message.channel
| {"golden_diff": "diff --git a/discord/ext/commands/context.py b/discord/ext/commands/context.py\n--- a/discord/ext/commands/context.py\n+++ b/discord/ext/commands/context.py\n@@ -229,8 +229,8 @@\n \n @discord.utils.cached_property\n def channel(self):\n- \"\"\":class:`.TextChannel`:\n- Returns the channel associated with this context's command. Shorthand for :attr:`.Message.channel`.\n+ \"\"\"Union[:class:`.abc.Messageable`]: Returns the channel associated with this context's command.\n+ Shorthand for :attr:`.Message.channel`.\n \"\"\"\n return self.message.channel\n", "issue": "Context.channel\n### Summary\r\n\r\nThe type for [Context.channel](https://discordpy.readthedocs.io/en/latest/api.html?highlight=message%20channel#discord.Message.channel) seems to be incorrect in the doc\r\n\r\n### Reproduction Steps\r\n\r\nSee [Context.channel](https://discordpy.readthedocs.io/en/latest/ext/commands/api.html?highlight=context%20channel#discord.ext.commands.Context.channel)\r\n\r\nThe type shown here is `TextChannel`, although this could also be a `DMChannel`.\r\n\r\n### Expected Results\r\n\r\nThe type for Context.channel should be the same as [Message.channel](https://discordpy.readthedocs.io/en/latest/api.html?highlight=message%20channel#discord.Message.channel) which is `Union[abc.Messageable]`\r\n\r\n\r\n### Checklist\r\n\r\n- [X] I have searched the open issues for duplicates.\r\n- [X] I have shown the entire traceback, if possible.\r\n- [X] I have removed my token from display, if visible.\r\n\r\nI was unsure which template to use, so I picked this one.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2015-present Rapptz\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\n\nimport discord.abc\nimport discord.utils\n\nclass Context(discord.abc.Messageable):\n r\"\"\"Represents the context in which a command is being invoked under.\n\n This class contains a lot of meta data to help you understand more about\n the invocation context. 
This class is not created manually and is instead\n passed around to commands as the first parameter.\n\n This class implements the :class:`~discord.abc.Messageable` ABC.\n\n Attributes\n -----------\n message: :class:`.Message`\n The message that triggered the command being executed.\n bot: :class:`.Bot`\n The bot that contains the command being executed.\n args: :class:`list`\n The list of transformed arguments that were passed into the command.\n If this is accessed during the :func:`on_command_error` event\n then this list could be incomplete.\n kwargs: :class:`dict`\n A dictionary of transformed arguments that were passed into the command.\n Similar to :attr:`args`\\, if this is accessed in the\n :func:`on_command_error` event then this dict could be incomplete.\n prefix: :class:`str`\n The prefix that was used to invoke the command.\n command: :class:`Command`\n The command that is being invoked currently.\n invoked_with: :class:`str`\n The command name that triggered this invocation. Useful for finding out\n which alias called the command.\n invoked_parents: List[:class:`str`]\n The command names of the parents that triggered this invocation. Useful for\n finding out which aliases called the command.\n\n For example in commands ``?a b c test``, the invoked parents are ``['a', 'b', 'c']``.\n\n .. versionadded:: 1.7\n\n invoked_subcommand: :class:`Command`\n The subcommand that was invoked.\n If no valid subcommand was invoked then this is equal to ``None``.\n subcommand_passed: Optional[:class:`str`]\n The string that was attempted to call a subcommand. This does not have\n to point to a valid registered subcommand and could just point to a\n nonsense string. If nothing was passed to attempt a call to a\n subcommand then this is set to ``None``.\n command_failed: :class:`bool`\n A boolean that indicates if the command failed to be parsed, checked,\n or invoked.\n \"\"\"\n\n def __init__(self, **attrs):\n self.message = attrs.pop('message', None)\n self.bot = attrs.pop('bot', None)\n self.args = attrs.pop('args', [])\n self.kwargs = attrs.pop('kwargs', {})\n self.prefix = attrs.pop('prefix')\n self.command = attrs.pop('command', None)\n self.view = attrs.pop('view', None)\n self.invoked_with = attrs.pop('invoked_with', None)\n self.invoked_parents = attrs.pop('invoked_parents', [])\n self.invoked_subcommand = attrs.pop('invoked_subcommand', None)\n self.subcommand_passed = attrs.pop('subcommand_passed', None)\n self.command_failed = attrs.pop('command_failed', False)\n self._state = self.message._state\n\n async def invoke(self, *args, **kwargs):\n r\"\"\"|coro|\n\n Calls a command with the arguments given.\n\n This is useful if you want to just call the callback that a\n :class:`.Command` holds internally.\n\n .. note::\n\n This does not handle converters, checks, cooldowns, pre-invoke,\n or after-invoke hooks in any matter. It calls the internal callback\n directly as-if it was a regular function.\n\n You must take care in passing the proper arguments when\n using this function.\n\n .. 
warning::\n\n The first parameter passed **must** be the command being invoked.\n\n Parameters\n -----------\n command: :class:`.Command`\n The command that is going to be called.\n \\*args\n The arguments to to use.\n \\*\\*kwargs\n The keyword arguments to use.\n\n Raises\n -------\n TypeError\n The command argument to invoke is missing.\n \"\"\"\n\n try:\n command = args[0]\n except IndexError:\n raise TypeError('Missing command to invoke.') from None\n\n arguments = []\n if command.cog is not None:\n arguments.append(command.cog)\n\n arguments.append(self)\n arguments.extend(args[1:])\n\n ret = await command.callback(*arguments, **kwargs)\n return ret\n\n async def reinvoke(self, *, call_hooks=False, restart=True):\n \"\"\"|coro|\n\n Calls the command again.\n\n This is similar to :meth:`~.Context.invoke` except that it bypasses\n checks, cooldowns, and error handlers.\n\n .. note::\n\n If you want to bypass :exc:`.UserInputError` derived exceptions,\n it is recommended to use the regular :meth:`~.Context.invoke`\n as it will work more naturally. After all, this will end up\n using the old arguments the user has used and will thus just\n fail again.\n\n Parameters\n ------------\n call_hooks: :class:`bool`\n Whether to call the before and after invoke hooks.\n restart: :class:`bool`\n Whether to start the call chain from the very beginning\n or where we left off (i.e. the command that caused the error).\n The default is to start where we left off.\n\n Raises\n -------\n ValueError\n The context to reinvoke is not valid.\n \"\"\"\n cmd = self.command\n view = self.view\n if cmd is None:\n raise ValueError('This context is not valid.')\n\n # some state to revert to when we're done\n index, previous = view.index, view.previous\n invoked_with = self.invoked_with\n invoked_subcommand = self.invoked_subcommand\n invoked_parents = self.invoked_parents\n subcommand_passed = self.subcommand_passed\n\n if restart:\n to_call = cmd.root_parent or cmd\n view.index = len(self.prefix)\n view.previous = 0\n self.invoked_parents = []\n self.invoked_with = view.get_word() # advance to get the root command\n else:\n to_call = cmd\n\n try:\n await to_call.reinvoke(self, call_hooks=call_hooks)\n finally:\n self.command = cmd\n view.index = index\n view.previous = previous\n self.invoked_with = invoked_with\n self.invoked_subcommand = invoked_subcommand\n self.invoked_parents = invoked_parents\n self.subcommand_passed = subcommand_passed\n\n @property\n def valid(self):\n \"\"\":class:`bool`: Checks if the invocation context is valid to be invoked with.\"\"\"\n return self.prefix is not None and self.command is not None\n\n async def _get_channel(self):\n return self.channel\n\n @property\n def cog(self):\n \"\"\"Optional[:class:`.Cog`]: Returns the cog associated with this context's command. None if it does not exist.\"\"\"\n\n if self.command is None:\n return None\n return self.command.cog\n\n @discord.utils.cached_property\n def guild(self):\n \"\"\"Optional[:class:`.Guild`]: Returns the guild associated with this context's command. None if not available.\"\"\"\n return self.message.guild\n\n @discord.utils.cached_property\n def channel(self):\n \"\"\":class:`.TextChannel`:\n Returns the channel associated with this context's command. Shorthand for :attr:`.Message.channel`.\n \"\"\"\n return self.message.channel\n\n @discord.utils.cached_property\n def author(self):\n \"\"\"Union[:class:`~discord.User`, :class:`.Member`]:\n Returns the author associated with this context's command. 
Shorthand for :attr:`.Message.author`\n \"\"\"\n return self.message.author\n\n @discord.utils.cached_property\n def me(self):\n \"\"\"Union[:class:`.Member`, :class:`.ClientUser`]:\n Similar to :attr:`.Guild.me` except it may return the :class:`.ClientUser` in private message contexts.\n \"\"\"\n return self.guild.me if self.guild is not None else self.bot.user\n\n @property\n def voice_client(self):\n r\"\"\"Optional[:class:`.VoiceProtocol`]: A shortcut to :attr:`.Guild.voice_client`\\, if applicable.\"\"\"\n g = self.guild\n return g.voice_client if g else None\n\n async def send_help(self, *args):\n \"\"\"send_help(entity=<bot>)\n\n |coro|\n\n Shows the help command for the specified entity if given.\n The entity can be a command or a cog.\n\n If no entity is given, then it'll show help for the\n entire bot.\n\n If the entity is a string, then it looks up whether it's a\n :class:`Cog` or a :class:`Command`.\n\n .. note::\n\n Due to the way this function works, instead of returning\n something similar to :meth:`~.commands.HelpCommand.command_not_found`\n this returns :class:`None` on bad input or no help command.\n\n Parameters\n ------------\n entity: Optional[Union[:class:`Command`, :class:`Cog`, :class:`str`]]\n The entity to show help for.\n\n Returns\n --------\n Any\n The result of the help command, if any.\n \"\"\"\n from .core import Group, Command, wrap_callback\n from .errors import CommandError\n\n bot = self.bot\n cmd = bot.help_command\n\n if cmd is None:\n return None\n\n cmd = cmd.copy()\n cmd.context = self\n if len(args) == 0:\n await cmd.prepare_help_command(self, None)\n mapping = cmd.get_bot_mapping()\n injected = wrap_callback(cmd.send_bot_help)\n try:\n return await injected(mapping)\n except CommandError as e:\n await cmd.on_help_command_error(self, e)\n return None\n\n entity = args[0]\n if entity is None:\n return None\n\n if isinstance(entity, str):\n entity = bot.get_cog(entity) or bot.get_command(entity)\n\n try:\n entity.qualified_name\n except AttributeError:\n # if we're here then it's not a cog, group, or command.\n return None\n\n await cmd.prepare_help_command(self, entity.qualified_name)\n\n try:\n if hasattr(entity, '__cog_commands__'):\n injected = wrap_callback(cmd.send_cog_help)\n return await injected(entity)\n elif isinstance(entity, Group):\n injected = wrap_callback(cmd.send_group_help)\n return await injected(entity)\n elif isinstance(entity, Command):\n injected = wrap_callback(cmd.send_command_help)\n return await injected(entity)\n else:\n return None\n except CommandError as e:\n await cmd.on_help_command_error(self, e)\n\n async def reply(self, content=None, **kwargs):\n return await self.message.reply(content, **kwargs)\n\n reply.__doc__ = discord.Message.reply.__doc__\n", "path": "discord/ext/commands/context.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2015-present Rapptz\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS 
IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\n\nimport discord.abc\nimport discord.utils\n\nclass Context(discord.abc.Messageable):\n r\"\"\"Represents the context in which a command is being invoked under.\n\n This class contains a lot of meta data to help you understand more about\n the invocation context. This class is not created manually and is instead\n passed around to commands as the first parameter.\n\n This class implements the :class:`~discord.abc.Messageable` ABC.\n\n Attributes\n -----------\n message: :class:`.Message`\n The message that triggered the command being executed.\n bot: :class:`.Bot`\n The bot that contains the command being executed.\n args: :class:`list`\n The list of transformed arguments that were passed into the command.\n If this is accessed during the :func:`on_command_error` event\n then this list could be incomplete.\n kwargs: :class:`dict`\n A dictionary of transformed arguments that were passed into the command.\n Similar to :attr:`args`\\, if this is accessed in the\n :func:`on_command_error` event then this dict could be incomplete.\n prefix: :class:`str`\n The prefix that was used to invoke the command.\n command: :class:`Command`\n The command that is being invoked currently.\n invoked_with: :class:`str`\n The command name that triggered this invocation. Useful for finding out\n which alias called the command.\n invoked_parents: List[:class:`str`]\n The command names of the parents that triggered this invocation. Useful for\n finding out which aliases called the command.\n\n For example in commands ``?a b c test``, the invoked parents are ``['a', 'b', 'c']``.\n\n .. versionadded:: 1.7\n\n invoked_subcommand: :class:`Command`\n The subcommand that was invoked.\n If no valid subcommand was invoked then this is equal to ``None``.\n subcommand_passed: Optional[:class:`str`]\n The string that was attempted to call a subcommand. This does not have\n to point to a valid registered subcommand and could just point to a\n nonsense string. If nothing was passed to attempt a call to a\n subcommand then this is set to ``None``.\n command_failed: :class:`bool`\n A boolean that indicates if the command failed to be parsed, checked,\n or invoked.\n \"\"\"\n\n def __init__(self, **attrs):\n self.message = attrs.pop('message', None)\n self.bot = attrs.pop('bot', None)\n self.args = attrs.pop('args', [])\n self.kwargs = attrs.pop('kwargs', {})\n self.prefix = attrs.pop('prefix')\n self.command = attrs.pop('command', None)\n self.view = attrs.pop('view', None)\n self.invoked_with = attrs.pop('invoked_with', None)\n self.invoked_parents = attrs.pop('invoked_parents', [])\n self.invoked_subcommand = attrs.pop('invoked_subcommand', None)\n self.subcommand_passed = attrs.pop('subcommand_passed', None)\n self.command_failed = attrs.pop('command_failed', False)\n self._state = self.message._state\n\n async def invoke(self, *args, **kwargs):\n r\"\"\"|coro|\n\n Calls a command with the arguments given.\n\n This is useful if you want to just call the callback that a\n :class:`.Command` holds internally.\n\n .. 
note::\n\n This does not handle converters, checks, cooldowns, pre-invoke,\n or after-invoke hooks in any matter. It calls the internal callback\n directly as-if it was a regular function.\n\n You must take care in passing the proper arguments when\n using this function.\n\n .. warning::\n\n The first parameter passed **must** be the command being invoked.\n\n Parameters\n -----------\n command: :class:`.Command`\n The command that is going to be called.\n \\*args\n The arguments to to use.\n \\*\\*kwargs\n The keyword arguments to use.\n\n Raises\n -------\n TypeError\n The command argument to invoke is missing.\n \"\"\"\n\n try:\n command = args[0]\n except IndexError:\n raise TypeError('Missing command to invoke.') from None\n\n arguments = []\n if command.cog is not None:\n arguments.append(command.cog)\n\n arguments.append(self)\n arguments.extend(args[1:])\n\n ret = await command.callback(*arguments, **kwargs)\n return ret\n\n async def reinvoke(self, *, call_hooks=False, restart=True):\n \"\"\"|coro|\n\n Calls the command again.\n\n This is similar to :meth:`~.Context.invoke` except that it bypasses\n checks, cooldowns, and error handlers.\n\n .. note::\n\n If you want to bypass :exc:`.UserInputError` derived exceptions,\n it is recommended to use the regular :meth:`~.Context.invoke`\n as it will work more naturally. After all, this will end up\n using the old arguments the user has used and will thus just\n fail again.\n\n Parameters\n ------------\n call_hooks: :class:`bool`\n Whether to call the before and after invoke hooks.\n restart: :class:`bool`\n Whether to start the call chain from the very beginning\n or where we left off (i.e. the command that caused the error).\n The default is to start where we left off.\n\n Raises\n -------\n ValueError\n The context to reinvoke is not valid.\n \"\"\"\n cmd = self.command\n view = self.view\n if cmd is None:\n raise ValueError('This context is not valid.')\n\n # some state to revert to when we're done\n index, previous = view.index, view.previous\n invoked_with = self.invoked_with\n invoked_subcommand = self.invoked_subcommand\n invoked_parents = self.invoked_parents\n subcommand_passed = self.subcommand_passed\n\n if restart:\n to_call = cmd.root_parent or cmd\n view.index = len(self.prefix)\n view.previous = 0\n self.invoked_parents = []\n self.invoked_with = view.get_word() # advance to get the root command\n else:\n to_call = cmd\n\n try:\n await to_call.reinvoke(self, call_hooks=call_hooks)\n finally:\n self.command = cmd\n view.index = index\n view.previous = previous\n self.invoked_with = invoked_with\n self.invoked_subcommand = invoked_subcommand\n self.invoked_parents = invoked_parents\n self.subcommand_passed = subcommand_passed\n\n @property\n def valid(self):\n \"\"\":class:`bool`: Checks if the invocation context is valid to be invoked with.\"\"\"\n return self.prefix is not None and self.command is not None\n\n async def _get_channel(self):\n return self.channel\n\n @property\n def cog(self):\n \"\"\"Optional[:class:`.Cog`]: Returns the cog associated with this context's command. None if it does not exist.\"\"\"\n\n if self.command is None:\n return None\n return self.command.cog\n\n @discord.utils.cached_property\n def guild(self):\n \"\"\"Optional[:class:`.Guild`]: Returns the guild associated with this context's command. 
None if not available.\"\"\"\n return self.message.guild\n\n @discord.utils.cached_property\n def channel(self):\n \"\"\"Union[:class:`.abc.Messageable`]: Returns the channel associated with this context's command.\n Shorthand for :attr:`.Message.channel`.\n \"\"\"\n return self.message.channel\n\n @discord.utils.cached_property\n def author(self):\n \"\"\"Union[:class:`~discord.User`, :class:`.Member`]:\n Returns the author associated with this context's command. Shorthand for :attr:`.Message.author`\n \"\"\"\n return self.message.author\n\n @discord.utils.cached_property\n def me(self):\n \"\"\"Union[:class:`.Member`, :class:`.ClientUser`]:\n Similar to :attr:`.Guild.me` except it may return the :class:`.ClientUser` in private message contexts.\n \"\"\"\n return self.guild.me if self.guild is not None else self.bot.user\n\n @property\n def voice_client(self):\n r\"\"\"Optional[:class:`.VoiceProtocol`]: A shortcut to :attr:`.Guild.voice_client`\\, if applicable.\"\"\"\n g = self.guild\n return g.voice_client if g else None\n\n async def send_help(self, *args):\n \"\"\"send_help(entity=<bot>)\n\n |coro|\n\n Shows the help command for the specified entity if given.\n The entity can be a command or a cog.\n\n If no entity is given, then it'll show help for the\n entire bot.\n\n If the entity is a string, then it looks up whether it's a\n :class:`Cog` or a :class:`Command`.\n\n .. note::\n\n Due to the way this function works, instead of returning\n something similar to :meth:`~.commands.HelpCommand.command_not_found`\n this returns :class:`None` on bad input or no help command.\n\n Parameters\n ------------\n entity: Optional[Union[:class:`Command`, :class:`Cog`, :class:`str`]]\n The entity to show help for.\n\n Returns\n --------\n Any\n The result of the help command, if any.\n \"\"\"\n from .core import Group, Command, wrap_callback\n from .errors import CommandError\n\n bot = self.bot\n cmd = bot.help_command\n\n if cmd is None:\n return None\n\n cmd = cmd.copy()\n cmd.context = self\n if len(args) == 0:\n await cmd.prepare_help_command(self, None)\n mapping = cmd.get_bot_mapping()\n injected = wrap_callback(cmd.send_bot_help)\n try:\n return await injected(mapping)\n except CommandError as e:\n await cmd.on_help_command_error(self, e)\n return None\n\n entity = args[0]\n if entity is None:\n return None\n\n if isinstance(entity, str):\n entity = bot.get_cog(entity) or bot.get_command(entity)\n\n try:\n entity.qualified_name\n except AttributeError:\n # if we're here then it's not a cog, group, or command.\n return None\n\n await cmd.prepare_help_command(self, entity.qualified_name)\n\n try:\n if hasattr(entity, '__cog_commands__'):\n injected = wrap_callback(cmd.send_cog_help)\n return await injected(entity)\n elif isinstance(entity, Group):\n injected = wrap_callback(cmd.send_group_help)\n return await injected(entity)\n elif isinstance(entity, Command):\n injected = wrap_callback(cmd.send_command_help)\n return await injected(entity)\n else:\n return None\n except CommandError as e:\n await cmd.on_help_command_error(self, e)\n\n async def reply(self, content=None, **kwargs):\n return await self.message.reply(content, **kwargs)\n\n reply.__doc__ = discord.Message.reply.__doc__\n", "path": "discord/ext/commands/context.py"}]} | 4,032 | 144 |
gh_patches_debug_2096 | rasdani/github-patches | git_diff | liqd__a4-product-1097 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
archived projects accessible via activity feed
 At https://www.beteiligung.in/liqd/ all projects are private, but I can see the content of the projects if I click on the activity feed, even when I am not signed in.
--- END ISSUE ---
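A minimal sketch of the change the report points at: the activity feed queryset needs the same `is_archived` guard that already protects the project list. The names below (`public_action_feed`, `organisation`) are illustrative only; the real code lives in the views module quoted further down.

```python
from adhocracy4.actions.models import Action

def public_action_feed(organisation, limit=4):
    """Sketch: build the public activity feed while hiding archived projects."""
    return (Action.objects
            .filter(project__organisation=organisation)
            .filter(project__is_archived=False)  # the guard the feed is missing
            .filter_public()                     # adhocracy4 manager helpers, as used below
            .exclude_updates()[:limit])
```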
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/organisations/views.py`
Content:
```
1 from django.contrib.messages.views import SuccessMessageMixin
2 from django.utils.translation import ugettext_lazy as _
3 from django.views import generic
4 from django.views.generic import DetailView
5
6 from adhocracy4.actions.models import Action
7 from adhocracy4.projects.models import Project
8 from adhocracy4.rules import mixins as rules_mixins
9 from apps.projects import query
10
11 from . import forms
12 from .models import Organisation
13
14
15 class OrganisationView(DetailView):
16 template_name = 'organisation_landing_page.html'
17 model = Organisation
18 slug_url_kwarg = 'organisation_slug'
19
20 def get_context_data(self, **kwargs):
21 context = super().get_context_data(**kwargs)
22
23 project_list = Project.objects\
24 .filter(organisation=self.object,
25 is_archived=False,
26 is_draft=False)
27 project_list = query.filter_viewable(
28 project_list, self.request.user
29 )
30 context['project_list'] = project_list
31
32 context['action_list'] = Action.objects\
33 .filter(project__organisation=self.object)\
34 .filter_public()\
35 .exclude_updates()[:4]
36
37 context['stats'] = {
38 'users': 1204,
39 'items': 3425,
40 'comments': 23234,
41 'ratings': 134234,
42 }
43
44 return context
45
46
47 class InformationView(DetailView):
48 template_name = 'organisation_information.html'
49 model = Organisation
50 slug_url_kwarg = 'organisation_slug'
51
52
53 class ImprintView(DetailView):
54 template_name = 'organisation_imprint.html'
55 model = Organisation
56 slug_url_kwarg = 'organisation_slug'
57
58
59 class OrganisationUpdateView(rules_mixins.PermissionRequiredMixin,
60 SuccessMessageMixin,
61 generic.UpdateView):
62 model = Organisation
63 form_class = forms.OrganisationForm
64 slug_url_kwarg = 'organisation_slug'
65 template_name = 'organisation_form.html'
66 success_message = _('Organisation successfully updated.')
67 permission_required = 'a4_candy_organisations.change_organisation'
68 menu_item = 'organisation'
69
70 def get_success_url(self):
71 return self.request.path
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/apps/organisations/views.py b/apps/organisations/views.py
--- a/apps/organisations/views.py
+++ b/apps/organisations/views.py
@@ -31,6 +31,7 @@
context['action_list'] = Action.objects\
.filter(project__organisation=self.object)\
+ .filter(project__is_archived=False) \
.filter_public()\
.exclude_updates()[:4]
| {"golden_diff": "diff --git a/apps/organisations/views.py b/apps/organisations/views.py\n--- a/apps/organisations/views.py\n+++ b/apps/organisations/views.py\n@@ -31,6 +31,7 @@\n \n context['action_list'] = Action.objects\\\n .filter(project__organisation=self.object)\\\n+ .filter(project__is_archived=False) \\\n .filter_public()\\\n .exclude_updates()[:4]\n", "issue": "archived projects accessible via activity feed\n At https://www.beteiligung.in/liqd/ all projects are private but I can see the content of the projects if I click on the activity feed. Even if not signed in.\n", "before_files": [{"content": "from django.contrib.messages.views import SuccessMessageMixin\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\nfrom django.views.generic import DetailView\n\nfrom adhocracy4.actions.models import Action\nfrom adhocracy4.projects.models import Project\nfrom adhocracy4.rules import mixins as rules_mixins\nfrom apps.projects import query\n\nfrom . import forms\nfrom .models import Organisation\n\n\nclass OrganisationView(DetailView):\n template_name = 'organisation_landing_page.html'\n model = Organisation\n slug_url_kwarg = 'organisation_slug'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n project_list = Project.objects\\\n .filter(organisation=self.object,\n is_archived=False,\n is_draft=False)\n project_list = query.filter_viewable(\n project_list, self.request.user\n )\n context['project_list'] = project_list\n\n context['action_list'] = Action.objects\\\n .filter(project__organisation=self.object)\\\n .filter_public()\\\n .exclude_updates()[:4]\n\n context['stats'] = {\n 'users': 1204,\n 'items': 3425,\n 'comments': 23234,\n 'ratings': 134234,\n }\n\n return context\n\n\nclass InformationView(DetailView):\n template_name = 'organisation_information.html'\n model = Organisation\n slug_url_kwarg = 'organisation_slug'\n\n\nclass ImprintView(DetailView):\n template_name = 'organisation_imprint.html'\n model = Organisation\n slug_url_kwarg = 'organisation_slug'\n\n\nclass OrganisationUpdateView(rules_mixins.PermissionRequiredMixin,\n SuccessMessageMixin,\n generic.UpdateView):\n model = Organisation\n form_class = forms.OrganisationForm\n slug_url_kwarg = 'organisation_slug'\n template_name = 'organisation_form.html'\n success_message = _('Organisation successfully updated.')\n permission_required = 'a4_candy_organisations.change_organisation'\n menu_item = 'organisation'\n\n def get_success_url(self):\n return self.request.path\n", "path": "apps/organisations/views.py"}], "after_files": [{"content": "from django.contrib.messages.views import SuccessMessageMixin\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\nfrom django.views.generic import DetailView\n\nfrom adhocracy4.actions.models import Action\nfrom adhocracy4.projects.models import Project\nfrom adhocracy4.rules import mixins as rules_mixins\nfrom apps.projects import query\n\nfrom . 
import forms\nfrom .models import Organisation\n\n\nclass OrganisationView(DetailView):\n template_name = 'organisation_landing_page.html'\n model = Organisation\n slug_url_kwarg = 'organisation_slug'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n project_list = Project.objects\\\n .filter(organisation=self.object,\n is_archived=False,\n is_draft=False)\n project_list = query.filter_viewable(\n project_list, self.request.user\n )\n context['project_list'] = project_list\n\n context['action_list'] = Action.objects\\\n .filter(project__organisation=self.object)\\\n .filter(project__is_archived=False) \\\n .filter_public()\\\n .exclude_updates()[:4]\n\n context['stats'] = {\n 'users': 1204,\n 'items': 3425,\n 'comments': 23234,\n 'ratings': 134234,\n }\n\n return context\n\n\nclass InformationView(DetailView):\n template_name = 'organisation_information.html'\n model = Organisation\n slug_url_kwarg = 'organisation_slug'\n\n\nclass ImprintView(DetailView):\n template_name = 'organisation_imprint.html'\n model = Organisation\n slug_url_kwarg = 'organisation_slug'\n\n\nclass OrganisationUpdateView(rules_mixins.PermissionRequiredMixin,\n SuccessMessageMixin,\n generic.UpdateView):\n model = Organisation\n form_class = forms.OrganisationForm\n slug_url_kwarg = 'organisation_slug'\n template_name = 'organisation_form.html'\n success_message = _('Organisation successfully updated.')\n permission_required = 'a4_candy_organisations.change_organisation'\n menu_item = 'organisation'\n\n def get_success_url(self):\n return self.request.path\n", "path": "apps/organisations/views.py"}]} | 904 | 94 |
gh_patches_debug_30641 | rasdani/github-patches | git_diff | localstack__localstack-9677 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug: StateMachine yaml Definition is not supported (DefinitionString works)
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Behavior
When describing a state machine in CloudFormation YAML with Definition, LocalStack fails to create the StateMachine. Switching to YAML with DefinitionString works as expected. 
Examples taken from here
https://docs.aws.amazon.com/step-functions/latest/dg/development-options.html#development-options-format
Attaching working example
[localstack_stepfunctions_and_serverless.zip](https://github.com/localstack/localstack/files/7791134/localstack_stepfunctions_and_serverless.z
### Expected Behavior
YAML with Definition is far preferable in a complex state machine description with a bunch of substitutions, references and parameters
### How are you starting LocalStack?
With the `localstack` script
### Steps To Reproduce
#### How are you starting localstack (e.g., `bin/localstack` command, arguments, or `docker-compose.yml`)
localstack --debug start -d
#### Client commands (e.g., AWS SDK code snippet, or sequence of "awslocal" commands)
awslocal s3 mb s3://s4-echo-serverless-deployment-share-local
sls deploy --stage local
### Environment
```markdown
- OS: OSX Monterey
- LocalStack: latest
```
### Anything else?
Error when using YAML with Definition
[error.txt](https://github.com/localstack/localstack/files/7791149/error.txt)
Success when using YAML with DefinitionString
[success.txt](https://github.com/localstack/localstack/files/7791151/success.txt)
--- END ISSUE ---
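The gap the report describes, in miniature: with Definition, CloudFormation hands the resource provider a parsed mapping, while the StepFunctions CreateStateMachine call expects a JSON string, so the provider has to serialize it itself. A sketch under that assumption (the `model` dict here only mirrors the shape of the provider's properties shown below):

```python
import json

# DefinitionString already arrives as a string; Definition arrives as a dict
# parsed from the YAML template, so it has to be dumped to JSON before it can
# be passed as `definition` to create_state_machine.
model = {"Definition": {"StartAt": "Done", "States": {"Done": {"Type": "Succeed"}}}}  # made-up body
definition_str = json.dumps(model["Definition"])
```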
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `localstack/services/stepfunctions/resource_providers/aws_stepfunctions_statemachine.py`
Content:
```
1 # LocalStack Resource Provider Scaffolding v2
2 from __future__ import annotations
3
4 import re
5 from pathlib import Path
6 from typing import Optional, TypedDict
7
8 import localstack.services.cloudformation.provider_utils as util
9 from localstack.services.cloudformation.resource_provider import (
10 LOG,
11 OperationStatus,
12 ProgressEvent,
13 ResourceProvider,
14 ResourceRequest,
15 )
16 from localstack.utils.strings import to_str
17
18
19 class StepFunctionsStateMachineProperties(TypedDict):
20 RoleArn: Optional[str]
21 Arn: Optional[str]
22 Definition: Optional[dict]
23 DefinitionS3Location: Optional[S3Location]
24 DefinitionString: Optional[str]
25 DefinitionSubstitutions: Optional[dict]
26 LoggingConfiguration: Optional[LoggingConfiguration]
27 Name: Optional[str]
28 StateMachineName: Optional[str]
29 StateMachineRevisionId: Optional[str]
30 StateMachineType: Optional[str]
31 Tags: Optional[list[TagsEntry]]
32 TracingConfiguration: Optional[TracingConfiguration]
33
34
35 class CloudWatchLogsLogGroup(TypedDict):
36 LogGroupArn: Optional[str]
37
38
39 class LogDestination(TypedDict):
40 CloudWatchLogsLogGroup: Optional[CloudWatchLogsLogGroup]
41
42
43 class LoggingConfiguration(TypedDict):
44 Destinations: Optional[list[LogDestination]]
45 IncludeExecutionData: Optional[bool]
46 Level: Optional[str]
47
48
49 class TracingConfiguration(TypedDict):
50 Enabled: Optional[bool]
51
52
53 class S3Location(TypedDict):
54 Bucket: Optional[str]
55 Key: Optional[str]
56 Version: Optional[str]
57
58
59 class TagsEntry(TypedDict):
60 Key: Optional[str]
61 Value: Optional[str]
62
63
64 REPEATED_INVOCATION = "repeated_invocation"
65
66
67 class StepFunctionsStateMachineProvider(ResourceProvider[StepFunctionsStateMachineProperties]):
68 TYPE = "AWS::StepFunctions::StateMachine" # Autogenerated. Don't change
69 SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change
70
71 def create(
72 self,
73 request: ResourceRequest[StepFunctionsStateMachineProperties],
74 ) -> ProgressEvent[StepFunctionsStateMachineProperties]:
75 """
76 Create a new resource.
77
78 Primary identifier fields:
79 - /properties/Arn
80
81 Required properties:
82 - RoleArn
83
84 Create-only properties:
85 - /properties/StateMachineName
86 - /properties/StateMachineType
87
88 Read-only properties:
89 - /properties/Arn
90 - /properties/Name
91 - /properties/StateMachineRevisionId
92
93 IAM permissions required:
94 - states:CreateStateMachine
95 - iam:PassRole
96 - s3:GetObject
97
98 """
99 model = request.desired_state
100 step_function = request.aws_client_factory.stepfunctions
101
102 if not model.get("StateMachineName"):
103 model["StateMachineName"] = util.generate_default_name(
104 stack_name=request.stack_name, logical_resource_id=request.logical_resource_id
105 )
106
107 params = {
108 "name": model.get("StateMachineName"),
109 "roleArn": model.get("RoleArn"),
110 "type": model.get("StateMachineType", "STANDARD"),
111 }
112
113 # get definition
114 s3_client = request.aws_client_factory.s3
115
116 definition_str = self._get_definition(model, s3_client)
117
118 params["definition"] = definition_str
119
120 response = step_function.create_state_machine(**params)
121
122 model["Arn"] = response["stateMachineArn"]
123 model["Name"] = model["StateMachineName"]
124
125 return ProgressEvent(
126 status=OperationStatus.SUCCESS,
127 resource_model=model,
128 custom_context=request.custom_context,
129 )
130
131 def _get_definition(self, model, s3_client):
132 definition_str = model.get("DefinitionString")
133 s3_location = model.get("DefinitionS3Location")
134 if not definition_str and s3_location:
135 # TODO: currently not covered by tests - add a test to mimick the behavior of "sam deploy ..."
136
137 LOG.debug("Fetching state machine definition from S3: %s", s3_location)
138 result = s3_client.get_object(Bucket=s3_location["Bucket"], Key=s3_location["Key"])
139 definition_str = to_str(result["Body"].read())
140 substitutions = model.get("DefinitionSubstitutions")
141 if substitutions is not None:
142 definition_str = _apply_substitutions(definition_str, substitutions)
143 return definition_str
144
145 def read(
146 self,
147 request: ResourceRequest[StepFunctionsStateMachineProperties],
148 ) -> ProgressEvent[StepFunctionsStateMachineProperties]:
149 """
150 Fetch resource information
151
152 IAM permissions required:
153 - states:DescribeStateMachine
154 - states:ListTagsForResource
155 """
156 raise NotImplementedError
157
158 def delete(
159 self,
160 request: ResourceRequest[StepFunctionsStateMachineProperties],
161 ) -> ProgressEvent[StepFunctionsStateMachineProperties]:
162 """
163 Delete a resource
164
165 IAM permissions required:
166 - states:DeleteStateMachine
167 - states:DescribeStateMachine
168 """
169 model = request.desired_state
170 step_function = request.aws_client_factory.stepfunctions
171
172 step_function.delete_state_machine(stateMachineArn=model["Arn"])
173
174 return ProgressEvent(
175 status=OperationStatus.SUCCESS,
176 resource_model=model,
177 custom_context=request.custom_context,
178 )
179
180 def update(
181 self,
182 request: ResourceRequest[StepFunctionsStateMachineProperties],
183 ) -> ProgressEvent[StepFunctionsStateMachineProperties]:
184 """
185 Update a resource
186
187 IAM permissions required:
188 - states:UpdateStateMachine
189 - states:TagResource
190 - states:UntagResource
191 - states:ListTagsForResource
192 - iam:PassRole
193 """
194 model = request.desired_state
195 step_function = request.aws_client_factory.stepfunctions
196
197 if not model.get("Arn"):
198 model["Arn"] = request.previous_state["Arn"]
199
200 params = {
201 "stateMachineArn": model["Arn"],
202 "definition": model["DefinitionString"],
203 }
204
205 step_function.update_state_machine(**params)
206
207 return ProgressEvent(
208 status=OperationStatus.SUCCESS,
209 resource_model=model,
210 custom_context=request.custom_context,
211 )
212
213
214 def _apply_substitutions(definition: str, substitutions: dict[str, str]) -> str:
215 substitution_regex = re.compile("\\${[a-zA-Z0-9_]+}") # might be a bit too strict in some cases
216 tokens = substitution_regex.findall(definition)
217 result = definition
218 for token in tokens:
219 raw_token = token[2:-1] # strip ${ and }
220 if raw_token not in substitutions.keys():
221 raise
222 result = result.replace(token, substitutions[raw_token])
223
224 return result
225
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_statemachine.py b/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_statemachine.py
--- a/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_statemachine.py
+++ b/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_statemachine.py
@@ -1,6 +1,7 @@
# LocalStack Resource Provider Scaffolding v2
from __future__ import annotations
+import json
import re
from pathlib import Path
from typing import Optional, TypedDict
@@ -129,14 +130,20 @@
)
def _get_definition(self, model, s3_client):
- definition_str = model.get("DefinitionString")
- s3_location = model.get("DefinitionS3Location")
- if not definition_str and s3_location:
+ if "DefinitionString" in model:
+ definition_str = model.get("DefinitionString")
+ elif "DefinitionS3Location" in model:
# TODO: currently not covered by tests - add a test to mimick the behavior of "sam deploy ..."
-
+ s3_location = model.get("DefinitionS3Location")
LOG.debug("Fetching state machine definition from S3: %s", s3_location)
result = s3_client.get_object(Bucket=s3_location["Bucket"], Key=s3_location["Key"])
definition_str = to_str(result["Body"].read())
+ elif "Definition" in model:
+ definition = model.get("Definition")
+ definition_str = json.dumps(definition)
+ else:
+ definition_str = None
+
substitutions = model.get("DefinitionSubstitutions")
if substitutions is not None:
definition_str = _apply_substitutions(definition_str, substitutions)
| {"golden_diff": "diff --git a/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_statemachine.py b/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_statemachine.py\n--- a/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_statemachine.py\n+++ b/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_statemachine.py\n@@ -1,6 +1,7 @@\n # LocalStack Resource Provider Scaffolding v2\n from __future__ import annotations\n \n+import json\n import re\n from pathlib import Path\n from typing import Optional, TypedDict\n@@ -129,14 +130,20 @@\n )\n \n def _get_definition(self, model, s3_client):\n- definition_str = model.get(\"DefinitionString\")\n- s3_location = model.get(\"DefinitionS3Location\")\n- if not definition_str and s3_location:\n+ if \"DefinitionString\" in model:\n+ definition_str = model.get(\"DefinitionString\")\n+ elif \"DefinitionS3Location\" in model:\n # TODO: currently not covered by tests - add a test to mimick the behavior of \"sam deploy ...\"\n-\n+ s3_location = model.get(\"DefinitionS3Location\")\n LOG.debug(\"Fetching state machine definition from S3: %s\", s3_location)\n result = s3_client.get_object(Bucket=s3_location[\"Bucket\"], Key=s3_location[\"Key\"])\n definition_str = to_str(result[\"Body\"].read())\n+ elif \"Definition\" in model:\n+ definition = model.get(\"Definition\")\n+ definition_str = json.dumps(definition)\n+ else:\n+ definition_str = None\n+\n substitutions = model.get(\"DefinitionSubstitutions\")\n if substitutions is not None:\n definition_str = _apply_substitutions(definition_str, substitutions)\n", "issue": "bug: StateMachine yaml Definition is not supported (DefinitionString works)\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues\n\n### Current Behavior\n\nwhen describing using cloudformation YAML with Definition, LocalStack fails to create StateMachine. Switching to YAML with DefinitionString works as expected. 
\r\n\r\nExamples taken from here\r\nhttps://docs.aws.amazon.com/step-functions/latest/dg/development-options.html#development-options-format\r\n\r\nAttaching working example \r\n[localstack_stepfunctions_and_serverless.zip](https://github.com/localstack/localstack/files/7791134/localstack_stepfunctions_and_serverless.z\r\n\r\n\r\n\n\n### Expected Behavior\n\nYAML with Definition is much more preferable in a complex state machine description with bunch of substitutions, references and parameters\n\n### How are you starting LocalStack?\n\nWith the `localstack` script\n\n### Steps To Reproduce\n\n#### How are you starting localstack (e.g., `bin/localstack` command, arguments, or `docker-compose.yml`)\r\n\r\n localstack --debug start -d\r\n\r\n#### Client commands (e.g., AWS SDK code snippet, or sequence of \"awslocal\" commands)\r\n\r\n awslocal s3 mb s3://s4-echo-serverless-deployment-share-local\r\nsls deploy --stage local\r\n\n\n### Environment\n\n```markdown\n- OS: OSX Monterey\r\n- LocalStack: latest\n```\n\n\n### Anything else?\n\nError when using YAML with Definition\r\n[error.txt](https://github.com/localstack/localstack/files/7791149/error.txt)\r\n\r\nSuccess when using YAML with DefinitionString\r\n[success.txt](https://github.com/localstack/localstack/files/7791151/success.txt)\r\n\r\n\n", "before_files": [{"content": "# LocalStack Resource Provider Scaffolding v2\nfrom __future__ import annotations\n\nimport re\nfrom pathlib import Path\nfrom typing import Optional, TypedDict\n\nimport localstack.services.cloudformation.provider_utils as util\nfrom localstack.services.cloudformation.resource_provider import (\n LOG,\n OperationStatus,\n ProgressEvent,\n ResourceProvider,\n ResourceRequest,\n)\nfrom localstack.utils.strings import to_str\n\n\nclass StepFunctionsStateMachineProperties(TypedDict):\n RoleArn: Optional[str]\n Arn: Optional[str]\n Definition: Optional[dict]\n DefinitionS3Location: Optional[S3Location]\n DefinitionString: Optional[str]\n DefinitionSubstitutions: Optional[dict]\n LoggingConfiguration: Optional[LoggingConfiguration]\n Name: Optional[str]\n StateMachineName: Optional[str]\n StateMachineRevisionId: Optional[str]\n StateMachineType: Optional[str]\n Tags: Optional[list[TagsEntry]]\n TracingConfiguration: Optional[TracingConfiguration]\n\n\nclass CloudWatchLogsLogGroup(TypedDict):\n LogGroupArn: Optional[str]\n\n\nclass LogDestination(TypedDict):\n CloudWatchLogsLogGroup: Optional[CloudWatchLogsLogGroup]\n\n\nclass LoggingConfiguration(TypedDict):\n Destinations: Optional[list[LogDestination]]\n IncludeExecutionData: Optional[bool]\n Level: Optional[str]\n\n\nclass TracingConfiguration(TypedDict):\n Enabled: Optional[bool]\n\n\nclass S3Location(TypedDict):\n Bucket: Optional[str]\n Key: Optional[str]\n Version: Optional[str]\n\n\nclass TagsEntry(TypedDict):\n Key: Optional[str]\n Value: Optional[str]\n\n\nREPEATED_INVOCATION = \"repeated_invocation\"\n\n\nclass StepFunctionsStateMachineProvider(ResourceProvider[StepFunctionsStateMachineProperties]):\n TYPE = \"AWS::StepFunctions::StateMachine\" # Autogenerated. Don't change\n SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. 
Don't change\n\n def create(\n self,\n request: ResourceRequest[StepFunctionsStateMachineProperties],\n ) -> ProgressEvent[StepFunctionsStateMachineProperties]:\n \"\"\"\n Create a new resource.\n\n Primary identifier fields:\n - /properties/Arn\n\n Required properties:\n - RoleArn\n\n Create-only properties:\n - /properties/StateMachineName\n - /properties/StateMachineType\n\n Read-only properties:\n - /properties/Arn\n - /properties/Name\n - /properties/StateMachineRevisionId\n\n IAM permissions required:\n - states:CreateStateMachine\n - iam:PassRole\n - s3:GetObject\n\n \"\"\"\n model = request.desired_state\n step_function = request.aws_client_factory.stepfunctions\n\n if not model.get(\"StateMachineName\"):\n model[\"StateMachineName\"] = util.generate_default_name(\n stack_name=request.stack_name, logical_resource_id=request.logical_resource_id\n )\n\n params = {\n \"name\": model.get(\"StateMachineName\"),\n \"roleArn\": model.get(\"RoleArn\"),\n \"type\": model.get(\"StateMachineType\", \"STANDARD\"),\n }\n\n # get definition\n s3_client = request.aws_client_factory.s3\n\n definition_str = self._get_definition(model, s3_client)\n\n params[\"definition\"] = definition_str\n\n response = step_function.create_state_machine(**params)\n\n model[\"Arn\"] = response[\"stateMachineArn\"]\n model[\"Name\"] = model[\"StateMachineName\"]\n\n return ProgressEvent(\n status=OperationStatus.SUCCESS,\n resource_model=model,\n custom_context=request.custom_context,\n )\n\n def _get_definition(self, model, s3_client):\n definition_str = model.get(\"DefinitionString\")\n s3_location = model.get(\"DefinitionS3Location\")\n if not definition_str and s3_location:\n # TODO: currently not covered by tests - add a test to mimick the behavior of \"sam deploy ...\"\n\n LOG.debug(\"Fetching state machine definition from S3: %s\", s3_location)\n result = s3_client.get_object(Bucket=s3_location[\"Bucket\"], Key=s3_location[\"Key\"])\n definition_str = to_str(result[\"Body\"].read())\n substitutions = model.get(\"DefinitionSubstitutions\")\n if substitutions is not None:\n definition_str = _apply_substitutions(definition_str, substitutions)\n return definition_str\n\n def read(\n self,\n request: ResourceRequest[StepFunctionsStateMachineProperties],\n ) -> ProgressEvent[StepFunctionsStateMachineProperties]:\n \"\"\"\n Fetch resource information\n\n IAM permissions required:\n - states:DescribeStateMachine\n - states:ListTagsForResource\n \"\"\"\n raise NotImplementedError\n\n def delete(\n self,\n request: ResourceRequest[StepFunctionsStateMachineProperties],\n ) -> ProgressEvent[StepFunctionsStateMachineProperties]:\n \"\"\"\n Delete a resource\n\n IAM permissions required:\n - states:DeleteStateMachine\n - states:DescribeStateMachine\n \"\"\"\n model = request.desired_state\n step_function = request.aws_client_factory.stepfunctions\n\n step_function.delete_state_machine(stateMachineArn=model[\"Arn\"])\n\n return ProgressEvent(\n status=OperationStatus.SUCCESS,\n resource_model=model,\n custom_context=request.custom_context,\n )\n\n def update(\n self,\n request: ResourceRequest[StepFunctionsStateMachineProperties],\n ) -> ProgressEvent[StepFunctionsStateMachineProperties]:\n \"\"\"\n Update a resource\n\n IAM permissions required:\n - states:UpdateStateMachine\n - states:TagResource\n - states:UntagResource\n - states:ListTagsForResource\n - iam:PassRole\n \"\"\"\n model = request.desired_state\n step_function = request.aws_client_factory.stepfunctions\n\n if not model.get(\"Arn\"):\n model[\"Arn\"] = 
request.previous_state[\"Arn\"]\n\n params = {\n \"stateMachineArn\": model[\"Arn\"],\n \"definition\": model[\"DefinitionString\"],\n }\n\n step_function.update_state_machine(**params)\n\n return ProgressEvent(\n status=OperationStatus.SUCCESS,\n resource_model=model,\n custom_context=request.custom_context,\n )\n\n\ndef _apply_substitutions(definition: str, substitutions: dict[str, str]) -> str:\n substitution_regex = re.compile(\"\\\\${[a-zA-Z0-9_]+}\") # might be a bit too strict in some cases\n tokens = substitution_regex.findall(definition)\n result = definition\n for token in tokens:\n raw_token = token[2:-1] # strip ${ and }\n if raw_token not in substitutions.keys():\n raise\n result = result.replace(token, substitutions[raw_token])\n\n return result\n", "path": "localstack/services/stepfunctions/resource_providers/aws_stepfunctions_statemachine.py"}], "after_files": [{"content": "# LocalStack Resource Provider Scaffolding v2\nfrom __future__ import annotations\n\nimport json\nimport re\nfrom pathlib import Path\nfrom typing import Optional, TypedDict\n\nimport localstack.services.cloudformation.provider_utils as util\nfrom localstack.services.cloudformation.resource_provider import (\n LOG,\n OperationStatus,\n ProgressEvent,\n ResourceProvider,\n ResourceRequest,\n)\nfrom localstack.utils.strings import to_str\n\n\nclass StepFunctionsStateMachineProperties(TypedDict):\n RoleArn: Optional[str]\n Arn: Optional[str]\n Definition: Optional[dict]\n DefinitionS3Location: Optional[S3Location]\n DefinitionString: Optional[str]\n DefinitionSubstitutions: Optional[dict]\n LoggingConfiguration: Optional[LoggingConfiguration]\n Name: Optional[str]\n StateMachineName: Optional[str]\n StateMachineRevisionId: Optional[str]\n StateMachineType: Optional[str]\n Tags: Optional[list[TagsEntry]]\n TracingConfiguration: Optional[TracingConfiguration]\n\n\nclass CloudWatchLogsLogGroup(TypedDict):\n LogGroupArn: Optional[str]\n\n\nclass LogDestination(TypedDict):\n CloudWatchLogsLogGroup: Optional[CloudWatchLogsLogGroup]\n\n\nclass LoggingConfiguration(TypedDict):\n Destinations: Optional[list[LogDestination]]\n IncludeExecutionData: Optional[bool]\n Level: Optional[str]\n\n\nclass TracingConfiguration(TypedDict):\n Enabled: Optional[bool]\n\n\nclass S3Location(TypedDict):\n Bucket: Optional[str]\n Key: Optional[str]\n Version: Optional[str]\n\n\nclass TagsEntry(TypedDict):\n Key: Optional[str]\n Value: Optional[str]\n\n\nREPEATED_INVOCATION = \"repeated_invocation\"\n\n\nclass StepFunctionsStateMachineProvider(ResourceProvider[StepFunctionsStateMachineProperties]):\n TYPE = \"AWS::StepFunctions::StateMachine\" # Autogenerated. Don't change\n SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. 
Don't change\n\n def create(\n self,\n request: ResourceRequest[StepFunctionsStateMachineProperties],\n ) -> ProgressEvent[StepFunctionsStateMachineProperties]:\n \"\"\"\n Create a new resource.\n\n Primary identifier fields:\n - /properties/Arn\n\n Required properties:\n - RoleArn\n\n Create-only properties:\n - /properties/StateMachineName\n - /properties/StateMachineType\n\n Read-only properties:\n - /properties/Arn\n - /properties/Name\n - /properties/StateMachineRevisionId\n\n IAM permissions required:\n - states:CreateStateMachine\n - iam:PassRole\n - s3:GetObject\n\n \"\"\"\n model = request.desired_state\n step_function = request.aws_client_factory.stepfunctions\n\n if not model.get(\"StateMachineName\"):\n model[\"StateMachineName\"] = util.generate_default_name(\n stack_name=request.stack_name, logical_resource_id=request.logical_resource_id\n )\n\n params = {\n \"name\": model.get(\"StateMachineName\"),\n \"roleArn\": model.get(\"RoleArn\"),\n \"type\": model.get(\"StateMachineType\", \"STANDARD\"),\n }\n\n # get definition\n s3_client = request.aws_client_factory.s3\n\n definition_str = self._get_definition(model, s3_client)\n\n params[\"definition\"] = definition_str\n\n response = step_function.create_state_machine(**params)\n\n model[\"Arn\"] = response[\"stateMachineArn\"]\n model[\"Name\"] = model[\"StateMachineName\"]\n\n return ProgressEvent(\n status=OperationStatus.SUCCESS,\n resource_model=model,\n custom_context=request.custom_context,\n )\n\n def _get_definition(self, model, s3_client):\n if \"DefinitionString\" in model:\n definition_str = model.get(\"DefinitionString\")\n elif \"DefinitionS3Location\" in model:\n # TODO: currently not covered by tests - add a test to mimick the behavior of \"sam deploy ...\"\n s3_location = model.get(\"DefinitionS3Location\")\n LOG.debug(\"Fetching state machine definition from S3: %s\", s3_location)\n result = s3_client.get_object(Bucket=s3_location[\"Bucket\"], Key=s3_location[\"Key\"])\n definition_str = to_str(result[\"Body\"].read())\n elif \"Definition\" in model:\n definition = model.get(\"Definition\")\n definition_str = json.dumps(definition)\n else:\n definition_str = None\n\n substitutions = model.get(\"DefinitionSubstitutions\")\n if substitutions is not None:\n definition_str = _apply_substitutions(definition_str, substitutions)\n return definition_str\n\n def read(\n self,\n request: ResourceRequest[StepFunctionsStateMachineProperties],\n ) -> ProgressEvent[StepFunctionsStateMachineProperties]:\n \"\"\"\n Fetch resource information\n\n IAM permissions required:\n - states:DescribeStateMachine\n - states:ListTagsForResource\n \"\"\"\n raise NotImplementedError\n\n def delete(\n self,\n request: ResourceRequest[StepFunctionsStateMachineProperties],\n ) -> ProgressEvent[StepFunctionsStateMachineProperties]:\n \"\"\"\n Delete a resource\n\n IAM permissions required:\n - states:DeleteStateMachine\n - states:DescribeStateMachine\n \"\"\"\n model = request.desired_state\n step_function = request.aws_client_factory.stepfunctions\n\n step_function.delete_state_machine(stateMachineArn=model[\"Arn\"])\n\n return ProgressEvent(\n status=OperationStatus.SUCCESS,\n resource_model=model,\n custom_context=request.custom_context,\n )\n\n def update(\n self,\n request: ResourceRequest[StepFunctionsStateMachineProperties],\n ) -> ProgressEvent[StepFunctionsStateMachineProperties]:\n \"\"\"\n Update a resource\n\n IAM permissions required:\n - states:UpdateStateMachine\n - states:TagResource\n - states:UntagResource\n - 
states:ListTagsForResource\n - iam:PassRole\n \"\"\"\n model = request.desired_state\n step_function = request.aws_client_factory.stepfunctions\n\n if not model.get(\"Arn\"):\n model[\"Arn\"] = request.previous_state[\"Arn\"]\n\n params = {\n \"stateMachineArn\": model[\"Arn\"],\n \"definition\": model[\"DefinitionString\"],\n }\n\n step_function.update_state_machine(**params)\n\n return ProgressEvent(\n status=OperationStatus.SUCCESS,\n resource_model=model,\n custom_context=request.custom_context,\n )\n\n\ndef _apply_substitutions(definition: str, substitutions: dict[str, str]) -> str:\n substitution_regex = re.compile(\"\\\\${[a-zA-Z0-9_]+}\") # might be a bit too strict in some cases\n tokens = substitution_regex.findall(definition)\n result = definition\n for token in tokens:\n raw_token = token[2:-1] # strip ${ and }\n if raw_token not in substitutions.keys():\n raise\n result = result.replace(token, substitutions[raw_token])\n\n return result\n", "path": "localstack/services/stepfunctions/resource_providers/aws_stepfunctions_statemachine.py"}]} | 2,609 | 384 |
gh_patches_debug_28588 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-920 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Errors when using Fn::Transform
*cfn-lint version: (`0.20.2`)*
Given the following valid template, I see 2 errors:
```yaml
Parameters:
ImageId:
Type: AWS::EC2::Image::Id
InstanceType:
Type: String
Resources:
LaunchConfiguration:
Type: AWS::AutoScaling::LaunchConfiguration
Properties:
ImageId: !Ref ImageId
InstanceType: !Ref InstanceType
UserData:
'Fn::Base64':
'Fn::Sub':
'Fn::Transform': # Returns a string that contains Fn::Sub tokens like ${AWS::Region}
Name: DynamicUserData
```
* `E1019 Sub should be a string or array of 2 items for Resources/LaunchConfiguration/Properties/UserData/Fn::Base64/Fn::Sub` - `Fn::Transform` can return a string or a template, so `Fn::Sub` should be forgiving of it.
* `E0002 Unknown exception while processing rule W2001: expected string or bytes-like object` - Same root cause, but it fails in a different way due to the assumption that the value complies with E1019 in a specific way.
--- END ISSUE ---
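What trips both rules is the shape of the value: Fn::Sub receives a one-key mapping (`{'Fn::Transform': ...}`) rather than a string or a two-item list, and the macro only expands to a string later. A sketch of a more forgiving shape check — an illustration, not the project's actual fix:

```python
import six

def sub_value_shape_ok(sub_value_obj):
    """Accept the shapes Fn::Sub can legally take, including an Fn::Transform macro."""
    if isinstance(sub_value_obj, six.string_types):
        return True
    if isinstance(sub_value_obj, list) and len(sub_value_obj) == 2:
        return True
    # A macro that will resolve to a string before Fn::Sub is evaluated.
    return isinstance(sub_value_obj, dict) and list(sub_value_obj) == ['Fn::Transform']
```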
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/functions/Sub.py`
Content:
```
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 import six
18 from cfnlint import CloudFormationLintRule
19 from cfnlint import RuleMatch
20
21
22 class Sub(CloudFormationLintRule):
23 """Check if Sub values are correct"""
24 id = 'E1019'
25 shortdesc = 'Sub validation of parameters'
26 description = 'Making sure the sub function is properly configured'
27 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'
28 tags = ['functions', 'sub']
29
30 def _test_string(self, cfn, sub_string, parameters, tree):
31 """Test if a string has appropriate parameters"""
32
33 matches = []
34 string_params = cfn.get_sub_parameters(sub_string)
35
36 for string_param in string_params:
37 if isinstance(string_param, (six.string_types)):
38 matches.extend(self._test_parameter(string_param, cfn, parameters, tree))
39
40 return matches
41
42 def _get_parameters(self, cfn):
43 """Get all Parameter Names"""
44 results = {}
45 parameters = cfn.template.get('Parameters', {})
46 if isinstance(parameters, dict):
47 for param_name, param_values in parameters.items():
48 # This rule isn't here to check the Types but we need
49 # something valid if it doesn't exist
50 results[param_name] = param_values.get('Type', 'String')
51
52 return results
53
54 def _test_parameters(self, parameters, cfn, tree):
55 """Check parameters for appropriate configuration"""
56
57 supported_functions = [
58 'Fn::Base64',
59 'Fn::FindInMap',
60 'Fn::GetAtt',
61 'Fn::GetAZs',
62 'Fn::ImportValue',
63 'Fn::If',
64 'Fn::Join',
65 'Fn::Select',
66 'Fn::Sub',
67 'Ref'
68 ]
69
70 matches = []
71 for parameter_name, parameter_value_obj in parameters.items():
72 param_tree = tree[:] + [parameter_name]
73 if isinstance(parameter_value_obj, dict):
74 if len(parameter_value_obj) == 1:
75 for key, value in parameter_value_obj.items():
76 if key not in supported_functions:
77 message = 'Sub parameter should use a valid function for {0}'
78 matches.append(RuleMatch(
79 param_tree, message.format('/'.join(map(str, tree)))))
80 elif key in ['Ref']:
81 matches.extend(self._test_parameter(value, cfn, {}, tree))
82 elif key in ['Fn::GetAtt']:
83 if isinstance(value, list):
84 matches.extend(self._test_parameter('.'.join(value), cfn, {}, tree))
85 elif isinstance(value, six.string_types):
86 matches.extend(self._test_parameter(value, cfn, {}, tree))
87 else:
88 message = 'Sub parameter should be an object of 1 for {0}'
89 matches.append(RuleMatch(
90 param_tree, message.format('/'.join(map(str, tree)))))
91 elif not isinstance(parameter_value_obj, six.string_types):
92 message = 'Sub parameter should be an object of 1 or string for {0}'
93 matches.append(RuleMatch(
94 param_tree, message.format('/'.join(map(str, tree)))))
95
96 return matches
97
98 def _test_parameter(self, parameter, cfn, parameters, tree):
99 """ Test a parameter """
100
101 matches = []
102 get_atts = cfn.get_valid_getatts()
103
104 valid_pseudo_params = [
105 'AWS::Region',
106 'AWS::StackName',
107 'AWS::URLSuffix',
108 'AWS::StackId',
109 'AWS::Region',
110 'AWS::Partition',
111 'AWS::NotificationARNs',
112 'AWS::AccountId'
113 ]
114
115 odd_list_params = [
116 'CommaDelimitedList',
117 'AWS::SSM::Parameter::Value<CommaDelimitedList>',
118 ]
119
120 valid_params = valid_pseudo_params
121 valid_params.extend(cfn.get_resource_names())
122 template_parameters = self._get_parameters(cfn)
123
124 for key, _ in parameters.items():
125 valid_params.append(key)
126
127 if parameter not in valid_params:
128 found = False
129 if parameter in template_parameters:
130 found = True
131 if (
132 template_parameters.get(parameter) in odd_list_params or
133 template_parameters.get(parameter).startswith('AWS::SSM::Parameter::Value<List') or
134 template_parameters.get(parameter).startswith('List')):
135 message = 'Fn::Sub cannot use list {0} at {1}'
136 matches.append(RuleMatch(
137 tree, message.format(parameter, '/'.join(map(str, tree)))))
138 for resource, attributes in get_atts.items():
139 for attribute_name, attribute_values in attributes.items():
140 if resource == parameter.split('.')[0] and attribute_name == '*':
141 if attribute_values.get('Type') == 'List':
142 message = 'Fn::Sub cannot use list {0} at {1}'
143 matches.append(RuleMatch(
144 tree, message.format(parameter, '/'.join(map(str, tree)))))
145 found = True
146 elif (resource == parameter.split('.')[0] and
147 attribute_name == '.'.join(parameter.split('.')[1:])):
148 if attribute_values.get('Type') == 'List':
149 message = 'Fn::Sub cannot use list {0} at {1}'
150 matches.append(RuleMatch(
151 tree, message.format(parameter, '/'.join(map(str, tree)))))
152 found = True
153 if not found:
154 message = 'Parameter {0} for Fn::Sub not found at {1}'
155 matches.append(RuleMatch(
156 tree, message.format(parameter, '/'.join(map(str, tree)))))
157
158 return matches
159
160 def match(self, cfn):
161 """Check CloudFormation Join"""
162
163 matches = []
164
165 sub_objs = cfn.search_deep_keys('Fn::Sub')
166
167 for sub_obj in sub_objs:
168 sub_value_obj = sub_obj[-1]
169 tree = sub_obj[:-1]
170 if isinstance(sub_value_obj, six.string_types):
171 matches.extend(self._test_string(cfn, sub_value_obj, {}, tree))
172 elif isinstance(sub_value_obj, list):
173 if len(sub_value_obj) == 2:
174 sub_string = sub_value_obj[0]
175 parameters = sub_value_obj[1]
176 if not isinstance(sub_string, six.string_types):
177 message = 'Subs first element should be of type string for {0}'
178 matches.append(RuleMatch(
179 tree + [0], message.format('/'.join(map(str, tree)))))
180 if not isinstance(parameters, dict):
181 message = 'Subs second element should be an object for {0}'
182 matches.append(RuleMatch(
183 tree + [1], message.format('/'.join(map(str, tree)))))
184 else:
185 matches.extend(self._test_string(cfn, sub_string, parameters, tree + [0]))
186 matches.extend(self._test_parameters(parameters, cfn, tree))
187 else:
188 message = 'Sub should be an array of 2 for {0}'
189 matches.append(RuleMatch(
190 tree, message.format('/'.join(map(str, tree)))))
191 else:
192 message = 'Sub should be a string or array of 2 items for {0}'
193 matches.append(RuleMatch(
194 tree, message.format('/'.join(map(str, tree)))))
195
196 return matches
197
```
Path: `src/cfnlint/rules/parameters/Used.py`
Content:
```
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 from __future__ import unicode_literals
18 import re
19 import six
20 from cfnlint import CloudFormationLintRule
21 from cfnlint import RuleMatch
22
23
24 class Used(CloudFormationLintRule):
25 """Check if Parameters are used"""
26 id = 'W2001'
27 shortdesc = 'Check if Parameters are Used'
28 description = 'Making sure the parameters defined are used'
29 source_url = 'https://github.com/aws-cloudformation/cfn-python-lint'
30 tags = ['parameters']
31
32 def searchstring(self, string, parameter):
33 """Search string for tokenized fields"""
34 regex = re.compile(r'\${(%s)}' % parameter)
35 return regex.findall(string)
36
37 def isparaminref(self, subs, parameter):
38 """Search sub strings for parameters"""
39 for sub in subs:
40 if isinstance(sub, (six.text_type, six.string_types)):
41 if self.searchstring(sub, parameter):
42 return True
43
44 return False
45
46 def match(self, cfn):
47 """Check CloudFormation Parameters"""
48
49 matches = []
50
51 reftrees = cfn.transform_pre.get('Ref')
52 subtrees = cfn.transform_pre.get('Fn::Sub')
53 refs = []
54 for reftree in reftrees:
55 refs.append(reftree[-1])
56 subs = []
57 for subtree in subtrees:
58 if isinstance(subtree[-1], list):
59 subs.extend(cfn.get_sub_parameters(subtree[-1][0]))
60 else:
61 subs.extend(cfn.get_sub_parameters(subtree[-1]))
62
63 for paramname, _ in cfn.get_parameters().items():
64 if paramname not in refs:
65 if paramname not in subs:
66 message = 'Parameter {0} not used.'
67 matches.append(RuleMatch(
68 ['Parameters', paramname],
69 message.format(paramname)
70 ))
71
72 return matches
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cfnlint/rules/functions/Sub.py b/src/cfnlint/rules/functions/Sub.py
--- a/src/cfnlint/rules/functions/Sub.py
+++ b/src/cfnlint/rules/functions/Sub.py
@@ -188,6 +188,17 @@
message = 'Sub should be an array of 2 for {0}'
matches.append(RuleMatch(
tree, message.format('/'.join(map(str, tree)))))
+ elif isinstance(sub_value_obj, dict):
+ if len(sub_value_obj) == 1:
+ for key, _ in sub_value_obj.items():
+ if not key == 'Fn::Transform':
+ message = 'Sub should be a string or array of 2 items for {0}'
+ matches.append(RuleMatch(
+ tree, message.format('/'.join(map(str, tree)))))
+ else:
+ message = 'Sub should be a string or array of 2 items for {0}'
+ matches.append(RuleMatch(
+ tree, message.format('/'.join(map(str, tree)))))
else:
message = 'Sub should be a string or array of 2 items for {0}'
matches.append(RuleMatch(
diff --git a/src/cfnlint/rules/parameters/Used.py b/src/cfnlint/rules/parameters/Used.py
--- a/src/cfnlint/rules/parameters/Used.py
+++ b/src/cfnlint/rules/parameters/Used.py
@@ -57,7 +57,7 @@
for subtree in subtrees:
if isinstance(subtree[-1], list):
subs.extend(cfn.get_sub_parameters(subtree[-1][0]))
- else:
+ elif isinstance(subtree[-1], six.string_types):
subs.extend(cfn.get_sub_parameters(subtree[-1]))
for paramname, _ in cfn.get_parameters().items():
| {"golden_diff": "diff --git a/src/cfnlint/rules/functions/Sub.py b/src/cfnlint/rules/functions/Sub.py\n--- a/src/cfnlint/rules/functions/Sub.py\n+++ b/src/cfnlint/rules/functions/Sub.py\n@@ -188,6 +188,17 @@\n message = 'Sub should be an array of 2 for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n+ elif isinstance(sub_value_obj, dict):\n+ if len(sub_value_obj) == 1:\n+ for key, _ in sub_value_obj.items():\n+ if not key == 'Fn::Transform':\n+ message = 'Sub should be a string or array of 2 items for {0}'\n+ matches.append(RuleMatch(\n+ tree, message.format('/'.join(map(str, tree)))))\n+ else:\n+ message = 'Sub should be a string or array of 2 items for {0}'\n+ matches.append(RuleMatch(\n+ tree, message.format('/'.join(map(str, tree)))))\n else:\n message = 'Sub should be a string or array of 2 items for {0}'\n matches.append(RuleMatch(\ndiff --git a/src/cfnlint/rules/parameters/Used.py b/src/cfnlint/rules/parameters/Used.py\n--- a/src/cfnlint/rules/parameters/Used.py\n+++ b/src/cfnlint/rules/parameters/Used.py\n@@ -57,7 +57,7 @@\n for subtree in subtrees:\n if isinstance(subtree[-1], list):\n subs.extend(cfn.get_sub_parameters(subtree[-1][0]))\n- else:\n+ elif isinstance(subtree[-1], six.string_types):\n subs.extend(cfn.get_sub_parameters(subtree[-1]))\n \n for paramname, _ in cfn.get_parameters().items():\n", "issue": "Errors when using Fn::Transform\n*cfn-lint version: (`0.20.2`)*\r\n\r\nGiven the following valid template, I see 2 errors:\r\n\r\n```yaml\r\nParameters:\r\n ImageId:\r\n Type: AWS::EC2::Image::Id\r\n InstanceType:\r\n Type: String\r\n\r\nResources:\r\n LaunchConfiguration:\r\n Type: AWS::AutoScaling::LaunchConfiguration\r\n Properties:\r\n ImageId: !Ref ImageId\r\n InstanceType: !Ref InstanceType\r\n UserData:\r\n 'Fn::Base64':\r\n 'Fn::Sub':\r\n 'Fn::Transform': # Returns a string that contains Fn::Sub tokens like ${AWS::Region}\r\n Name: DynamicUserData\r\n```\r\n* `E1019 Sub should be a string or array of 2 items for Resources/LaunchConfiguration/Properties/UserData/Fn::Base64/Fn::Sub` - `Fn::Transform` can return a string or a template, so `Fn::Sub` should be forgiving of it.\r\n* `E0002 Unknown exception while processing rule W2001: expected string or bytes-like object` Same root cause, but fails in a different way due to assumption that it complies with E1019 in a specific way.\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport six\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass Sub(CloudFormationLintRule):\n \"\"\"Check if Sub values are correct\"\"\"\n id = 'E1019'\n shortdesc = 'Sub validation of parameters'\n description = 'Making sure the sub function is properly configured'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'\n tags = ['functions', 'sub']\n\n def _test_string(self, cfn, sub_string, parameters, tree):\n \"\"\"Test if a string has appropriate parameters\"\"\"\n\n matches = []\n string_params = cfn.get_sub_parameters(sub_string)\n\n for string_param in string_params:\n if isinstance(string_param, (six.string_types)):\n matches.extend(self._test_parameter(string_param, cfn, parameters, tree))\n\n return matches\n\n def _get_parameters(self, cfn):\n \"\"\"Get all Parameter Names\"\"\"\n results = {}\n parameters = cfn.template.get('Parameters', {})\n if isinstance(parameters, dict):\n for param_name, param_values in parameters.items():\n # This rule isn't here to check the Types but we need\n # something valid if it doesn't exist\n results[param_name] = param_values.get('Type', 'String')\n\n return results\n\n def _test_parameters(self, parameters, cfn, tree):\n \"\"\"Check parameters for appropriate configuration\"\"\"\n\n supported_functions = [\n 'Fn::Base64',\n 'Fn::FindInMap',\n 'Fn::GetAtt',\n 'Fn::GetAZs',\n 'Fn::ImportValue',\n 'Fn::If',\n 'Fn::Join',\n 'Fn::Select',\n 'Fn::Sub',\n 'Ref'\n ]\n\n matches = []\n for parameter_name, parameter_value_obj in parameters.items():\n param_tree = tree[:] + [parameter_name]\n if isinstance(parameter_value_obj, dict):\n if len(parameter_value_obj) == 1:\n for key, value in parameter_value_obj.items():\n if key not in supported_functions:\n message = 'Sub parameter should use a valid function for {0}'\n matches.append(RuleMatch(\n param_tree, message.format('/'.join(map(str, tree)))))\n elif key in ['Ref']:\n matches.extend(self._test_parameter(value, cfn, {}, tree))\n elif key in ['Fn::GetAtt']:\n if isinstance(value, list):\n matches.extend(self._test_parameter('.'.join(value), cfn, {}, tree))\n elif isinstance(value, six.string_types):\n matches.extend(self._test_parameter(value, cfn, {}, tree))\n else:\n message = 'Sub parameter should be an object of 1 for {0}'\n matches.append(RuleMatch(\n param_tree, message.format('/'.join(map(str, tree)))))\n elif not isinstance(parameter_value_obj, six.string_types):\n message = 'Sub parameter should be an object of 1 or string for {0}'\n matches.append(RuleMatch(\n param_tree, message.format('/'.join(map(str, tree)))))\n\n return matches\n\n def _test_parameter(self, parameter, cfn, parameters, tree):\n \"\"\" Test a parameter \"\"\"\n\n matches = []\n get_atts = cfn.get_valid_getatts()\n\n valid_pseudo_params = [\n 'AWS::Region',\n 'AWS::StackName',\n 'AWS::URLSuffix',\n 'AWS::StackId',\n 'AWS::Region',\n 'AWS::Partition',\n 'AWS::NotificationARNs',\n 'AWS::AccountId'\n ]\n\n odd_list_params = [\n 'CommaDelimitedList',\n 'AWS::SSM::Parameter::Value<CommaDelimitedList>',\n ]\n\n valid_params = valid_pseudo_params\n valid_params.extend(cfn.get_resource_names())\n template_parameters = self._get_parameters(cfn)\n\n for key, _ in 
parameters.items():\n valid_params.append(key)\n\n if parameter not in valid_params:\n found = False\n if parameter in template_parameters:\n found = True\n if (\n template_parameters.get(parameter) in odd_list_params or\n template_parameters.get(parameter).startswith('AWS::SSM::Parameter::Value<List') or\n template_parameters.get(parameter).startswith('List')):\n message = 'Fn::Sub cannot use list {0} at {1}'\n matches.append(RuleMatch(\n tree, message.format(parameter, '/'.join(map(str, tree)))))\n for resource, attributes in get_atts.items():\n for attribute_name, attribute_values in attributes.items():\n if resource == parameter.split('.')[0] and attribute_name == '*':\n if attribute_values.get('Type') == 'List':\n message = 'Fn::Sub cannot use list {0} at {1}'\n matches.append(RuleMatch(\n tree, message.format(parameter, '/'.join(map(str, tree)))))\n found = True\n elif (resource == parameter.split('.')[0] and\n attribute_name == '.'.join(parameter.split('.')[1:])):\n if attribute_values.get('Type') == 'List':\n message = 'Fn::Sub cannot use list {0} at {1}'\n matches.append(RuleMatch(\n tree, message.format(parameter, '/'.join(map(str, tree)))))\n found = True\n if not found:\n message = 'Parameter {0} for Fn::Sub not found at {1}'\n matches.append(RuleMatch(\n tree, message.format(parameter, '/'.join(map(str, tree)))))\n\n return matches\n\n def match(self, cfn):\n \"\"\"Check CloudFormation Join\"\"\"\n\n matches = []\n\n sub_objs = cfn.search_deep_keys('Fn::Sub')\n\n for sub_obj in sub_objs:\n sub_value_obj = sub_obj[-1]\n tree = sub_obj[:-1]\n if isinstance(sub_value_obj, six.string_types):\n matches.extend(self._test_string(cfn, sub_value_obj, {}, tree))\n elif isinstance(sub_value_obj, list):\n if len(sub_value_obj) == 2:\n sub_string = sub_value_obj[0]\n parameters = sub_value_obj[1]\n if not isinstance(sub_string, six.string_types):\n message = 'Subs first element should be of type string for {0}'\n matches.append(RuleMatch(\n tree + [0], message.format('/'.join(map(str, tree)))))\n if not isinstance(parameters, dict):\n message = 'Subs second element should be an object for {0}'\n matches.append(RuleMatch(\n tree + [1], message.format('/'.join(map(str, tree)))))\n else:\n matches.extend(self._test_string(cfn, sub_string, parameters, tree + [0]))\n matches.extend(self._test_parameters(parameters, cfn, tree))\n else:\n message = 'Sub should be an array of 2 for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n else:\n message = 'Sub should be a string or array of 2 items for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n\n return matches\n", "path": "src/cfnlint/rules/functions/Sub.py"}, {"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom __future__ import unicode_literals\nimport re\nimport six\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass Used(CloudFormationLintRule):\n \"\"\"Check if Parameters are used\"\"\"\n id = 'W2001'\n shortdesc = 'Check if Parameters are Used'\n description = 'Making sure the parameters defined are used'\n source_url = 'https://github.com/aws-cloudformation/cfn-python-lint'\n tags = ['parameters']\n\n def searchstring(self, string, parameter):\n \"\"\"Search string for tokenized fields\"\"\"\n regex = re.compile(r'\\${(%s)}' % parameter)\n return regex.findall(string)\n\n def isparaminref(self, subs, parameter):\n \"\"\"Search sub strings for parameters\"\"\"\n for sub in subs:\n if isinstance(sub, (six.text_type, six.string_types)):\n if self.searchstring(sub, parameter):\n return True\n\n return False\n\n def match(self, cfn):\n \"\"\"Check CloudFormation Parameters\"\"\"\n\n matches = []\n\n reftrees = cfn.transform_pre.get('Ref')\n subtrees = cfn.transform_pre.get('Fn::Sub')\n refs = []\n for reftree in reftrees:\n refs.append(reftree[-1])\n subs = []\n for subtree in subtrees:\n if isinstance(subtree[-1], list):\n subs.extend(cfn.get_sub_parameters(subtree[-1][0]))\n else:\n subs.extend(cfn.get_sub_parameters(subtree[-1]))\n\n for paramname, _ in cfn.get_parameters().items():\n if paramname not in refs:\n if paramname not in subs:\n message = 'Parameter {0} not used.'\n matches.append(RuleMatch(\n ['Parameters', paramname],\n message.format(paramname)\n ))\n\n return matches\n", "path": "src/cfnlint/rules/parameters/Used.py"}], "after_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport six\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass Sub(CloudFormationLintRule):\n \"\"\"Check if Sub values are correct\"\"\"\n id = 'E1019'\n shortdesc = 'Sub validation of parameters'\n description = 'Making sure the sub function is properly configured'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'\n tags = ['functions', 'sub']\n\n def _test_string(self, cfn, sub_string, parameters, tree):\n \"\"\"Test if a string has appropriate parameters\"\"\"\n\n matches = []\n string_params = cfn.get_sub_parameters(sub_string)\n\n for string_param in string_params:\n if isinstance(string_param, (six.string_types)):\n matches.extend(self._test_parameter(string_param, cfn, parameters, tree))\n\n return matches\n\n def _get_parameters(self, cfn):\n \"\"\"Get all Parameter Names\"\"\"\n results = {}\n parameters = cfn.template.get('Parameters', {})\n if isinstance(parameters, dict):\n for param_name, param_values in parameters.items():\n # This rule isn't here to check the Types but we need\n # something valid if it doesn't exist\n results[param_name] = param_values.get('Type', 'String')\n\n return results\n\n def _test_parameters(self, parameters, cfn, tree):\n \"\"\"Check parameters for appropriate configuration\"\"\"\n\n supported_functions = [\n 'Fn::Base64',\n 'Fn::FindInMap',\n 'Fn::GetAtt',\n 'Fn::GetAZs',\n 'Fn::ImportValue',\n 'Fn::If',\n 'Fn::Join',\n 'Fn::Select',\n 'Fn::Sub',\n 'Ref'\n ]\n\n matches = []\n for parameter_name, parameter_value_obj in parameters.items():\n param_tree = tree[:] + [parameter_name]\n if isinstance(parameter_value_obj, dict):\n if len(parameter_value_obj) == 1:\n for key, value in parameter_value_obj.items():\n if key not in supported_functions:\n message = 'Sub parameter should use a valid function for {0}'\n matches.append(RuleMatch(\n param_tree, message.format('/'.join(map(str, tree)))))\n elif key in ['Ref']:\n matches.extend(self._test_parameter(value, cfn, {}, tree))\n elif key in ['Fn::GetAtt']:\n if isinstance(value, list):\n matches.extend(self._test_parameter('.'.join(value), cfn, {}, tree))\n elif isinstance(value, six.string_types):\n matches.extend(self._test_parameter(value, cfn, {}, tree))\n else:\n message = 'Sub parameter should be an object of 1 for {0}'\n matches.append(RuleMatch(\n param_tree, message.format('/'.join(map(str, tree)))))\n elif not isinstance(parameter_value_obj, six.string_types):\n message = 'Sub parameter should be an object of 1 or string for {0}'\n matches.append(RuleMatch(\n param_tree, message.format('/'.join(map(str, tree)))))\n\n return matches\n\n def _test_parameter(self, parameter, cfn, parameters, tree):\n \"\"\" Test a parameter \"\"\"\n\n matches = []\n get_atts = cfn.get_valid_getatts()\n\n valid_pseudo_params = [\n 'AWS::Region',\n 'AWS::StackName',\n 'AWS::URLSuffix',\n 'AWS::StackId',\n 'AWS::Region',\n 'AWS::Partition',\n 'AWS::NotificationARNs',\n 'AWS::AccountId'\n ]\n\n odd_list_params = [\n 'CommaDelimitedList',\n 'AWS::SSM::Parameter::Value<CommaDelimitedList>',\n ]\n\n valid_params = valid_pseudo_params\n valid_params.extend(cfn.get_resource_names())\n template_parameters = self._get_parameters(cfn)\n\n for key, _ in 
parameters.items():\n valid_params.append(key)\n\n if parameter not in valid_params:\n found = False\n if parameter in template_parameters:\n found = True\n if (\n template_parameters.get(parameter) in odd_list_params or\n template_parameters.get(parameter).startswith('AWS::SSM::Parameter::Value<List') or\n template_parameters.get(parameter).startswith('List')):\n message = 'Fn::Sub cannot use list {0} at {1}'\n matches.append(RuleMatch(\n tree, message.format(parameter, '/'.join(map(str, tree)))))\n for resource, attributes in get_atts.items():\n for attribute_name, attribute_values in attributes.items():\n if resource == parameter.split('.')[0] and attribute_name == '*':\n if attribute_values.get('Type') == 'List':\n message = 'Fn::Sub cannot use list {0} at {1}'\n matches.append(RuleMatch(\n tree, message.format(parameter, '/'.join(map(str, tree)))))\n found = True\n elif (resource == parameter.split('.')[0] and\n attribute_name == '.'.join(parameter.split('.')[1:])):\n if attribute_values.get('Type') == 'List':\n message = 'Fn::Sub cannot use list {0} at {1}'\n matches.append(RuleMatch(\n tree, message.format(parameter, '/'.join(map(str, tree)))))\n found = True\n if not found:\n message = 'Parameter {0} for Fn::Sub not found at {1}'\n matches.append(RuleMatch(\n tree, message.format(parameter, '/'.join(map(str, tree)))))\n\n return matches\n\n def match(self, cfn):\n \"\"\"Check CloudFormation Join\"\"\"\n\n matches = []\n\n sub_objs = cfn.search_deep_keys('Fn::Sub')\n\n for sub_obj in sub_objs:\n sub_value_obj = sub_obj[-1]\n tree = sub_obj[:-1]\n if isinstance(sub_value_obj, six.string_types):\n matches.extend(self._test_string(cfn, sub_value_obj, {}, tree))\n elif isinstance(sub_value_obj, list):\n if len(sub_value_obj) == 2:\n sub_string = sub_value_obj[0]\n parameters = sub_value_obj[1]\n if not isinstance(sub_string, six.string_types):\n message = 'Subs first element should be of type string for {0}'\n matches.append(RuleMatch(\n tree + [0], message.format('/'.join(map(str, tree)))))\n if not isinstance(parameters, dict):\n message = 'Subs second element should be an object for {0}'\n matches.append(RuleMatch(\n tree + [1], message.format('/'.join(map(str, tree)))))\n else:\n matches.extend(self._test_string(cfn, sub_string, parameters, tree + [0]))\n matches.extend(self._test_parameters(parameters, cfn, tree))\n else:\n message = 'Sub should be an array of 2 for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n elif isinstance(sub_value_obj, dict):\n if len(sub_value_obj) == 1:\n for key, _ in sub_value_obj.items():\n if not key == 'Fn::Transform':\n message = 'Sub should be a string or array of 2 items for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n else:\n message = 'Sub should be a string or array of 2 items for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n else:\n message = 'Sub should be a string or array of 2 items for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n\n return matches\n", "path": "src/cfnlint/rules/functions/Sub.py"}, {"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom __future__ import unicode_literals\nimport re\nimport six\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass Used(CloudFormationLintRule):\n \"\"\"Check if Parameters are used\"\"\"\n id = 'W2001'\n shortdesc = 'Check if Parameters are Used'\n description = 'Making sure the parameters defined are used'\n source_url = 'https://github.com/aws-cloudformation/cfn-python-lint'\n tags = ['parameters']\n\n def searchstring(self, string, parameter):\n \"\"\"Search string for tokenized fields\"\"\"\n regex = re.compile(r'\\${(%s)}' % parameter)\n return regex.findall(string)\n\n def isparaminref(self, subs, parameter):\n \"\"\"Search sub strings for parameters\"\"\"\n for sub in subs:\n if isinstance(sub, (six.text_type, six.string_types)):\n if self.searchstring(sub, parameter):\n return True\n\n return False\n\n def match(self, cfn):\n \"\"\"Check CloudFormation Parameters\"\"\"\n\n matches = []\n\n reftrees = cfn.transform_pre.get('Ref')\n subtrees = cfn.transform_pre.get('Fn::Sub')\n refs = []\n for reftree in reftrees:\n refs.append(reftree[-1])\n subs = []\n for subtree in subtrees:\n if isinstance(subtree[-1], list):\n subs.extend(cfn.get_sub_parameters(subtree[-1][0]))\n elif isinstance(subtree[-1], six.string_types):\n subs.extend(cfn.get_sub_parameters(subtree[-1]))\n\n for paramname, _ in cfn.get_parameters().items():\n if paramname not in refs:\n if paramname not in subs:\n message = 'Parameter {0} not used.'\n matches.append(RuleMatch(\n ['Parameters', paramname],\n message.format(paramname)\n ))\n\n return matches\n", "path": "src/cfnlint/rules/parameters/Used.py"}]} | 3,522 | 398 |
gh_patches_debug_33794 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-2634 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider motel6 is broken
During the global build at 2021-08-18-14-42-26, spider **motel6** failed with **0 features** and **2 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/logs/motel6.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/output/motel6.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/output/motel6.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/motel6.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import json
4 from locations.items import GeojsonPointItem
5
6 brand_lookup = {
7 "MS": "Motel 6",
8 "SS": "Studio 6",
9 "HS": "Hotel 6"
10 }
11
12
13 class Motel6Spider(scrapy.Spider):
14 name = "motel6"
15 allowed_domains = ["motel6.com"]
16 start_urls = (
17 'https://www.motel6.com/var/g6/hotel-summary/ms.infinity.1.json',
18 )
19
20 def parse(self, response):
21 idata = json.loads(response.body_as_unicode())
22 storeids = idata.keys()
23 URL = 'https://www.motel6.com/var/g6/hotel-information/en/{}.json'
24 for storeid in storeids:
25 try:
26 int(storeid)
27 except ValueError:
28 continue
29 try:
30 yield scrapy.Request(URL.format(storeid), callback=self.parse_hotel)
31 except ValueError:
32 continue
33
34 def parse_hotel(self, response):
35 mdata = json.loads(response.body_as_unicode())
36
37 properties = {
38 'ref': mdata["property_id"],
39 'name': mdata["name"],
40 'addr_full': mdata["address"],
41 'city': mdata["city"],
42 'postcode': mdata["zip"],
43 'lat': mdata["latitude"],
44 'lon': mdata["longitude"],
45 'phone': mdata["phone"],
46 'state': mdata["state"],
47 'website': mdata["microsite_url"],
48 'brand': brand_lookup[mdata["brand_id"]],
49 }
50
51 yield GeojsonPointItem(**properties)
52
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/motel6.py b/locations/spiders/motel6.py
--- a/locations/spiders/motel6.py
+++ b/locations/spiders/motel6.py
@@ -14,20 +14,21 @@
name = "motel6"
allowed_domains = ["motel6.com"]
start_urls = (
- 'https://www.motel6.com/var/g6/hotel-summary/ms.infinity.1.json',
+ 'https://www.motel6.com/content/g6-cache/property-summary.1.json',
)
def parse(self, response):
idata = json.loads(response.body_as_unicode())
- storeids = idata.keys()
- URL = 'https://www.motel6.com/var/g6/hotel-information/en/{}.json'
- for storeid in storeids:
+ url = 'https://www.motel6.com/bin/g6/propertydata.{}.json'
+
+ for storeid in idata.keys():
try:
int(storeid)
except ValueError:
continue
+
try:
- yield scrapy.Request(URL.format(storeid), callback=self.parse_hotel)
+ yield scrapy.Request(url.format(storeid), callback=self.parse_hotel)
except ValueError:
continue
@@ -35,17 +36,17 @@
mdata = json.loads(response.body_as_unicode())
properties = {
- 'ref': mdata["property_id"],
- 'name': mdata["name"],
- 'addr_full': mdata["address"],
- 'city': mdata["city"],
- 'postcode': mdata["zip"],
- 'lat': mdata["latitude"],
- 'lon': mdata["longitude"],
- 'phone': mdata["phone"],
- 'state': mdata["state"],
- 'website': mdata["microsite_url"],
- 'brand': brand_lookup[mdata["brand_id"]],
+ 'ref': mdata["property_id"],
+ 'name': mdata["name"],
+ 'addr_full': mdata["address"],
+ 'city': mdata["city"],
+ 'postcode': mdata["zip"],
+ 'lat': mdata["latitude"],
+ 'lon': mdata["longitude"],
+ 'phone': mdata["phone"],
+ 'state': mdata["state"],
+ 'website': mdata["microsite_url"],
+ 'brand': brand_lookup[mdata["brand_id"]],
}
yield GeojsonPointItem(**properties)
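
A quick way to sanity-check the new endpoint shape from the patch above (the URL is copied from the diff; expecting numeric property-ID keys mirrors what `parse()` assumes):

```python
import json

import requests

# URL comes from the patched start_urls; this only confirms the response is an
# object keyed by property IDs, which is what parse() iterates over.
response = requests.get(
    "https://www.motel6.com/content/g6-cache/property-summary.1.json", timeout=30
)
data = json.loads(response.text)
print([key for key in data if key.isdigit()][:5])
```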
| {"golden_diff": "diff --git a/locations/spiders/motel6.py b/locations/spiders/motel6.py\n--- a/locations/spiders/motel6.py\n+++ b/locations/spiders/motel6.py\n@@ -14,20 +14,21 @@\n name = \"motel6\"\n allowed_domains = [\"motel6.com\"]\n start_urls = (\n- 'https://www.motel6.com/var/g6/hotel-summary/ms.infinity.1.json',\n+ 'https://www.motel6.com/content/g6-cache/property-summary.1.json',\n )\n \n def parse(self, response):\n idata = json.loads(response.body_as_unicode())\n- storeids = idata.keys()\n- URL = 'https://www.motel6.com/var/g6/hotel-information/en/{}.json'\n- for storeid in storeids:\n+ url = 'https://www.motel6.com/bin/g6/propertydata.{}.json'\n+\n+ for storeid in idata.keys():\n try:\n int(storeid)\n except ValueError:\n continue\n+\n try:\n- yield scrapy.Request(URL.format(storeid), callback=self.parse_hotel)\n+ yield scrapy.Request(url.format(storeid), callback=self.parse_hotel)\n except ValueError:\n continue\n \n@@ -35,17 +36,17 @@\n mdata = json.loads(response.body_as_unicode())\n \n properties = {\n- 'ref': mdata[\"property_id\"],\n- 'name': mdata[\"name\"],\n- 'addr_full': mdata[\"address\"],\n- 'city': mdata[\"city\"],\n- 'postcode': mdata[\"zip\"],\n- 'lat': mdata[\"latitude\"],\n- 'lon': mdata[\"longitude\"],\n- 'phone': mdata[\"phone\"],\n- 'state': mdata[\"state\"],\n- 'website': mdata[\"microsite_url\"],\n- 'brand': brand_lookup[mdata[\"brand_id\"]],\n+ 'ref': mdata[\"property_id\"],\n+ 'name': mdata[\"name\"],\n+ 'addr_full': mdata[\"address\"],\n+ 'city': mdata[\"city\"],\n+ 'postcode': mdata[\"zip\"],\n+ 'lat': mdata[\"latitude\"],\n+ 'lon': mdata[\"longitude\"],\n+ 'phone': mdata[\"phone\"],\n+ 'state': mdata[\"state\"],\n+ 'website': mdata[\"microsite_url\"],\n+ 'brand': brand_lookup[mdata[\"brand_id\"]],\n }\n \n yield GeojsonPointItem(**properties)\n", "issue": "Spider motel6 is broken\nDuring the global build at 2021-08-18-14-42-26, spider **motel6** failed with **0 features** and **2 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/logs/motel6.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/output/motel6.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/output/motel6.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nfrom locations.items import GeojsonPointItem\n\nbrand_lookup = {\n \"MS\": \"Motel 6\",\n \"SS\": \"Studio 6\",\n \"HS\": \"Hotel 6\"\n}\n\n\nclass Motel6Spider(scrapy.Spider):\n name = \"motel6\"\n allowed_domains = [\"motel6.com\"]\n start_urls = (\n 'https://www.motel6.com/var/g6/hotel-summary/ms.infinity.1.json',\n )\n\n def parse(self, response):\n idata = json.loads(response.body_as_unicode())\n storeids = idata.keys()\n URL = 'https://www.motel6.com/var/g6/hotel-information/en/{}.json'\n for storeid in storeids:\n try:\n int(storeid)\n except ValueError:\n continue\n try:\n yield scrapy.Request(URL.format(storeid), callback=self.parse_hotel)\n except ValueError:\n continue\n\n def parse_hotel(self, response):\n mdata = json.loads(response.body_as_unicode())\n\n properties = {\n 'ref': mdata[\"property_id\"],\n 'name': mdata[\"name\"],\n 'addr_full': mdata[\"address\"],\n 'city': mdata[\"city\"],\n 'postcode': mdata[\"zip\"],\n 'lat': mdata[\"latitude\"],\n 'lon': mdata[\"longitude\"],\n 'phone': mdata[\"phone\"],\n 'state': mdata[\"state\"],\n 'website': mdata[\"microsite_url\"],\n 'brand': brand_lookup[mdata[\"brand_id\"]],\n }\n\n yield 
GeojsonPointItem(**properties)\n", "path": "locations/spiders/motel6.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nfrom locations.items import GeojsonPointItem\n\nbrand_lookup = {\n \"MS\": \"Motel 6\",\n \"SS\": \"Studio 6\",\n \"HS\": \"Hotel 6\"\n}\n\n\nclass Motel6Spider(scrapy.Spider):\n name = \"motel6\"\n allowed_domains = [\"motel6.com\"]\n start_urls = (\n 'https://www.motel6.com/content/g6-cache/property-summary.1.json',\n )\n\n def parse(self, response):\n idata = json.loads(response.body_as_unicode())\n url = 'https://www.motel6.com/bin/g6/propertydata.{}.json'\n\n for storeid in idata.keys():\n try:\n int(storeid)\n except ValueError:\n continue\n\n try:\n yield scrapy.Request(url.format(storeid), callback=self.parse_hotel)\n except ValueError:\n continue\n\n def parse_hotel(self, response):\n mdata = json.loads(response.body_as_unicode())\n\n properties = {\n 'ref': mdata[\"property_id\"],\n 'name': mdata[\"name\"],\n 'addr_full': mdata[\"address\"],\n 'city': mdata[\"city\"],\n 'postcode': mdata[\"zip\"],\n 'lat': mdata[\"latitude\"],\n 'lon': mdata[\"longitude\"],\n 'phone': mdata[\"phone\"],\n 'state': mdata[\"state\"],\n 'website': mdata[\"microsite_url\"],\n 'brand': brand_lookup[mdata[\"brand_id\"]],\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/motel6.py"}]} | 896 | 553 |
gh_patches_debug_42319 | rasdani/github-patches | git_diff | AlexsLemonade__refinebio-3385 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Weekly stats shows 0 downloads for some users
### Context
A lot of changes have gone into prod recently. One of them is the EngagementBot weekly stats update.
### Problem or idea
The most recent summary contains lines like "0 downloads from ", indicating some (potential) stats inaccuracy.
### Solution or next step
Figure out why the value is empty and fix the issue. If everything is right, then just hide those 0-download items.
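
For illustration, one plausible way a "0 downloads from" line can appear, judging from the management command shown below (a hypothesis to verify, not a confirmed diagnosis): user emails are collected from `Dataset` rows, but per-user download counts come from `DatasetAnnotation` rows, so a dataset with no annotations yields a zero count. A minimal sketch of that path, reusing names from `post_downloads_summary.py`:

```python
import datetime

from django.utils import timezone

from data_refinery_common.models import Dataset, DatasetAnnotation

# Mirrors the first lines of post_downloads_summary() below.
start_time = timezone.now() - datetime.timedelta(days=7)
datasets = Dataset.processed_filtered_objects.filter(created_at__gt=start_time)
annotations = DatasetAnnotation.objects.filter(dataset__in=datasets)

# Users come from Dataset rows...
users_emails = set(dataset.email_address for dataset in datasets)
for user_email in users_emails:
    # ...but the count comes from DatasetAnnotation rows, so it can be 0 for a
    # user whose datasets have no annotations - which renders as "0 downloads from".
    user_downloads = annotations.filter(dataset__email_address=user_email).count()
```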
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `api/data_refinery_api/management/commands/post_downloads_summary.py`
Content:
```
1 import datetime
2 from collections import Counter
3
4 from django.conf import settings
5 from django.core.management.base import BaseCommand
6 from django.template.defaultfilters import pluralize
7 from django.utils import timezone
8
9 import requests
10
11 from data_refinery_common.models import Dataset, DatasetAnnotation
12
13
14 class Command(BaseCommand):
15 help = "Post downloads summary to Slack"
16
17 def add_arguments(self, parser):
18 parser.add_argument(
19 "--channel",
20 type=str,
21 default="ccdl-general",
22 help=("Optional parameter to choose the channel where the message will be posted."),
23 )
24 parser.add_argument(
25 "--days",
26 type=int,
27 default=7, # Default to a week.
28 help=("Number of days in the past for which to build the stats."),
29 )
30 parser.add_argument(
31 "--top-countries",
32 type=int,
33 default=5,
34 help=("Number of countries to show in the per country downloads summary."),
35 )
36
37 def handle(self, *args, **options):
38 post_downloads_summary(options["days"], options["channel"], options["top_countries"])
39
40
41 def format_user_data(header, data):
42 """
43 Formats user email, downloads count, location information sorted
44 by downloads count.
45 """
46 # Allowed overhead for 2 column sorting: downloads count, email.
47 lines = sorted(data, key=lambda u: u[0].lower())
48 lines = [
49 f"{email.lower()} | {downloads} download{pluralize(downloads)} from {location}"
50 for email, downloads, location in sorted(lines, key=lambda u: u[1], reverse=True)
51 ]
52 lines.insert(0, header)
53
54 return "\n".join(lines)
55
56
57 def get_user_location(ip_address):
58 """Gets user location information based on their IP address."""
59 try:
60 data = requests.get(f"https://ipapi.co/{ip_address}/json/", timeout=10).json()
61 # The list of available fields https://ipapi.co/api/#complete-location
62 return ", ".join((data["city"], data["country_name"]))
63 except (requests.exceptions.RequestException, KeyError, ValueError):
64 return ip_address
65
66
67 def post_downloads_summary(days, channel, top_countries=5):
68 """Posts downloads summary to Slack channel."""
69
70 start_time = timezone.now() - datetime.timedelta(days=days)
71 datasets = Dataset.processed_filtered_objects.filter(
72 created_at__gt=start_time
73 ).prefetch_related("datasetannotation_set")
74 annotations = DatasetAnnotation.objects.filter(dataset__in=datasets)
75 users_emails = set(dataset.email_address for dataset in datasets)
76
77 locations = set()
78 locations_cache = dict()
79 for annotation in annotations:
80 if "location" not in annotation.data:
81 ip_address = annotation.data["ip"]
82 if ip_address not in locations_cache:
83 locations_cache[ip_address] = get_user_location(ip_address)
84
85 # Save the locations permanently, since IP addresses can cycle over time.
86 annotation.data["location"] = locations_cache[ip_address]
87 annotation.save()
88 locations.add(annotation.data["location"])
89
90 downloads_per_country = Counter()
91 downloads_total = 0
92 new_users = []
93 returning_users = []
94 for user_email in users_emails:
95 user_annotations = annotations.filter(dataset__email_address=user_email)
96 user_downloads = user_annotations.count()
97 downloads_total += user_downloads
98
99 user_locations = set()
100 for user_annotation in user_annotations:
101 user_locations.add(user_annotation.data["location"])
102 try:
103 country = user_annotation.data["location"].split(", ")[1]
104 downloads_per_country.update({country: 1})
105 except (IndexError, TypeError):
106 pass
107
108 user_locations = "; ".join(sorted(user_locations))
109 user_data = (user_email, user_downloads, user_locations)
110
111 is_returning_user = Dataset.processed_filtered_objects.filter(
112 created_at__lt=start_time, email_address=user_email
113 )
114 if is_returning_user:
115 returning_users.append(user_data)
116 else:
117 new_users.append(user_data)
118
119 if downloads_total:
120 locations_count = len(locations)
121 users_emails_count = len(users_emails)
122 fallback_text = (
123 f"In the last {days} day{pluralize(days)}, {users_emails_count} "
124 f"user{pluralize(users_emails_count)} downloaded {downloads_total} "
125 f"dataset{pluralize(downloads_total)} from {locations_count} "
126 f"location{pluralize(locations_count)}."
127 )
128 else:
129 fallback_text = f"There were no downloads in the last {days} day{pluralize(days)}."
130
131 blocks = [
132 {"type": "section", "text": {"type": "plain_text", "emoji": True, "text": fallback_text}}
133 ]
134
135 if new_users:
136 blocks.append(
137 {
138 "type": "section",
139 "text": {"type": "mrkdwn", "text": format_user_data("*New users*", new_users)},
140 }
141 )
142
143 if returning_users:
144 blocks.append(
145 {
146 "type": "section",
147 "text": {
148 "type": "mrkdwn",
149 "text": format_user_data("*Returning users*", returning_users),
150 },
151 }
152 )
153
154 if top_countries and downloads_per_country:
155 countries_count = downloads_per_country.most_common(top_countries)
156 top_countries = min(top_countries, len(countries_count))
157 lines = [f"*Top {top_countries} countr{pluralize(top_countries, 'y,ies')}*"]
158 # Allowed overhead for 2 column sorting: downloads count, country.
159 countries_count = sorted(countries_count, key=lambda cc: cc[0])
160 countries_count = sorted(countries_count, key=lambda cc: cc[1], reverse=True)
161 for country, count in countries_count:
162 lines.append(f"{country}: {count} download{pluralize(count)}")
163
164 blocks.append(
165 {
166 "type": "section",
167 "text": {"type": "mrkdwn", "text": "\n".join(lines)},
168 }
169 )
170
171 # Post to Slack.
172 requests.post(
173 settings.ENGAGEMENTBOT_WEBHOOK,
174 json={
175 "username": "EngagementBot",
176 "icon_emoji": ":halal:",
177 "channel": f"#{channel}",
178 "text": fallback_text,
179 "blocks": blocks,
180 },
181 headers={"Content-Type": "application/json"},
182 timeout=10,
183 )
184
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/api/data_refinery_api/management/commands/post_downloads_summary.py b/api/data_refinery_api/management/commands/post_downloads_summary.py
--- a/api/data_refinery_api/management/commands/post_downloads_summary.py
+++ b/api/data_refinery_api/management/commands/post_downloads_summary.py
@@ -66,7 +66,6 @@
def post_downloads_summary(days, channel, top_countries=5):
"""Posts downloads summary to Slack channel."""
-
start_time = timezone.now() - datetime.timedelta(days=days)
datasets = Dataset.processed_filtered_objects.filter(
created_at__gt=start_time
@@ -75,7 +74,7 @@
users_emails = set(dataset.email_address for dataset in datasets)
locations = set()
- locations_cache = dict()
+ locations_cache = {}
for annotation in annotations:
if "location" not in annotation.data:
ip_address = annotation.data["ip"]
@@ -94,8 +93,10 @@
for user_email in users_emails:
user_annotations = annotations.filter(dataset__email_address=user_email)
user_downloads = user_annotations.count()
- downloads_total += user_downloads
+ if user_downloads == 0:
+ continue
+ downloads_total += user_downloads
user_locations = set()
for user_annotation in user_annotations:
user_locations.add(user_annotation.data["location"])
@@ -110,18 +111,18 @@
is_returning_user = Dataset.processed_filtered_objects.filter(
created_at__lt=start_time, email_address=user_email
- )
+ ).exists()
if is_returning_user:
returning_users.append(user_data)
else:
new_users.append(user_data)
- if downloads_total:
+ if downloads_total > 0:
locations_count = len(locations)
- users_emails_count = len(users_emails)
+ users_count = len(new_users) + len(returning_users)
fallback_text = (
- f"In the last {days} day{pluralize(days)}, {users_emails_count} "
- f"user{pluralize(users_emails_count)} downloaded {downloads_total} "
+ f"In the last {days} day{pluralize(days)}, {users_count} "
+ f"user{pluralize(users_count)} downloaded {downloads_total} "
f"dataset{pluralize(downloads_total)} from {locations_count} "
f"location{pluralize(locations_count)}."
)
@@ -129,14 +130,20 @@
fallback_text = f"There were no downloads in the last {days} day{pluralize(days)}."
blocks = [
- {"type": "section", "text": {"type": "plain_text", "emoji": True, "text": fallback_text}}
+ {
+ "type": "section",
+ "text": {"type": "plain_text", "emoji": True, "text": fallback_text},
+ }
]
if new_users:
blocks.append(
{
"type": "section",
- "text": {"type": "mrkdwn", "text": format_user_data("*New users*", new_users)},
+ "text": {
+ "type": "mrkdwn",
+ "text": format_user_data("*New users*", new_users),
+ },
}
)
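
Two notes on the patch above, as a reading aid: skipping users with `user_downloads == 0` keeps zero-download users out of both the totals and the per-user lines, and the `.exists()` change avoids evaluating a whole queryset just to test for membership. A minimal illustration of that second point (model and manager names reused from the patch; the email value is hypothetical):

```python
from data_refinery_common.models import Dataset

user_email = "someone@example.org"  # hypothetical value for illustration

# Truthiness on a queryset fetches and caches every matching row:
if Dataset.processed_filtered_objects.filter(email_address=user_email):
    pass

# .exists() asks the database a cheap LIMIT 1 question instead:
if Dataset.processed_filtered_objects.filter(email_address=user_email).exists():
    pass
```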
| {"golden_diff": "diff --git a/api/data_refinery_api/management/commands/post_downloads_summary.py b/api/data_refinery_api/management/commands/post_downloads_summary.py\n--- a/api/data_refinery_api/management/commands/post_downloads_summary.py\n+++ b/api/data_refinery_api/management/commands/post_downloads_summary.py\n@@ -66,7 +66,6 @@\n \n def post_downloads_summary(days, channel, top_countries=5):\n \"\"\"Posts downloads summary to Slack channel.\"\"\"\n-\n start_time = timezone.now() - datetime.timedelta(days=days)\n datasets = Dataset.processed_filtered_objects.filter(\n created_at__gt=start_time\n@@ -75,7 +74,7 @@\n users_emails = set(dataset.email_address for dataset in datasets)\n \n locations = set()\n- locations_cache = dict()\n+ locations_cache = {}\n for annotation in annotations:\n if \"location\" not in annotation.data:\n ip_address = annotation.data[\"ip\"]\n@@ -94,8 +93,10 @@\n for user_email in users_emails:\n user_annotations = annotations.filter(dataset__email_address=user_email)\n user_downloads = user_annotations.count()\n- downloads_total += user_downloads\n+ if user_downloads == 0:\n+ continue\n \n+ downloads_total += user_downloads\n user_locations = set()\n for user_annotation in user_annotations:\n user_locations.add(user_annotation.data[\"location\"])\n@@ -110,18 +111,18 @@\n \n is_returning_user = Dataset.processed_filtered_objects.filter(\n created_at__lt=start_time, email_address=user_email\n- )\n+ ).exists()\n if is_returning_user:\n returning_users.append(user_data)\n else:\n new_users.append(user_data)\n \n- if downloads_total:\n+ if downloads_total > 0:\n locations_count = len(locations)\n- users_emails_count = len(users_emails)\n+ users_count = len(new_users) + len(returning_users)\n fallback_text = (\n- f\"In the last {days} day{pluralize(days)}, {users_emails_count} \"\n- f\"user{pluralize(users_emails_count)} downloaded {downloads_total} \"\n+ f\"In the last {days} day{pluralize(days)}, {users_count} \"\n+ f\"user{pluralize(users_count)} downloaded {downloads_total} \"\n f\"dataset{pluralize(downloads_total)} from {locations_count} \"\n f\"location{pluralize(locations_count)}.\"\n )\n@@ -129,14 +130,20 @@\n fallback_text = f\"There were no downloads in the last {days} day{pluralize(days)}.\"\n \n blocks = [\n- {\"type\": \"section\", \"text\": {\"type\": \"plain_text\", \"emoji\": True, \"text\": fallback_text}}\n+ {\n+ \"type\": \"section\",\n+ \"text\": {\"type\": \"plain_text\", \"emoji\": True, \"text\": fallback_text},\n+ }\n ]\n \n if new_users:\n blocks.append(\n {\n \"type\": \"section\",\n- \"text\": {\"type\": \"mrkdwn\", \"text\": format_user_data(\"*New users*\", new_users)},\n+ \"text\": {\n+ \"type\": \"mrkdwn\",\n+ \"text\": format_user_data(\"*New users*\", new_users),\n+ },\n }\n )\n", "issue": "Weekly stats shows 0 downloads for some users\n### Context\r\n\r\nA lot of changes has gone into prod recently. One of them is the EngagementBot weekly stats updates.\r\n\r\n### Problem or idea\r\n\r\nThe most recent summary contains lines like \"0 downloads from \" indicating some (potential) stats inaccuracy.\r\n\r\n### Solution or next step\r\n\r\nFigure out why the value is empty and fix the issue. 
If everything is right then just hide those 0 downloads items.\r\n\n", "before_files": [{"content": "import datetime\nfrom collections import Counter\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.template.defaultfilters import pluralize\nfrom django.utils import timezone\n\nimport requests\n\nfrom data_refinery_common.models import Dataset, DatasetAnnotation\n\n\nclass Command(BaseCommand):\n help = \"Post downloads summary to Slack\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"--channel\",\n type=str,\n default=\"ccdl-general\",\n help=(\"Optional parameter to choose the channel where the message will be posted.\"),\n )\n parser.add_argument(\n \"--days\",\n type=int,\n default=7, # Default to a week.\n help=(\"Number of days in the past for which to build the stats.\"),\n )\n parser.add_argument(\n \"--top-countries\",\n type=int,\n default=5,\n help=(\"Number of countries to show in the per country downloads summary.\"),\n )\n\n def handle(self, *args, **options):\n post_downloads_summary(options[\"days\"], options[\"channel\"], options[\"top_countries\"])\n\n\ndef format_user_data(header, data):\n \"\"\"\n Formats user email, downloads count, location information sorted\n by downloads count.\n \"\"\"\n # Allowed overhead for 2 column sorting: downloads count, email.\n lines = sorted(data, key=lambda u: u[0].lower())\n lines = [\n f\"{email.lower()} | {downloads} download{pluralize(downloads)} from {location}\"\n for email, downloads, location in sorted(lines, key=lambda u: u[1], reverse=True)\n ]\n lines.insert(0, header)\n\n return \"\\n\".join(lines)\n\n\ndef get_user_location(ip_address):\n \"\"\"Gets user location information based on their IP address.\"\"\"\n try:\n data = requests.get(f\"https://ipapi.co/{ip_address}/json/\", timeout=10).json()\n # The list of available fields https://ipapi.co/api/#complete-location\n return \", \".join((data[\"city\"], data[\"country_name\"]))\n except (requests.exceptions.RequestException, KeyError, ValueError):\n return ip_address\n\n\ndef post_downloads_summary(days, channel, top_countries=5):\n \"\"\"Posts downloads summary to Slack channel.\"\"\"\n\n start_time = timezone.now() - datetime.timedelta(days=days)\n datasets = Dataset.processed_filtered_objects.filter(\n created_at__gt=start_time\n ).prefetch_related(\"datasetannotation_set\")\n annotations = DatasetAnnotation.objects.filter(dataset__in=datasets)\n users_emails = set(dataset.email_address for dataset in datasets)\n\n locations = set()\n locations_cache = dict()\n for annotation in annotations:\n if \"location\" not in annotation.data:\n ip_address = annotation.data[\"ip\"]\n if ip_address not in locations_cache:\n locations_cache[ip_address] = get_user_location(ip_address)\n\n # Save the locations permanently, since IP addresses can cycle over time.\n annotation.data[\"location\"] = locations_cache[ip_address]\n annotation.save()\n locations.add(annotation.data[\"location\"])\n\n downloads_per_country = Counter()\n downloads_total = 0\n new_users = []\n returning_users = []\n for user_email in users_emails:\n user_annotations = annotations.filter(dataset__email_address=user_email)\n user_downloads = user_annotations.count()\n downloads_total += user_downloads\n\n user_locations = set()\n for user_annotation in user_annotations:\n user_locations.add(user_annotation.data[\"location\"])\n try:\n country = user_annotation.data[\"location\"].split(\", \")[1]\n downloads_per_country.update({country: 1})\n 
except (IndexError, TypeError):\n pass\n\n user_locations = \"; \".join(sorted(user_locations))\n user_data = (user_email, user_downloads, user_locations)\n\n is_returning_user = Dataset.processed_filtered_objects.filter(\n created_at__lt=start_time, email_address=user_email\n )\n if is_returning_user:\n returning_users.append(user_data)\n else:\n new_users.append(user_data)\n\n if downloads_total:\n locations_count = len(locations)\n users_emails_count = len(users_emails)\n fallback_text = (\n f\"In the last {days} day{pluralize(days)}, {users_emails_count} \"\n f\"user{pluralize(users_emails_count)} downloaded {downloads_total} \"\n f\"dataset{pluralize(downloads_total)} from {locations_count} \"\n f\"location{pluralize(locations_count)}.\"\n )\n else:\n fallback_text = f\"There were no downloads in the last {days} day{pluralize(days)}.\"\n\n blocks = [\n {\"type\": \"section\", \"text\": {\"type\": \"plain_text\", \"emoji\": True, \"text\": fallback_text}}\n ]\n\n if new_users:\n blocks.append(\n {\n \"type\": \"section\",\n \"text\": {\"type\": \"mrkdwn\", \"text\": format_user_data(\"*New users*\", new_users)},\n }\n )\n\n if returning_users:\n blocks.append(\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": format_user_data(\"*Returning users*\", returning_users),\n },\n }\n )\n\n if top_countries and downloads_per_country:\n countries_count = downloads_per_country.most_common(top_countries)\n top_countries = min(top_countries, len(countries_count))\n lines = [f\"*Top {top_countries} countr{pluralize(top_countries, 'y,ies')}*\"]\n # Allowed overhead for 2 column sorting: downloads count, country.\n countries_count = sorted(countries_count, key=lambda cc: cc[0])\n countries_count = sorted(countries_count, key=lambda cc: cc[1], reverse=True)\n for country, count in countries_count:\n lines.append(f\"{country}: {count} download{pluralize(count)}\")\n\n blocks.append(\n {\n \"type\": \"section\",\n \"text\": {\"type\": \"mrkdwn\", \"text\": \"\\n\".join(lines)},\n }\n )\n\n # Post to Slack.\n requests.post(\n settings.ENGAGEMENTBOT_WEBHOOK,\n json={\n \"username\": \"EngagementBot\",\n \"icon_emoji\": \":halal:\",\n \"channel\": f\"#{channel}\",\n \"text\": fallback_text,\n \"blocks\": blocks,\n },\n headers={\"Content-Type\": \"application/json\"},\n timeout=10,\n )\n", "path": "api/data_refinery_api/management/commands/post_downloads_summary.py"}], "after_files": [{"content": "import datetime\nfrom collections import Counter\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.template.defaultfilters import pluralize\nfrom django.utils import timezone\n\nimport requests\n\nfrom data_refinery_common.models import Dataset, DatasetAnnotation\n\n\nclass Command(BaseCommand):\n help = \"Post downloads summary to Slack\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"--channel\",\n type=str,\n default=\"ccdl-general\",\n help=(\"Optional parameter to choose the channel where the message will be posted.\"),\n )\n parser.add_argument(\n \"--days\",\n type=int,\n default=7, # Default to a week.\n help=(\"Number of days in the past for which to build the stats.\"),\n )\n parser.add_argument(\n \"--top-countries\",\n type=int,\n default=5,\n help=(\"Number of countries to show in the per country downloads summary.\"),\n )\n\n def handle(self, *args, **options):\n post_downloads_summary(options[\"days\"], options[\"channel\"], options[\"top_countries\"])\n\n\ndef format_user_data(header, data):\n 
\"\"\"\n Formats user email, downloads count, location information sorted\n by downloads count.\n \"\"\"\n # Allowed overhead for 2 column sorting: downloads count, email.\n lines = sorted(data, key=lambda u: u[0].lower())\n lines = [\n f\"{email.lower()} | {downloads} download{pluralize(downloads)} from {location}\"\n for email, downloads, location in sorted(lines, key=lambda u: u[1], reverse=True)\n ]\n lines.insert(0, header)\n\n return \"\\n\".join(lines)\n\n\ndef get_user_location(ip_address):\n \"\"\"Gets user location information based on their IP address.\"\"\"\n try:\n data = requests.get(f\"https://ipapi.co/{ip_address}/json/\", timeout=10).json()\n # The list of available fields https://ipapi.co/api/#complete-location\n return \", \".join((data[\"city\"], data[\"country_name\"]))\n except (requests.exceptions.RequestException, KeyError, ValueError):\n return ip_address\n\n\ndef post_downloads_summary(days, channel, top_countries=5):\n \"\"\"Posts downloads summary to Slack channel.\"\"\"\n start_time = timezone.now() - datetime.timedelta(days=days)\n datasets = Dataset.processed_filtered_objects.filter(\n created_at__gt=start_time\n ).prefetch_related(\"datasetannotation_set\")\n annotations = DatasetAnnotation.objects.filter(dataset__in=datasets)\n users_emails = set(dataset.email_address for dataset in datasets)\n\n locations = set()\n locations_cache = {}\n for annotation in annotations:\n if \"location\" not in annotation.data:\n ip_address = annotation.data[\"ip\"]\n if ip_address not in locations_cache:\n locations_cache[ip_address] = get_user_location(ip_address)\n\n # Save the locations permanently, since IP addresses can cycle over time.\n annotation.data[\"location\"] = locations_cache[ip_address]\n annotation.save()\n locations.add(annotation.data[\"location\"])\n\n downloads_per_country = Counter()\n downloads_total = 0\n new_users = []\n returning_users = []\n for user_email in users_emails:\n user_annotations = annotations.filter(dataset__email_address=user_email)\n user_downloads = user_annotations.count()\n if user_downloads == 0:\n continue\n\n downloads_total += user_downloads\n user_locations = set()\n for user_annotation in user_annotations:\n user_locations.add(user_annotation.data[\"location\"])\n try:\n country = user_annotation.data[\"location\"].split(\", \")[1]\n downloads_per_country.update({country: 1})\n except (IndexError, TypeError):\n pass\n\n user_locations = \"; \".join(sorted(user_locations))\n user_data = (user_email, user_downloads, user_locations)\n\n is_returning_user = Dataset.processed_filtered_objects.filter(\n created_at__lt=start_time, email_address=user_email\n ).exists()\n if is_returning_user:\n returning_users.append(user_data)\n else:\n new_users.append(user_data)\n\n if downloads_total > 0:\n locations_count = len(locations)\n users_count = len(new_users) + len(returning_users)\n fallback_text = (\n f\"In the last {days} day{pluralize(days)}, {users_count} \"\n f\"user{pluralize(users_count)} downloaded {downloads_total} \"\n f\"dataset{pluralize(downloads_total)} from {locations_count} \"\n f\"location{pluralize(locations_count)}.\"\n )\n else:\n fallback_text = f\"There were no downloads in the last {days} day{pluralize(days)}.\"\n\n blocks = [\n {\n \"type\": \"section\",\n \"text\": {\"type\": \"plain_text\", \"emoji\": True, \"text\": fallback_text},\n }\n ]\n\n if new_users:\n blocks.append(\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": format_user_data(\"*New users*\", new_users),\n },\n 
}\n )\n\n if returning_users:\n blocks.append(\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": format_user_data(\"*Returning users*\", returning_users),\n },\n }\n )\n\n if top_countries and downloads_per_country:\n countries_count = downloads_per_country.most_common(top_countries)\n top_countries = min(top_countries, len(countries_count))\n lines = [f\"*Top {top_countries} countr{pluralize(top_countries, 'y,ies')}*\"]\n # Allowed overhead for 2 column sorting: downloads count, country.\n countries_count = sorted(countries_count, key=lambda cc: cc[0])\n countries_count = sorted(countries_count, key=lambda cc: cc[1], reverse=True)\n for country, count in countries_count:\n lines.append(f\"{country}: {count} download{pluralize(count)}\")\n\n blocks.append(\n {\n \"type\": \"section\",\n \"text\": {\"type\": \"mrkdwn\", \"text\": \"\\n\".join(lines)},\n }\n )\n\n # Post to Slack.\n requests.post(\n settings.ENGAGEMENTBOT_WEBHOOK,\n json={\n \"username\": \"EngagementBot\",\n \"icon_emoji\": \":halal:\",\n \"channel\": f\"#{channel}\",\n \"text\": fallback_text,\n \"blocks\": blocks,\n },\n headers={\"Content-Type\": \"application/json\"},\n timeout=10,\n )\n", "path": "api/data_refinery_api/management/commands/post_downloads_summary.py"}]} | 2,177 | 731 |
gh_patches_debug_3279 | rasdani/github-patches | git_diff | mozilla__pontoon-2667 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Entities of mass-deleted terms do not get obsoleted
When terms are deleted from the Django Admin, we [obsolete the corresponding Entities](https://github.com/mozilla/pontoon/blob/01cddfd0df2f5ddf85d1b5e26a13003f9f320d97/pontoon/terminology/models.py#L239). However, that method is not called for mass (bulk) delete actions, because Django's `QuerySet.delete()` performs a bulk delete without invoking each instance's `delete()`. This results in the following error when trying to translate deleted Terms:
```
RelatedObjectDoesNotExist: Entity has no term
```
--- END ISSUE ---
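For context, here is a minimal sketch (not Pontoon code; the `Widget` model and `run_cleanup` helper are placeholders, assumed to live in an installed app's `models.py`) of why Django's bulk deletes skip a model-level `delete()` override, and why the remedy is to also override `delete()` on the custom `QuerySet`:
```python
from django.db import models


class WidgetQuerySet(models.QuerySet):
    def delete(self, *args, **kwargs):
        # Bulk deletes come through here, so per-row cleanup has to be done explicitly.
        for widget in self:
            widget.run_cleanup()
        return super().delete(*args, **kwargs)


class Widget(models.Model):
    name = models.CharField(max_length=50)

    objects = WidgetQuerySet.as_manager()

    def run_cleanup(self):
        # Placeholder for per-instance cleanup, e.g. obsoleting a related Entity.
        pass

    def delete(self, *args, **kwargs):
        # Runs for single-instance deletes only.
        self.run_cleanup()
        return super().delete(*args, **kwargs)


# Widget.objects.get(pk=1).delete()          -> Widget.delete() runs, cleanup happens
# Widget.objects.filter(pk__lt=10).delete()  -> bulk SQL DELETE; Widget.delete() is never
#                                               called, only the QuerySet override runs
```
This is the same direction the accepted patch takes for `TermQuerySet`.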
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pontoon/terminology/models.py`
Content:
```
1 import re
2
3 from django.db import models
4
5 from pontoon.base.models import Entity, ProjectLocale, Resource, TranslatedResource
6
7
8 def update_terminology_project_stats():
9 resource = Resource.objects.get(project__slug="terminology")
10 project = resource.project
11 total_strings = Entity.objects.filter(resource=resource, obsolete=False).count()
12 resource.total_strings = total_strings
13 resource.save(update_fields=["total_strings"])
14
15 translated_resources = list(TranslatedResource.objects.filter(resource=resource))
16
17 for translated_resource in translated_resources:
18 translated_resource.calculate_stats(save=False)
19
20 TranslatedResource.objects.bulk_update(
21 translated_resources,
22 [
23 "total_strings",
24 "approved_strings",
25 "pretranslated_strings",
26 "strings_with_errors",
27 "strings_with_warnings",
28 "unreviewed_strings",
29 ],
30 )
31
32 project.aggregate_stats()
33
34 for locale in project.locales.all():
35 locale.aggregate_stats()
36
37 for projectlocale in ProjectLocale.objects.filter(project=project):
38 projectlocale.aggregate_stats()
39
40
41 class TermQuerySet(models.QuerySet):
42 def for_string(self, string):
43 terms = []
44 available_terms = self.exclude(definition="").exclude(forbidden=True)
45
46 for term in available_terms:
47 term_text = r"\b" + re.escape(term.text)
48 flags = 0 if term.case_sensitive else re.IGNORECASE
49
50 if re.search(term_text, string, flags):
51 terms.append(term)
52
53 return terms
54
55
56 class Term(models.Model):
57 text = models.CharField(max_length=255)
58 entity = models.OneToOneField("base.Entity", models.SET_NULL, null=True, blank=True)
59
60 class PartOfSpeech(models.TextChoices):
61 ADJECTIVE = "adjective", "Adjective"
62 ADVERB = "adverb", "Adverb"
63 NOUN = "noun", "Noun"
64 VERB = "verb", "Verb"
65
66 part_of_speech = models.CharField(max_length=50, choices=PartOfSpeech.choices)
67
68 definition = models.TextField(blank=True)
69 usage = models.TextField(blank=True)
70 notes = models.TextField(blank=True)
71
72 class Status(models.TextChoices):
73 APPROVED = "approved", "Approved"
74 NEW = "new", "New"
75 OBSOLETE = "obsolete", "Obsolete"
76 REVIEW = "review", "Review"
77
78 status = models.CharField(
79 max_length=20, choices=Status.choices, null=True, blank=True
80 )
81
82 case_sensitive = models.BooleanField(default=False)
83 do_not_translate = models.BooleanField(default=False)
84 forbidden = models.BooleanField(default=False)
85
86 created_at = models.DateTimeField(auto_now_add=True)
87 created_by = models.ForeignKey(
88 "auth.User", models.SET_NULL, related_name="terms", null=True, blank=True
89 )
90
91 objects = TermQuerySet.as_manager()
92
93 def translation(self, locale):
94 """
95 Get locale translation of the term.
96 """
97 if self.do_not_translate:
98 return self.text
99 else:
100 try:
101 return self.translations.get(locale=locale).text
102 except (AttributeError, TermTranslation.DoesNotExist):
103 return None
104
105 @property
106 def localizable(self):
107 """
108 Check if the term is localizable.
109 """
110 if self.do_not_translate:
111 return False
112
113 if self.forbidden:
114 return False
115
116 if self.definition == "":
117 return False
118
119 return True
120
121 def entity_comment(self):
122 """
123 Generate entity comment from the term.
124 """
125 comment = "{}. {}.".format(
126 self.part_of_speech.capitalize(),
127 self.definition.capitalize().rstrip("."),
128 )
129
130 if self.usage:
131 comment += " E.g. {}.".format(self.usage.capitalize().rstrip("."))
132
133 return comment
134
135 def create_entity(self):
136 """
137 An Entity must be created (or deobsoleted) for a Term according to the
138 following rules:
139 - Entity.string contains content of Term.text.
140 - Entity.comment contains joint content of several fields:
141 Term.part_of_speech. Term.definition. E.g.: Term.usage.
142 """
143 resource = Resource.objects.get(project__slug="terminology")
144
145 entity, created = Entity.objects.get_or_create(
146 string=self.text,
147 comment=self.entity_comment(),
148 resource=resource,
149 )
150
151 # Using update() to avoid circular Term.save() call
152 Term.objects.filter(pk=self.pk).update(entity_id=entity.id)
153
154 if not created:
155 entity.obsolete = False
156 entity.save(update_fields=["obsolete"])
157
158 # Make sure Term entities are ordered alphabetically
159 entities = list(
160 Entity.objects.filter(resource=resource, obsolete=False).order_by("string")
161 )
162 for index, e in enumerate(entities):
163 e.order = index
164 Entity.objects.bulk_update(entities, ["order"])
165
166 def obsolete_entity(self):
167 entity = self.entity
168
169 # Ignore if term doesn't have entity assigned
170 if entity is None:
171 return
172
173 entity.obsolete = True
174 entity.save(update_fields=["obsolete"])
175
176 def handle_term_update(self):
177 """
178 Before updating an existing Term, update its Entity if neccessary
179 """
180 term = self
181 old_term = Term.objects.get(pk=term.pk)
182
183 # Ignore changes to non-localizable terms that stay non-localizable
184 if not old_term.localizable and not term.localizable:
185 return
186
187 # If localizable term becomes non-localizable, obsolete its Entity
188 if old_term.localizable and not term.localizable:
189 old_term.obsolete_entity()
190
191 # If non-localizable term becomes localizable, create a corresponding Entity
192 elif not old_term.localizable and term.localizable:
193 term.create_entity()
194
195 # If relevant changes are made to the localizable term that stays localizable
196 else:
197 # If Term.text changes, a new Entity instance gets created and the previous one becomes obsolete.
198 if old_term.text != term.text:
199 old_term.obsolete_entity()
200 term.create_entity()
201
202 # If Term.part_of_speech, Term.definition or Term.usage change, Entity.comment gets updated.
203 elif (
204 old_term.part_of_speech != term.part_of_speech
205 or old_term.definition != term.definition
206 or old_term.usage != term.usage
207 ):
208 entity = term.entity
209
210 # Ignore if term doesn't have entity assigned
211 if entity is None:
212 return
213
214 entity.comment = term.entity_comment()
215 entity.save(update_fields=["comment"])
216
217 return
218
219 update_terminology_project_stats()
220
221 def handle_term_create(self):
222 """
223 After creating a new localizable Term, create its Entity
224 """
225 self.create_entity()
226 update_terminology_project_stats()
227
228 def save(self, *args, **kwargs):
229 created = self.pk is None
230
231 if not created:
232 self.handle_term_update()
233
234 super().save(*args, **kwargs)
235
236 if created and self.localizable:
237 self.handle_term_create()
238
239 def delete(self, *args, **kwargs):
240 """
241 Before deleting a Term, obsolete its Entity
242 """
243 self.obsolete_entity()
244 update_terminology_project_stats()
245
246 super().delete(*args, **kwargs)
247
248 def __str__(self):
249 return self.text
250
251
252 class TermTranslation(models.Model):
253 term = models.ForeignKey(Term, models.CASCADE, related_name="translations")
254 locale = models.ForeignKey("base.Locale", models.CASCADE, related_name="terms")
255
256 text = models.CharField(max_length=255)
257
258 def __str__(self):
259 return self.text
260
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pontoon/terminology/models.py b/pontoon/terminology/models.py
--- a/pontoon/terminology/models.py
+++ b/pontoon/terminology/models.py
@@ -52,6 +52,16 @@
return terms
+ def delete(self, *args, **kwargs):
+ """
+ Before deleting Terms, obsolete their Entities
+ """
+ for term in self:
+ term.obsolete_entity()
+ update_terminology_project_stats()
+
+ super().delete(*args, **kwargs)
+
class Term(models.Model):
text = models.CharField(max_length=255)
| {"golden_diff": "diff --git a/pontoon/terminology/models.py b/pontoon/terminology/models.py\n--- a/pontoon/terminology/models.py\n+++ b/pontoon/terminology/models.py\n@@ -52,6 +52,16 @@\n \n return terms\n \n+ def delete(self, *args, **kwargs):\n+ \"\"\"\n+ Before deleting Terms, obsolete their Entities\n+ \"\"\"\n+ for term in self:\n+ term.obsolete_entity()\n+ update_terminology_project_stats()\n+\n+ super().delete(*args, **kwargs)\n+\n \n class Term(models.Model):\n text = models.CharField(max_length=255)\n", "issue": "Entities of mass deleted terms do not get obsolete\nWhen terms are deleted from the Django Admin, we [obsolete corresponding Entities](https://github.com/mozilla/pontoon/blob/01cddfd0df2f5ddf85d1b5e26a13003f9f320d97/pontoon/terminology/models.py#L239). However, this function doesn't trigger in mass delete actions, which results in the following error when trying to translate deleted Terms:\r\n\r\n```\r\nRelatedObjectDoesNotExist: Entity has no term\r\n```\n", "before_files": [{"content": "import re\n\nfrom django.db import models\n\nfrom pontoon.base.models import Entity, ProjectLocale, Resource, TranslatedResource\n\n\ndef update_terminology_project_stats():\n resource = Resource.objects.get(project__slug=\"terminology\")\n project = resource.project\n total_strings = Entity.objects.filter(resource=resource, obsolete=False).count()\n resource.total_strings = total_strings\n resource.save(update_fields=[\"total_strings\"])\n\n translated_resources = list(TranslatedResource.objects.filter(resource=resource))\n\n for translated_resource in translated_resources:\n translated_resource.calculate_stats(save=False)\n\n TranslatedResource.objects.bulk_update(\n translated_resources,\n [\n \"total_strings\",\n \"approved_strings\",\n \"pretranslated_strings\",\n \"strings_with_errors\",\n \"strings_with_warnings\",\n \"unreviewed_strings\",\n ],\n )\n\n project.aggregate_stats()\n\n for locale in project.locales.all():\n locale.aggregate_stats()\n\n for projectlocale in ProjectLocale.objects.filter(project=project):\n projectlocale.aggregate_stats()\n\n\nclass TermQuerySet(models.QuerySet):\n def for_string(self, string):\n terms = []\n available_terms = self.exclude(definition=\"\").exclude(forbidden=True)\n\n for term in available_terms:\n term_text = r\"\\b\" + re.escape(term.text)\n flags = 0 if term.case_sensitive else re.IGNORECASE\n\n if re.search(term_text, string, flags):\n terms.append(term)\n\n return terms\n\n\nclass Term(models.Model):\n text = models.CharField(max_length=255)\n entity = models.OneToOneField(\"base.Entity\", models.SET_NULL, null=True, blank=True)\n\n class PartOfSpeech(models.TextChoices):\n ADJECTIVE = \"adjective\", \"Adjective\"\n ADVERB = \"adverb\", \"Adverb\"\n NOUN = \"noun\", \"Noun\"\n VERB = \"verb\", \"Verb\"\n\n part_of_speech = models.CharField(max_length=50, choices=PartOfSpeech.choices)\n\n definition = models.TextField(blank=True)\n usage = models.TextField(blank=True)\n notes = models.TextField(blank=True)\n\n class Status(models.TextChoices):\n APPROVED = \"approved\", \"Approved\"\n NEW = \"new\", \"New\"\n OBSOLETE = \"obsolete\", \"Obsolete\"\n REVIEW = \"review\", \"Review\"\n\n status = models.CharField(\n max_length=20, choices=Status.choices, null=True, blank=True\n )\n\n case_sensitive = models.BooleanField(default=False)\n do_not_translate = models.BooleanField(default=False)\n forbidden = models.BooleanField(default=False)\n\n created_at = models.DateTimeField(auto_now_add=True)\n created_by = models.ForeignKey(\n 
\"auth.User\", models.SET_NULL, related_name=\"terms\", null=True, blank=True\n )\n\n objects = TermQuerySet.as_manager()\n\n def translation(self, locale):\n \"\"\"\n Get locale translation of the term.\n \"\"\"\n if self.do_not_translate:\n return self.text\n else:\n try:\n return self.translations.get(locale=locale).text\n except (AttributeError, TermTranslation.DoesNotExist):\n return None\n\n @property\n def localizable(self):\n \"\"\"\n Check if the term is localizable.\n \"\"\"\n if self.do_not_translate:\n return False\n\n if self.forbidden:\n return False\n\n if self.definition == \"\":\n return False\n\n return True\n\n def entity_comment(self):\n \"\"\"\n Generate entity comment from the term.\n \"\"\"\n comment = \"{}. {}.\".format(\n self.part_of_speech.capitalize(),\n self.definition.capitalize().rstrip(\".\"),\n )\n\n if self.usage:\n comment += \" E.g. {}.\".format(self.usage.capitalize().rstrip(\".\"))\n\n return comment\n\n def create_entity(self):\n \"\"\"\n An Entity must be created (or deobsoleted) for a Term according to the\n following rules:\n - Entity.string contains content of Term.text.\n - Entity.comment contains joint content of several fields:\n Term.part_of_speech. Term.definition. E.g.: Term.usage.\n \"\"\"\n resource = Resource.objects.get(project__slug=\"terminology\")\n\n entity, created = Entity.objects.get_or_create(\n string=self.text,\n comment=self.entity_comment(),\n resource=resource,\n )\n\n # Using update() to avoid circular Term.save() call\n Term.objects.filter(pk=self.pk).update(entity_id=entity.id)\n\n if not created:\n entity.obsolete = False\n entity.save(update_fields=[\"obsolete\"])\n\n # Make sure Term entities are ordered alphabetically\n entities = list(\n Entity.objects.filter(resource=resource, obsolete=False).order_by(\"string\")\n )\n for index, e in enumerate(entities):\n e.order = index\n Entity.objects.bulk_update(entities, [\"order\"])\n\n def obsolete_entity(self):\n entity = self.entity\n\n # Ignore if term doesn't have entity assigned\n if entity is None:\n return\n\n entity.obsolete = True\n entity.save(update_fields=[\"obsolete\"])\n\n def handle_term_update(self):\n \"\"\"\n Before updating an existing Term, update its Entity if neccessary\n \"\"\"\n term = self\n old_term = Term.objects.get(pk=term.pk)\n\n # Ignore changes to non-localizable terms that stay non-localizable\n if not old_term.localizable and not term.localizable:\n return\n\n # If localizable term becomes non-localizable, obsolete its Entity\n if old_term.localizable and not term.localizable:\n old_term.obsolete_entity()\n\n # If non-localizable term becomes localizable, create a corresponding Entity\n elif not old_term.localizable and term.localizable:\n term.create_entity()\n\n # If relevant changes are made to the localizable term that stays localizable\n else:\n # If Term.text changes, a new Entity instance gets created and the previous one becomes obsolete.\n if old_term.text != term.text:\n old_term.obsolete_entity()\n term.create_entity()\n\n # If Term.part_of_speech, Term.definition or Term.usage change, Entity.comment gets updated.\n elif (\n old_term.part_of_speech != term.part_of_speech\n or old_term.definition != term.definition\n or old_term.usage != term.usage\n ):\n entity = term.entity\n\n # Ignore if term doesn't have entity assigned\n if entity is None:\n return\n\n entity.comment = term.entity_comment()\n entity.save(update_fields=[\"comment\"])\n\n return\n\n update_terminology_project_stats()\n\n def handle_term_create(self):\n 
\"\"\"\n After creating a new localizable Term, create its Entity\n \"\"\"\n self.create_entity()\n update_terminology_project_stats()\n\n def save(self, *args, **kwargs):\n created = self.pk is None\n\n if not created:\n self.handle_term_update()\n\n super().save(*args, **kwargs)\n\n if created and self.localizable:\n self.handle_term_create()\n\n def delete(self, *args, **kwargs):\n \"\"\"\n Before deleting a Term, obsolete its Entity\n \"\"\"\n self.obsolete_entity()\n update_terminology_project_stats()\n\n super().delete(*args, **kwargs)\n\n def __str__(self):\n return self.text\n\n\nclass TermTranslation(models.Model):\n term = models.ForeignKey(Term, models.CASCADE, related_name=\"translations\")\n locale = models.ForeignKey(\"base.Locale\", models.CASCADE, related_name=\"terms\")\n\n text = models.CharField(max_length=255)\n\n def __str__(self):\n return self.text\n", "path": "pontoon/terminology/models.py"}], "after_files": [{"content": "import re\n\nfrom django.db import models\n\nfrom pontoon.base.models import Entity, ProjectLocale, Resource, TranslatedResource\n\n\ndef update_terminology_project_stats():\n resource = Resource.objects.get(project__slug=\"terminology\")\n project = resource.project\n total_strings = Entity.objects.filter(resource=resource, obsolete=False).count()\n resource.total_strings = total_strings\n resource.save(update_fields=[\"total_strings\"])\n\n translated_resources = list(TranslatedResource.objects.filter(resource=resource))\n\n for translated_resource in translated_resources:\n translated_resource.calculate_stats(save=False)\n\n TranslatedResource.objects.bulk_update(\n translated_resources,\n [\n \"total_strings\",\n \"approved_strings\",\n \"pretranslated_strings\",\n \"strings_with_errors\",\n \"strings_with_warnings\",\n \"unreviewed_strings\",\n ],\n )\n\n project.aggregate_stats()\n\n for locale in project.locales.all():\n locale.aggregate_stats()\n\n for projectlocale in ProjectLocale.objects.filter(project=project):\n projectlocale.aggregate_stats()\n\n\nclass TermQuerySet(models.QuerySet):\n def for_string(self, string):\n terms = []\n available_terms = self.exclude(definition=\"\").exclude(forbidden=True)\n\n for term in available_terms:\n term_text = r\"\\b\" + re.escape(term.text)\n flags = 0 if term.case_sensitive else re.IGNORECASE\n\n if re.search(term_text, string, flags):\n terms.append(term)\n\n return terms\n\n def delete(self, *args, **kwargs):\n \"\"\"\n Before deleting Terms, obsolete their Entities\n \"\"\"\n for term in self:\n term.obsolete_entity()\n update_terminology_project_stats()\n\n super().delete(*args, **kwargs)\n\n\nclass Term(models.Model):\n text = models.CharField(max_length=255)\n entity = models.OneToOneField(\"base.Entity\", models.SET_NULL, null=True, blank=True)\n\n class PartOfSpeech(models.TextChoices):\n ADJECTIVE = \"adjective\", \"Adjective\"\n ADVERB = \"adverb\", \"Adverb\"\n NOUN = \"noun\", \"Noun\"\n VERB = \"verb\", \"Verb\"\n\n part_of_speech = models.CharField(max_length=50, choices=PartOfSpeech.choices)\n\n definition = models.TextField(blank=True)\n usage = models.TextField(blank=True)\n notes = models.TextField(blank=True)\n\n class Status(models.TextChoices):\n APPROVED = \"approved\", \"Approved\"\n NEW = \"new\", \"New\"\n OBSOLETE = \"obsolete\", \"Obsolete\"\n REVIEW = \"review\", \"Review\"\n\n status = models.CharField(\n max_length=20, choices=Status.choices, null=True, blank=True\n )\n\n case_sensitive = models.BooleanField(default=False)\n do_not_translate = 
models.BooleanField(default=False)\n forbidden = models.BooleanField(default=False)\n\n created_at = models.DateTimeField(auto_now_add=True)\n created_by = models.ForeignKey(\n \"auth.User\", models.SET_NULL, related_name=\"terms\", null=True, blank=True\n )\n\n objects = TermQuerySet.as_manager()\n\n def translation(self, locale):\n \"\"\"\n Get locale translation of the term.\n \"\"\"\n if self.do_not_translate:\n return self.text\n else:\n try:\n return self.translations.get(locale=locale).text\n except (AttributeError, TermTranslation.DoesNotExist):\n return None\n\n @property\n def localizable(self):\n \"\"\"\n Check if the term is localizable.\n \"\"\"\n if self.do_not_translate:\n return False\n\n if self.forbidden:\n return False\n\n if self.definition == \"\":\n return False\n\n return True\n\n def entity_comment(self):\n \"\"\"\n Generate entity comment from the term.\n \"\"\"\n comment = \"{}. {}.\".format(\n self.part_of_speech.capitalize(),\n self.definition.capitalize().rstrip(\".\"),\n )\n\n if self.usage:\n comment += \" E.g. {}.\".format(self.usage.capitalize().rstrip(\".\"))\n\n return comment\n\n def create_entity(self):\n \"\"\"\n An Entity must be created (or deobsoleted) for a Term according to the\n following rules:\n - Entity.string contains content of Term.text.\n - Entity.comment contains joint content of several fields:\n Term.part_of_speech. Term.definition. E.g.: Term.usage.\n \"\"\"\n resource = Resource.objects.get(project__slug=\"terminology\")\n\n entity, created = Entity.objects.get_or_create(\n string=self.text,\n comment=self.entity_comment(),\n resource=resource,\n )\n\n # Using update() to avoid circular Term.save() call\n Term.objects.filter(pk=self.pk).update(entity_id=entity.id)\n\n if not created:\n entity.obsolete = False\n entity.save(update_fields=[\"obsolete\"])\n\n # Make sure Term entities are ordered alphabetically\n entities = list(\n Entity.objects.filter(resource=resource, obsolete=False).order_by(\"string\")\n )\n for index, e in enumerate(entities):\n e.order = index\n Entity.objects.bulk_update(entities, [\"order\"])\n\n def obsolete_entity(self):\n entity = self.entity\n\n # Ignore if term doesn't have entity assigned\n if entity is None:\n return\n\n entity.obsolete = True\n entity.save(update_fields=[\"obsolete\"])\n\n def handle_term_update(self):\n \"\"\"\n Before updating an existing Term, update its Entity if neccessary\n \"\"\"\n term = self\n old_term = Term.objects.get(pk=term.pk)\n\n # Ignore changes to non-localizable terms that stay non-localizable\n if not old_term.localizable and not term.localizable:\n return\n\n # If localizable term becomes non-localizable, obsolete its Entity\n if old_term.localizable and not term.localizable:\n old_term.obsolete_entity()\n\n # If non-localizable term becomes localizable, create a corresponding Entity\n elif not old_term.localizable and term.localizable:\n term.create_entity()\n\n # If relevant changes are made to the localizable term that stays localizable\n else:\n # If Term.text changes, a new Entity instance gets created and the previous one becomes obsolete.\n if old_term.text != term.text:\n old_term.obsolete_entity()\n term.create_entity()\n\n # If Term.part_of_speech, Term.definition or Term.usage change, Entity.comment gets updated.\n elif (\n old_term.part_of_speech != term.part_of_speech\n or old_term.definition != term.definition\n or old_term.usage != term.usage\n ):\n entity = term.entity\n\n # Ignore if term doesn't have entity assigned\n if entity is None:\n 
return\n\n entity.comment = term.entity_comment()\n entity.save(update_fields=[\"comment\"])\n\n return\n\n update_terminology_project_stats()\n\n def handle_term_create(self):\n \"\"\"\n After creating a new localizable Term, create its Entity\n \"\"\"\n self.create_entity()\n update_terminology_project_stats()\n\n def save(self, *args, **kwargs):\n created = self.pk is None\n\n if not created:\n self.handle_term_update()\n\n super().save(*args, **kwargs)\n\n if created and self.localizable:\n self.handle_term_create()\n\n def delete(self, *args, **kwargs):\n \"\"\"\n Before deleting a Term, obsolete its Entity\n \"\"\"\n self.obsolete_entity()\n update_terminology_project_stats()\n\n super().delete(*args, **kwargs)\n\n def __str__(self):\n return self.text\n\n\nclass TermTranslation(models.Model):\n term = models.ForeignKey(Term, models.CASCADE, related_name=\"translations\")\n locale = models.ForeignKey(\"base.Locale\", models.CASCADE, related_name=\"terms\")\n\n text = models.CharField(max_length=255)\n\n def __str__(self):\n return self.text\n", "path": "pontoon/terminology/models.py"}]} | 2,691 | 144 |
gh_patches_debug_11088 | rasdani/github-patches | git_diff | getsentry__sentry-python-773 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DjangoIntegration conflict with MiddlewareMixin & TemplateView
django version: 3.0
sentry-sdk version: 0.14.3
My project has a middleware using Django's `django.utils.deprecation.MiddlewareMixin`. Visiting a view which subclasses `django.views.generic.TemplateView` while Sentry is active results in the following exception:
```
AttributeError: 'function' object has no attribute '__self__'
File "django/core/handlers/exception.py", line 47, in inner
response = get_response(request)
File "django/core/handlers/base.py", line 196, in _get_response
middleware_method.__self__.__class__.__name__,
```
The following classes & url config should be sufficient to demonstrate the problem:
```
from django.utils.deprecation import MiddlewareMixin
from django.views.generic import TemplateView

# ...in myapp.middleware.py
class DemonstratesConflictMiddleware(MiddlewareMixin):
    def process_template_response(self, request, response):
        return response

# ...in myapp.views.py
class DemonstratesConflictView(TemplateView):
    template_name = "index.html"

# ...in urls.py
from django.urls import path
import myapp.views

urlpatterns += [
    path('/', myapp.views.DemonstratesConflictView.as_view(), name='throws-exception'),
]

# ... in settings.py
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration

MIDDLEWARE += ['myapp.middleware.DemonstratesConflictMiddleware']

sentry_sdk.init(
    dsn="OMITTED",
    integrations=[DjangoIntegration()],
)
```
--- END ISSUE ---
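To make the root cause concrete, here is a small standalone sketch (plain Python, not Sentry or Django code; the `Middleware` class is a stand-in) showing that `functools.wraps` does not copy `__self__` from a bound method, which is exactly what code reading `middleware_method.__self__` trips over, and that assigning it manually restores the attribute:
```python
from functools import wraps


class Middleware:
    def process_template_response(self, request, response):
        return response


bound = Middleware().process_template_response  # bound method: has __self__

@wraps(bound)
def wrapper(*args, **kwargs):
    # wraps() copies __name__, __doc__, etc., but __self__ is not among them.
    return bound(*args, **kwargs)


print(hasattr(bound, "__self__"))    # True
print(hasattr(wrapper, "__self__"))  # False -> AttributeError for callers expecting it

# The kind of manual fix applied in the patch: carry __self__ over explicitly.
wrapper.__self__ = bound.__self__
print(wrapper.__self__.__class__.__name__)  # "Middleware"
```
This mirrors the accepted patch, which assigns `__self__` onto the wrapped method after `wraps()`.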
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sentry_sdk/integrations/django/middleware.py`
Content:
```
1 """
2 Create spans from Django middleware invocations
3 """
4
5 from django import VERSION as DJANGO_VERSION
6
7 from sentry_sdk import Hub
8 from sentry_sdk._functools import wraps
9 from sentry_sdk._types import MYPY
10 from sentry_sdk.utils import (
11 ContextVar,
12 transaction_from_function,
13 capture_internal_exceptions,
14 )
15
16 if MYPY:
17 from typing import Any
18 from typing import Callable
19 from typing import TypeVar
20
21 F = TypeVar("F", bound=Callable[..., Any])
22
23 _import_string_should_wrap_middleware = ContextVar(
24 "import_string_should_wrap_middleware"
25 )
26
27 if DJANGO_VERSION < (1, 7):
28 import_string_name = "import_by_path"
29 else:
30 import_string_name = "import_string"
31
32
33 def patch_django_middlewares():
34 # type: () -> None
35 from django.core.handlers import base
36
37 old_import_string = getattr(base, import_string_name)
38
39 def sentry_patched_import_string(dotted_path):
40 # type: (str) -> Any
41 rv = old_import_string(dotted_path)
42
43 if _import_string_should_wrap_middleware.get(None):
44 rv = _wrap_middleware(rv, dotted_path)
45
46 return rv
47
48 setattr(base, import_string_name, sentry_patched_import_string)
49
50 old_load_middleware = base.BaseHandler.load_middleware
51
52 def sentry_patched_load_middleware(*args, **kwargs):
53 # type: (Any, Any) -> Any
54 _import_string_should_wrap_middleware.set(True)
55 try:
56 return old_load_middleware(*args, **kwargs)
57 finally:
58 _import_string_should_wrap_middleware.set(False)
59
60 base.BaseHandler.load_middleware = sentry_patched_load_middleware
61
62
63 def _wrap_middleware(middleware, middleware_name):
64 # type: (Any, str) -> Any
65 from sentry_sdk.integrations.django import DjangoIntegration
66
67 def _get_wrapped_method(old_method):
68 # type: (F) -> F
69 with capture_internal_exceptions():
70
71 def sentry_wrapped_method(*args, **kwargs):
72 # type: (*Any, **Any) -> Any
73 hub = Hub.current
74 integration = hub.get_integration(DjangoIntegration)
75 if integration is None or not integration.middleware_spans:
76 return old_method(*args, **kwargs)
77
78 function_name = transaction_from_function(old_method)
79
80 description = middleware_name
81 function_basename = getattr(old_method, "__name__", None)
82 if function_basename:
83 description = "{}.{}".format(description, function_basename)
84
85 with hub.start_span(
86 op="django.middleware", description=description
87 ) as span:
88 span.set_tag("django.function_name", function_name)
89 span.set_tag("django.middleware_name", middleware_name)
90 return old_method(*args, **kwargs)
91
92 try:
93 # fails for __call__ of function on Python 2 (see py2.7-django-1.11)
94 return wraps(old_method)(sentry_wrapped_method) # type: ignore
95 except Exception:
96 return sentry_wrapped_method # type: ignore
97
98 return old_method
99
100 class SentryWrappingMiddleware(object):
101 def __init__(self, *args, **kwargs):
102 # type: (*Any, **Any) -> None
103 self._inner = middleware(*args, **kwargs)
104 self._call_method = None
105
106 # We need correct behavior for `hasattr()`, which we can only determine
107 # when we have an instance of the middleware we're wrapping.
108 def __getattr__(self, method_name):
109 # type: (str) -> Any
110 if method_name not in (
111 "process_request",
112 "process_view",
113 "process_template_response",
114 "process_response",
115 "process_exception",
116 ):
117 raise AttributeError()
118
119 old_method = getattr(self._inner, method_name)
120 rv = _get_wrapped_method(old_method)
121 self.__dict__[method_name] = rv
122 return rv
123
124 def __call__(self, *args, **kwargs):
125 # type: (*Any, **Any) -> Any
126 f = self._call_method
127 if f is None:
128 self._call_method = f = _get_wrapped_method(self._inner.__call__)
129 return f(*args, **kwargs)
130
131 if hasattr(middleware, "__name__"):
132 SentryWrappingMiddleware.__name__ = middleware.__name__
133
134 return SentryWrappingMiddleware
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sentry_sdk/integrations/django/middleware.py b/sentry_sdk/integrations/django/middleware.py
--- a/sentry_sdk/integrations/django/middleware.py
+++ b/sentry_sdk/integrations/django/middleware.py
@@ -91,9 +91,14 @@
try:
# fails for __call__ of function on Python 2 (see py2.7-django-1.11)
- return wraps(old_method)(sentry_wrapped_method) # type: ignore
+ sentry_wrapped_method = wraps(old_method)(sentry_wrapped_method)
+
+ # Necessary for Django 3.1
+ sentry_wrapped_method.__self__ = old_method.__self__ # type: ignore
except Exception:
- return sentry_wrapped_method # type: ignore
+ pass
+
+ return sentry_wrapped_method # type: ignore
return old_method
| {"golden_diff": "diff --git a/sentry_sdk/integrations/django/middleware.py b/sentry_sdk/integrations/django/middleware.py\n--- a/sentry_sdk/integrations/django/middleware.py\n+++ b/sentry_sdk/integrations/django/middleware.py\n@@ -91,9 +91,14 @@\n \n try:\n # fails for __call__ of function on Python 2 (see py2.7-django-1.11)\n- return wraps(old_method)(sentry_wrapped_method) # type: ignore\n+ sentry_wrapped_method = wraps(old_method)(sentry_wrapped_method)\n+\n+ # Necessary for Django 3.1\n+ sentry_wrapped_method.__self__ = old_method.__self__ # type: ignore\n except Exception:\n- return sentry_wrapped_method # type: ignore\n+ pass\n+\n+ return sentry_wrapped_method # type: ignore\n \n return old_method\n", "issue": "DjangoIntegration conflict with MiddlewareMixin & TemplateView\ndjango version: 3.0\r\nsentry-sdk version: 0.14.3\r\n\r\nMy project has a middleware using Django's `django.utils.deprecation.MiddlewareMixin`. Visiting a view which subclasses `django.views.generic.TemplateView` while Sentry is active results in the following exception:\r\n```\r\nAttributeError: 'function' object has no attribute '__self__'\r\n File \"django/core/handlers/exception.py\", line 47, in inner\r\n response = get_response(request)\r\n File \"django/core/handlers/base.py\", line 196, in _get_response\r\n middleware_method.__self__.__class__.__name__,\r\n```\r\n\r\nThe following classes & url config should be sufficient to demonstrate the problem:\r\n\r\n```\r\nfrom django.utils.deprecation import MiddlewareMixin\r\nfrom django.views.generic import TemplateView\r\n\r\n# ...in myapp.middleware.py\r\nclass DemonstratesConflictMiddleware(MiddlewareMixin):\r\n def process_template_response(self, request, response):\r\n return response\r\n\r\n# ...in myapp.views.py\r\nclass DemonstratesConflictView(TemplateView): \r\n template_name = \"index.html\"\r\n\r\n\r\n# ...in urls.py\r\nimport myapp.views\r\nurlpatterns += [\r\n path('/', myapp.views.DemonstratesConflictView.as_view(), name='throws-exception'),\r\n]\r\n\r\n# ... 
in settings.py\r\n\r\nMIDDLEWARE += ['myapp.middleware.DemonstratesConflictMiddleware']\r\n\r\nsentry_sdk.init(\r\n dsn=\"OMITTED\",\r\n integrations=[DjangoIntegration()],\r\n)\r\n\r\n```\n", "before_files": [{"content": "\"\"\"\nCreate spans from Django middleware invocations\n\"\"\"\n\nfrom django import VERSION as DJANGO_VERSION\n\nfrom sentry_sdk import Hub\nfrom sentry_sdk._functools import wraps\nfrom sentry_sdk._types import MYPY\nfrom sentry_sdk.utils import (\n ContextVar,\n transaction_from_function,\n capture_internal_exceptions,\n)\n\nif MYPY:\n from typing import Any\n from typing import Callable\n from typing import TypeVar\n\n F = TypeVar(\"F\", bound=Callable[..., Any])\n\n_import_string_should_wrap_middleware = ContextVar(\n \"import_string_should_wrap_middleware\"\n)\n\nif DJANGO_VERSION < (1, 7):\n import_string_name = \"import_by_path\"\nelse:\n import_string_name = \"import_string\"\n\n\ndef patch_django_middlewares():\n # type: () -> None\n from django.core.handlers import base\n\n old_import_string = getattr(base, import_string_name)\n\n def sentry_patched_import_string(dotted_path):\n # type: (str) -> Any\n rv = old_import_string(dotted_path)\n\n if _import_string_should_wrap_middleware.get(None):\n rv = _wrap_middleware(rv, dotted_path)\n\n return rv\n\n setattr(base, import_string_name, sentry_patched_import_string)\n\n old_load_middleware = base.BaseHandler.load_middleware\n\n def sentry_patched_load_middleware(*args, **kwargs):\n # type: (Any, Any) -> Any\n _import_string_should_wrap_middleware.set(True)\n try:\n return old_load_middleware(*args, **kwargs)\n finally:\n _import_string_should_wrap_middleware.set(False)\n\n base.BaseHandler.load_middleware = sentry_patched_load_middleware\n\n\ndef _wrap_middleware(middleware, middleware_name):\n # type: (Any, str) -> Any\n from sentry_sdk.integrations.django import DjangoIntegration\n\n def _get_wrapped_method(old_method):\n # type: (F) -> F\n with capture_internal_exceptions():\n\n def sentry_wrapped_method(*args, **kwargs):\n # type: (*Any, **Any) -> Any\n hub = Hub.current\n integration = hub.get_integration(DjangoIntegration)\n if integration is None or not integration.middleware_spans:\n return old_method(*args, **kwargs)\n\n function_name = transaction_from_function(old_method)\n\n description = middleware_name\n function_basename = getattr(old_method, \"__name__\", None)\n if function_basename:\n description = \"{}.{}\".format(description, function_basename)\n\n with hub.start_span(\n op=\"django.middleware\", description=description\n ) as span:\n span.set_tag(\"django.function_name\", function_name)\n span.set_tag(\"django.middleware_name\", middleware_name)\n return old_method(*args, **kwargs)\n\n try:\n # fails for __call__ of function on Python 2 (see py2.7-django-1.11)\n return wraps(old_method)(sentry_wrapped_method) # type: ignore\n except Exception:\n return sentry_wrapped_method # type: ignore\n\n return old_method\n\n class SentryWrappingMiddleware(object):\n def __init__(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n self._inner = middleware(*args, **kwargs)\n self._call_method = None\n\n # We need correct behavior for `hasattr()`, which we can only determine\n # when we have an instance of the middleware we're wrapping.\n def __getattr__(self, method_name):\n # type: (str) -> Any\n if method_name not in (\n \"process_request\",\n \"process_view\",\n \"process_template_response\",\n \"process_response\",\n \"process_exception\",\n ):\n raise AttributeError()\n\n old_method = 
getattr(self._inner, method_name)\n rv = _get_wrapped_method(old_method)\n self.__dict__[method_name] = rv\n return rv\n\n def __call__(self, *args, **kwargs):\n # type: (*Any, **Any) -> Any\n f = self._call_method\n if f is None:\n self._call_method = f = _get_wrapped_method(self._inner.__call__)\n return f(*args, **kwargs)\n\n if hasattr(middleware, \"__name__\"):\n SentryWrappingMiddleware.__name__ = middleware.__name__\n\n return SentryWrappingMiddleware\n", "path": "sentry_sdk/integrations/django/middleware.py"}], "after_files": [{"content": "\"\"\"\nCreate spans from Django middleware invocations\n\"\"\"\n\nfrom django import VERSION as DJANGO_VERSION\n\nfrom sentry_sdk import Hub\nfrom sentry_sdk._functools import wraps\nfrom sentry_sdk._types import MYPY\nfrom sentry_sdk.utils import (\n ContextVar,\n transaction_from_function,\n capture_internal_exceptions,\n)\n\nif MYPY:\n from typing import Any\n from typing import Callable\n from typing import TypeVar\n\n F = TypeVar(\"F\", bound=Callable[..., Any])\n\n_import_string_should_wrap_middleware = ContextVar(\n \"import_string_should_wrap_middleware\"\n)\n\nif DJANGO_VERSION < (1, 7):\n import_string_name = \"import_by_path\"\nelse:\n import_string_name = \"import_string\"\n\n\ndef patch_django_middlewares():\n # type: () -> None\n from django.core.handlers import base\n\n old_import_string = getattr(base, import_string_name)\n\n def sentry_patched_import_string(dotted_path):\n # type: (str) -> Any\n rv = old_import_string(dotted_path)\n\n if _import_string_should_wrap_middleware.get(None):\n rv = _wrap_middleware(rv, dotted_path)\n\n return rv\n\n setattr(base, import_string_name, sentry_patched_import_string)\n\n old_load_middleware = base.BaseHandler.load_middleware\n\n def sentry_patched_load_middleware(*args, **kwargs):\n # type: (Any, Any) -> Any\n _import_string_should_wrap_middleware.set(True)\n try:\n return old_load_middleware(*args, **kwargs)\n finally:\n _import_string_should_wrap_middleware.set(False)\n\n base.BaseHandler.load_middleware = sentry_patched_load_middleware\n\n\ndef _wrap_middleware(middleware, middleware_name):\n # type: (Any, str) -> Any\n from sentry_sdk.integrations.django import DjangoIntegration\n\n def _get_wrapped_method(old_method):\n # type: (F) -> F\n with capture_internal_exceptions():\n\n def sentry_wrapped_method(*args, **kwargs):\n # type: (*Any, **Any) -> Any\n hub = Hub.current\n integration = hub.get_integration(DjangoIntegration)\n if integration is None or not integration.middleware_spans:\n return old_method(*args, **kwargs)\n\n function_name = transaction_from_function(old_method)\n\n description = middleware_name\n function_basename = getattr(old_method, \"__name__\", None)\n if function_basename:\n description = \"{}.{}\".format(description, function_basename)\n\n with hub.start_span(\n op=\"django.middleware\", description=description\n ) as span:\n span.set_tag(\"django.function_name\", function_name)\n span.set_tag(\"django.middleware_name\", middleware_name)\n return old_method(*args, **kwargs)\n\n try:\n # fails for __call__ of function on Python 2 (see py2.7-django-1.11)\n sentry_wrapped_method = wraps(old_method)(sentry_wrapped_method)\n\n # Necessary for Django 3.1\n sentry_wrapped_method.__self__ = old_method.__self__ # type: ignore\n except Exception:\n pass\n\n return sentry_wrapped_method # type: ignore\n\n return old_method\n\n class SentryWrappingMiddleware(object):\n def __init__(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n self._inner = middleware(*args, 
**kwargs)\n self._call_method = None\n\n # We need correct behavior for `hasattr()`, which we can only determine\n # when we have an instance of the middleware we're wrapping.\n def __getattr__(self, method_name):\n # type: (str) -> Any\n if method_name not in (\n \"process_request\",\n \"process_view\",\n \"process_template_response\",\n \"process_response\",\n \"process_exception\",\n ):\n raise AttributeError()\n\n old_method = getattr(self._inner, method_name)\n rv = _get_wrapped_method(old_method)\n self.__dict__[method_name] = rv\n return rv\n\n def __call__(self, *args, **kwargs):\n # type: (*Any, **Any) -> Any\n f = self._call_method\n if f is None:\n self._call_method = f = _get_wrapped_method(self._inner.__call__)\n return f(*args, **kwargs)\n\n if hasattr(middleware, \"__name__\"):\n SentryWrappingMiddleware.__name__ = middleware.__name__\n\n return SentryWrappingMiddleware\n", "path": "sentry_sdk/integrations/django/middleware.py"}]} | 1,863 | 213 |
gh_patches_debug_37989 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-1270 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
API should validate input for number columns
## Description
Currently, the API accepts strings as input values for number-typed columns. In some cases, these strings carry locale-sensitive formatting, i.e., locale-specific decimal separators and negation styles. This is a problem because confusion arises whenever the client, service, and database have different locale settings (the client and DB are likely to have different locale settings by default). Even worse, the locale settings in the database (assuming PostgreSQL) may be applied differently in different contexts.
## Expected behavior
Columns which use a number type for storage at the DB layer should only accept numbers in one of two formats:
- an actual JSON number, or
- a string that conforms to the [JSON number spec](https://www.json.org/json-en.html), just wrapped in double quotes.
The validation of this should be locale-independent, and should happen in the Mathesar web service rather than the database.
## To Reproduce
- Create a table with a number-typed column containing a decimal point (e.g., `FLOAT`).
- Send an API request with input for that column as a string, with a comma for a decimal point.
  - You can do this easily from the browsable API, see `/api/db/v0/tables/<table_ID>/records/<record_ID>/`
- Observe the database-layer error.
--- END ISSUE ---
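As an illustration only (a sketch, not the Mathesar implementation; the regex, helper name, and example values are assumptions), a locale-independent check against the JSON number grammar could look like this:
```python
import re

# JSON number grammar: -? (0 | [1-9][0-9]*) frac? exp?
JSON_NUMBER_RE = re.compile(r"^-?(0|[1-9][0-9]*)(\.[0-9]+)?([eE][+-]?[0-9]+)?$")


def is_valid_number_input(value):
    """Accept real JSON numbers, or strings that spell out a JSON number."""
    if isinstance(value, bool):
        return False
    if isinstance(value, (int, float)):
        return True
    if isinstance(value, str):
        return JSON_NUMBER_RE.match(value) is not None
    return False


print(is_valid_number_input(12.5))       # True
print(is_valid_number_input("12.5"))     # True
print(is_valid_number_input("12,5"))     # False: locale-style comma is rejected
print(is_valid_number_input("-0.5e3"))   # True
```
The accepted patch takes a similar approach, matching number strings against a set of JSON-number patterns in the web service before handing them to the database.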
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mathesar/api/utils.py`
Content:
```
1 from rest_framework.exceptions import NotFound
2
3 from db.records.operations import group
4 from mathesar.models import Table
5
6 DATA_KEY = 'data'
7 METADATA_KEY = 'metadata'
8
9
10 def get_table_or_404(pk):
11 """
12 Get table if it exists, otherwise throws a DRF NotFound error.
13 Args:
14 pk: id of table
15 Returns:
16 table: return the table based on a specific id
17 """
18 try:
19 table = Table.objects.get(id=pk)
20 except Table.DoesNotExist:
21 raise NotFound
22 return table
23
24
25 def process_annotated_records(record_list, column_name_id_map):
26
27 RESULT_IDX = 'result_indices'
28
29 def _get_record_dict(record):
30 return record._asdict() if not isinstance(record, dict) else record
31
32 split_records = (
33 {DATA_KEY: record_dict}
34 for record_dict in (_get_record_dict(record) for record in record_list)
35 )
36
37 combined_records, groups = group.extract_group_metadata(
38 split_records, data_key=DATA_KEY, metadata_key=METADATA_KEY
39 )
40
41 processed_records, record_metadata = zip(
42 *tuple(tuple(d.values()) for d in combined_records)
43 )
44
45 def _replace_column_names_with_ids(group_metadata_item):
46 try:
47 processed_group_metadata_item = {
48 column_name_id_map[k]: v for k, v in group_metadata_item.items()
49 }
50 except AttributeError:
51 processed_group_metadata_item = group_metadata_item
52 return processed_group_metadata_item
53
54 if groups is not None:
55 groups_by_id = {
56 grp[group.GroupMetadataField.GROUP_ID.value]: {
57 k: _replace_column_names_with_ids(v) for k, v in grp.items()
58 if k != group.GroupMetadataField.GROUP_ID.value
59 } | {RESULT_IDX: []}
60 for grp in groups
61 }
62
63 for i, meta in enumerate(record_metadata):
64 groups_by_id[meta[group.GroupMetadataField.GROUP_ID.value]][RESULT_IDX].append(i)
65
66 output_groups = sorted(list(groups_by_id.values()), key=lambda x: x[RESULT_IDX][0])
67 else:
68 output_groups = None
69
70 return processed_records, output_groups
71
```
Path: `mathesar/api/serializers/records.py`
Content:
```
1 from psycopg2.errors import NotNullViolation
2 from rest_framework import serializers
3 from rest_framework import status
4 from sqlalchemy.exc import IntegrityError
5
6 import mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions
7 from mathesar.api.exceptions.mixins import MathesarErrorMessageMixin
8
9
10 class RecordListParameterSerializer(MathesarErrorMessageMixin, serializers.Serializer):
11 filter = serializers.JSONField(required=False, default=None)
12 order_by = serializers.JSONField(required=False, default=[])
13 grouping = serializers.JSONField(required=False, default={})
14 duplicate_only = serializers.JSONField(required=False, default=None)
15
16
17 class RecordSerializer(MathesarErrorMessageMixin, serializers.BaseSerializer):
18 def update(self, instance, validated_data):
19 table = self.context['table']
20 record = table.update_record(instance['id'], validated_data)
21 return record
22
23 def create(self, validated_data):
24 table = self.context['table']
25 try:
26 record = table.create_record_or_records(validated_data)
27 except IntegrityError as e:
28 if type(e.orig) == NotNullViolation:
29 raise database_api_exceptions.NotNullViolationAPIException(
30 e,
31 status_code=status.HTTP_400_BAD_REQUEST,
32 table=table
33 )
34 else:
35 raise database_api_exceptions.MathesarAPIException(e, status_code=status.HTTP_400_BAD_REQUEST)
36 return record
37
38 def to_representation(self, instance):
39 records = instance._asdict() if not isinstance(instance, dict) else instance
40 columns_map = self.context['columns_map']
41 records = {columns_map[column_name]: column_value for column_name, column_value in records.items()}
42 return records
43
44 def to_internal_value(self, data):
45 columns_map = self.context['columns_map'].inverse
46 data = {columns_map[int(column_id)]: value for column_id, value in data.items()}
47 return data
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mathesar/api/serializers/records.py b/mathesar/api/serializers/records.py
--- a/mathesar/api/serializers/records.py
+++ b/mathesar/api/serializers/records.py
@@ -5,6 +5,8 @@
import mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions
from mathesar.api.exceptions.mixins import MathesarErrorMessageMixin
+from mathesar.models import Column
+from mathesar.api.utils import follows_json_number_spec, is_number
class RecordListParameterSerializer(MathesarErrorMessageMixin, serializers.Serializer):
@@ -44,4 +46,17 @@
def to_internal_value(self, data):
columns_map = self.context['columns_map'].inverse
data = {columns_map[int(column_id)]: value for column_id, value in data.items()}
+ # If the data type of the column is number then the value must be an integer
+ # or a string which follows JSON number spec.
+ for column_name in data.keys():
+ column = Column.objects.get(id=columns_map.inverse[column_name])
+ column_type = column.type
+ value = data[column_name]
+ if is_number(column_type) and type(data[column_name]) is str and not follows_json_number_spec(value):
+ raise database_api_exceptions.MathesarAPIException(
+ IntegrityError,
+ status_code=status.HTTP_400_BAD_REQUEST,
+ message="Number strings should follow JSON number spec",
+ field=column_name
+ )
return data
diff --git a/mathesar/api/utils.py b/mathesar/api/utils.py
--- a/mathesar/api/utils.py
+++ b/mathesar/api/utils.py
@@ -1,7 +1,9 @@
from rest_framework.exceptions import NotFound
+import re
from db.records.operations import group
from mathesar.models import Table
+from mathesar.database.types import _get_type_map
DATA_KEY = 'data'
METADATA_KEY = 'metadata'
@@ -68,3 +70,39 @@
output_groups = None
return processed_records, output_groups
+
+
+def is_number(column_type):
+ """
+ Check if a column data type is a number
+ Args:
+ column_type: data type of column
+ """
+ for type in _get_type_map():
+ if type['name'] == 'Number':
+ if str(column_type).lower() in type['sa_type_names']:
+ return True
+ else:
+ return False
+
+
+def follows_json_number_spec(number):
+ """
+ Check if a string follows JSON number spec
+ Args:
+ number: number as string
+ """
+ patterns = [
+ r"^-?0$",
+ r"^-?0[\.][0-9]+$",
+ r"^-?0[eE][+-]?[0-9]*$",
+ r"^-?0[\.][0-9]+[eE][+-]?[0-9]+$",
+ r"^-?[1-9][0-9]*$",
+ r"^-?[1-9][0-9]*[\.][0-9]+$",
+ r"^-?[1-9][0-9]*[eE][+-]?[0-9]+$",
+ r"^-?[1-9][0-9]*[\.][0-9]+[eE][+-]?[0-9]+$",
+ ]
+ for pattern in patterns:
+ if re.search(pattern, number) is not None:
+ return True
+ return False
| {"golden_diff": "diff --git a/mathesar/api/serializers/records.py b/mathesar/api/serializers/records.py\n--- a/mathesar/api/serializers/records.py\n+++ b/mathesar/api/serializers/records.py\n@@ -5,6 +5,8 @@\n \n import mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions\n from mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\n+from mathesar.models import Column\n+from mathesar.api.utils import follows_json_number_spec, is_number\n \n \n class RecordListParameterSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n@@ -44,4 +46,17 @@\n def to_internal_value(self, data):\n columns_map = self.context['columns_map'].inverse\n data = {columns_map[int(column_id)]: value for column_id, value in data.items()}\n+ # If the data type of the column is number then the value must be an integer\n+ # or a string which follows JSON number spec.\n+ for column_name in data.keys():\n+ column = Column.objects.get(id=columns_map.inverse[column_name])\n+ column_type = column.type\n+ value = data[column_name]\n+ if is_number(column_type) and type(data[column_name]) is str and not follows_json_number_spec(value):\n+ raise database_api_exceptions.MathesarAPIException(\n+ IntegrityError,\n+ status_code=status.HTTP_400_BAD_REQUEST,\n+ message=\"Number strings should follow JSON number spec\",\n+ field=column_name\n+ )\n return data\ndiff --git a/mathesar/api/utils.py b/mathesar/api/utils.py\n--- a/mathesar/api/utils.py\n+++ b/mathesar/api/utils.py\n@@ -1,7 +1,9 @@\n from rest_framework.exceptions import NotFound\n+import re\n \n from db.records.operations import group\n from mathesar.models import Table\n+from mathesar.database.types import _get_type_map\n \n DATA_KEY = 'data'\n METADATA_KEY = 'metadata'\n@@ -68,3 +70,39 @@\n output_groups = None\n \n return processed_records, output_groups\n+\n+\n+def is_number(column_type):\n+ \"\"\"\n+ Check if a column data type is a number\n+ Args:\n+ column_type: data type of column\n+ \"\"\"\n+ for type in _get_type_map():\n+ if type['name'] == 'Number':\n+ if str(column_type).lower() in type['sa_type_names']:\n+ return True\n+ else:\n+ return False\n+\n+\n+def follows_json_number_spec(number):\n+ \"\"\"\n+ Check if a string follows JSON number spec\n+ Args:\n+ number: number as string\n+ \"\"\"\n+ patterns = [\n+ r\"^-?0$\",\n+ r\"^-?0[\\.][0-9]+$\",\n+ r\"^-?0[eE][+-]?[0-9]*$\",\n+ r\"^-?0[\\.][0-9]+[eE][+-]?[0-9]+$\",\n+ r\"^-?[1-9][0-9]*$\",\n+ r\"^-?[1-9][0-9]*[\\.][0-9]+$\",\n+ r\"^-?[1-9][0-9]*[eE][+-]?[0-9]+$\",\n+ r\"^-?[1-9][0-9]*[\\.][0-9]+[eE][+-]?[0-9]+$\",\n+ ]\n+ for pattern in patterns:\n+ if re.search(pattern, number) is not None:\n+ return True\n+ return False\n", "issue": "API should validate input for number columns\n## Description\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\nCurrently, the API accepts strings for values input to number-typed columns. In some cases, these strings carry locale-sensitive information, i.e., using specific decimal points and negation styles. This is a problem since confusion will arise whenever the client, service, and database have different locale settings (it's likely the client and DB will have different locale settings by default). Even worse, the locale settings in the database (assuming PostgreSQL) may be applied differently in different contexts.\r\n\r\n## Expected behavior\r\n<!-- A clear and concise description of what you expected to happen. 
-->\r\n\r\nColumns which use a number type for storage at the DB layer should only accept numbers in one of two formats:\r\n- an actual JSON number, or\r\n- A string conforming to the [JSON number spec](https://www.json.org/json-en.html), except wrapped in double-quotes.\r\n\r\nThe validation of this should be locale-independent, and should happen in the Mathesar web service rather than the database.\r\n\r\n## To Reproduce\r\n<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->\r\n\r\n- Create a table with a number-typed column containing a decimal point (e.g., `FLOAT`).\r\n- Send an API request with input for that column as a string, with a comma for a decimal point. \r\n - You can do this easily from the browseable API, see `/api/db/v0/tables/<table_ID>/records/<record_ID>/`\r\n- Observe the database-layer error.\r\n\n", "before_files": [{"content": "from rest_framework.exceptions import NotFound\n\nfrom db.records.operations import group\nfrom mathesar.models import Table\n\nDATA_KEY = 'data'\nMETADATA_KEY = 'metadata'\n\n\ndef get_table_or_404(pk):\n \"\"\"\n Get table if it exists, otherwise throws a DRF NotFound error.\n Args:\n pk: id of table\n Returns:\n table: return the table based on a specific id\n \"\"\"\n try:\n table = Table.objects.get(id=pk)\n except Table.DoesNotExist:\n raise NotFound\n return table\n\n\ndef process_annotated_records(record_list, column_name_id_map):\n\n RESULT_IDX = 'result_indices'\n\n def _get_record_dict(record):\n return record._asdict() if not isinstance(record, dict) else record\n\n split_records = (\n {DATA_KEY: record_dict}\n for record_dict in (_get_record_dict(record) for record in record_list)\n )\n\n combined_records, groups = group.extract_group_metadata(\n split_records, data_key=DATA_KEY, metadata_key=METADATA_KEY\n )\n\n processed_records, record_metadata = zip(\n *tuple(tuple(d.values()) for d in combined_records)\n )\n\n def _replace_column_names_with_ids(group_metadata_item):\n try:\n processed_group_metadata_item = {\n column_name_id_map[k]: v for k, v in group_metadata_item.items()\n }\n except AttributeError:\n processed_group_metadata_item = group_metadata_item\n return processed_group_metadata_item\n\n if groups is not None:\n groups_by_id = {\n grp[group.GroupMetadataField.GROUP_ID.value]: {\n k: _replace_column_names_with_ids(v) for k, v in grp.items()\n if k != group.GroupMetadataField.GROUP_ID.value\n } | {RESULT_IDX: []}\n for grp in groups\n }\n\n for i, meta in enumerate(record_metadata):\n groups_by_id[meta[group.GroupMetadataField.GROUP_ID.value]][RESULT_IDX].append(i)\n\n output_groups = sorted(list(groups_by_id.values()), key=lambda x: x[RESULT_IDX][0])\n else:\n output_groups = None\n\n return processed_records, output_groups\n", "path": "mathesar/api/utils.py"}, {"content": "from psycopg2.errors import NotNullViolation\nfrom rest_framework import serializers\nfrom rest_framework import status\nfrom sqlalchemy.exc import IntegrityError\n\nimport mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions\nfrom mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\n\n\nclass RecordListParameterSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n filter = serializers.JSONField(required=False, default=None)\n order_by = serializers.JSONField(required=False, default=[])\n grouping = serializers.JSONField(required=False, default={})\n duplicate_only = 
serializers.JSONField(required=False, default=None)\n\n\nclass RecordSerializer(MathesarErrorMessageMixin, serializers.BaseSerializer):\n def update(self, instance, validated_data):\n table = self.context['table']\n record = table.update_record(instance['id'], validated_data)\n return record\n\n def create(self, validated_data):\n table = self.context['table']\n try:\n record = table.create_record_or_records(validated_data)\n except IntegrityError as e:\n if type(e.orig) == NotNullViolation:\n raise database_api_exceptions.NotNullViolationAPIException(\n e,\n status_code=status.HTTP_400_BAD_REQUEST,\n table=table\n )\n else:\n raise database_api_exceptions.MathesarAPIException(e, status_code=status.HTTP_400_BAD_REQUEST)\n return record\n\n def to_representation(self, instance):\n records = instance._asdict() if not isinstance(instance, dict) else instance\n columns_map = self.context['columns_map']\n records = {columns_map[column_name]: column_value for column_name, column_value in records.items()}\n return records\n\n def to_internal_value(self, data):\n columns_map = self.context['columns_map'].inverse\n data = {columns_map[int(column_id)]: value for column_id, value in data.items()}\n return data\n", "path": "mathesar/api/serializers/records.py"}], "after_files": [{"content": "from rest_framework.exceptions import NotFound\nimport re\n\nfrom db.records.operations import group\nfrom mathesar.models import Table\nfrom mathesar.database.types import _get_type_map\n\nDATA_KEY = 'data'\nMETADATA_KEY = 'metadata'\n\n\ndef get_table_or_404(pk):\n \"\"\"\n Get table if it exists, otherwise throws a DRF NotFound error.\n Args:\n pk: id of table\n Returns:\n table: return the table based on a specific id\n \"\"\"\n try:\n table = Table.objects.get(id=pk)\n except Table.DoesNotExist:\n raise NotFound\n return table\n\n\ndef process_annotated_records(record_list, column_name_id_map):\n\n RESULT_IDX = 'result_indices'\n\n def _get_record_dict(record):\n return record._asdict() if not isinstance(record, dict) else record\n\n split_records = (\n {DATA_KEY: record_dict}\n for record_dict in (_get_record_dict(record) for record in record_list)\n )\n\n combined_records, groups = group.extract_group_metadata(\n split_records, data_key=DATA_KEY, metadata_key=METADATA_KEY\n )\n\n processed_records, record_metadata = zip(\n *tuple(tuple(d.values()) for d in combined_records)\n )\n\n def _replace_column_names_with_ids(group_metadata_item):\n try:\n processed_group_metadata_item = {\n column_name_id_map[k]: v for k, v in group_metadata_item.items()\n }\n except AttributeError:\n processed_group_metadata_item = group_metadata_item\n return processed_group_metadata_item\n\n if groups is not None:\n groups_by_id = {\n grp[group.GroupMetadataField.GROUP_ID.value]: {\n k: _replace_column_names_with_ids(v) for k, v in grp.items()\n if k != group.GroupMetadataField.GROUP_ID.value\n } | {RESULT_IDX: []}\n for grp in groups\n }\n\n for i, meta in enumerate(record_metadata):\n groups_by_id[meta[group.GroupMetadataField.GROUP_ID.value]][RESULT_IDX].append(i)\n\n output_groups = sorted(list(groups_by_id.values()), key=lambda x: x[RESULT_IDX][0])\n else:\n output_groups = None\n\n return processed_records, output_groups\n\n\ndef is_number(column_type):\n \"\"\"\n Check if a column data type is a number\n Args:\n column_type: data type of column\n \"\"\"\n for type in _get_type_map():\n if type['name'] == 'Number':\n if str(column_type).lower() in type['sa_type_names']:\n return True\n else:\n return False\n\n\ndef 
follows_json_number_spec(number):\n \"\"\"\n Check if a string follows JSON number spec\n Args:\n number: number as string\n \"\"\"\n patterns = [\n r\"^-?0$\",\n r\"^-?0[\\.][0-9]+$\",\n r\"^-?0[eE][+-]?[0-9]*$\",\n r\"^-?0[\\.][0-9]+[eE][+-]?[0-9]+$\",\n r\"^-?[1-9][0-9]*$\",\n r\"^-?[1-9][0-9]*[\\.][0-9]+$\",\n r\"^-?[1-9][0-9]*[eE][+-]?[0-9]+$\",\n r\"^-?[1-9][0-9]*[\\.][0-9]+[eE][+-]?[0-9]+$\",\n ]\n for pattern in patterns:\n if re.search(pattern, number) is not None:\n return True\n return False\n", "path": "mathesar/api/utils.py"}, {"content": "from psycopg2.errors import NotNullViolation\nfrom rest_framework import serializers\nfrom rest_framework import status\nfrom sqlalchemy.exc import IntegrityError\n\nimport mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions\nfrom mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\nfrom mathesar.models import Column\nfrom mathesar.api.utils import follows_json_number_spec, is_number\n\n\nclass RecordListParameterSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n filter = serializers.JSONField(required=False, default=None)\n order_by = serializers.JSONField(required=False, default=[])\n grouping = serializers.JSONField(required=False, default={})\n duplicate_only = serializers.JSONField(required=False, default=None)\n\n\nclass RecordSerializer(MathesarErrorMessageMixin, serializers.BaseSerializer):\n def update(self, instance, validated_data):\n table = self.context['table']\n record = table.update_record(instance['id'], validated_data)\n return record\n\n def create(self, validated_data):\n table = self.context['table']\n try:\n record = table.create_record_or_records(validated_data)\n except IntegrityError as e:\n if type(e.orig) == NotNullViolation:\n raise database_api_exceptions.NotNullViolationAPIException(\n e,\n status_code=status.HTTP_400_BAD_REQUEST,\n table=table\n )\n else:\n raise database_api_exceptions.MathesarAPIException(e, status_code=status.HTTP_400_BAD_REQUEST)\n return record\n\n def to_representation(self, instance):\n records = instance._asdict() if not isinstance(instance, dict) else instance\n columns_map = self.context['columns_map']\n records = {columns_map[column_name]: column_value for column_name, column_value in records.items()}\n return records\n\n def to_internal_value(self, data):\n columns_map = self.context['columns_map'].inverse\n data = {columns_map[int(column_id)]: value for column_id, value in data.items()}\n # If the data type of the column is number then the value must be an integer\n # or a string which follows JSON number spec.\n for column_name in data.keys():\n column = Column.objects.get(id=columns_map.inverse[column_name])\n column_type = column.type\n value = data[column_name]\n if is_number(column_type) and type(data[column_name]) is str and not follows_json_number_spec(value):\n raise database_api_exceptions.MathesarAPIException(\n IntegrityError,\n status_code=status.HTTP_400_BAD_REQUEST,\n message=\"Number strings should follow JSON number spec\",\n field=column_name\n )\n return data\n", "path": "mathesar/api/serializers/records.py"}]} | 1,702 | 782 |
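For reference, the string check added by the mathesar patch above can be run on its own. This is a minimal standalone sketch: the regex list is copied from `follows_json_number_spec()` in the diff, while the sample inputs are illustrative.

```python
# Standalone sketch of the JSON-number string check from the patch above.
import re

JSON_NUMBER_PATTERNS = [
    r"^-?0$",
    r"^-?0[\.][0-9]+$",
    r"^-?0[eE][+-]?[0-9]*$",
    r"^-?0[\.][0-9]+[eE][+-]?[0-9]+$",
    r"^-?[1-9][0-9]*$",
    r"^-?[1-9][0-9]*[\.][0-9]+$",
    r"^-?[1-9][0-9]*[eE][+-]?[0-9]+$",
    r"^-?[1-9][0-9]*[\.][0-9]+[eE][+-]?[0-9]+$",
]

def follows_json_number_spec(number: str) -> bool:
    # True if the string is a valid JSON number literal.
    return any(re.search(pattern, number) for pattern in JSON_NUMBER_PATTERNS)

assert follows_json_number_spec("-12.5e3")
assert not follows_json_number_spec("1,5")  # locale-style decimal comma is rejected
```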
gh_patches_debug_31432 | rasdani/github-patches | git_diff | facebookresearch__CompilerGym-309 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CompilerGym cache directory defaults don't play nicely with shared access machines
## 🐛 Bug
The default locations of the CompilerGym caches are in shared folders. On multi-user machines this can cause permission errors as the directory may be created by one user without write permissions to other users. I propose switching to user-specific defaults like so:
- [x] `COMPILER_GYM_CACHE` -> `/tmp/compiler_gym-$user`
- [x] `COMPILER_GYM_TRANSIENT_CACHE` -> `/dev/shm/compiler_gym-$user`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `compiler_gym/util/runfiles_path.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates.
2 #
3 # This source code is licensed under the MIT license found in the
4 # LICENSE file in the root directory of this source tree.
5 """Module for resolving a runfiles path."""
6 import getpass
7 import os
8 from pathlib import Path
9
10 # NOTE(cummins): Moving this file may require updating this relative path.
11 _PACKAGE_ROOT = Path(os.path.join(os.path.dirname(__file__), "../../")).resolve(
12 strict=True
13 )
14
15
16 def runfiles_path(relpath: str) -> Path:
17 """Resolve the path to a runfiles data path.
18
19 No checks are to made to ensure that the path, or the containing directory,
20 exist.
21
22 Use environment variable COMPILER_GYM_RUNFILES=/path/to/runfiles if running
23 outside of bazel.
24
25 :param relpath: The relative path within the runfiles tree.
26
27 :return: An absolute path.
28 """
29 # There are three ways of determining a runfiles path:
30 # 1. Set the COMPILER_GYM_RUNFILES environment variable.
31 # 2. Using the rules_python library that is provided by bazel. This will
32 # fail if not being executed within a bazel sandbox.
33 # 3. Computing the path relative to the location of this file. This is the
34 # fallback approach that is used for when the code has been installed
35 # by setuptools.
36 runfiles_path = os.environ.get("COMPILER_GYM_RUNFILES")
37 if runfiles_path:
38 return Path(runfiles_path) / relpath
39 else:
40 try:
41 from rules_python.python.runfiles import runfiles
42
43 return Path(
44 runfiles.Create().Rlocation(
45 "CompilerGym" if relpath == "." else f"CompilerGym/{relpath}"
46 )
47 )
48 except (ModuleNotFoundError, TypeError):
49 return _PACKAGE_ROOT / relpath
50
51
52 def site_data_path(relpath: str) -> Path:
53 """Return a path within the site data directory.
54
55 CompilerGym uses a directory to store persistent site data files in, such as benchmark datasets.
56 The default location is :code:`~/.local/share/compiler_gym`. Set the environment variable
57 :code:`$COMPILER_GYM_SITE_DATA` to override this default location.
58
59 No checks are to made to ensure that the path, or the containing directory,
60 exist.
61
62 :param relpath: The relative path within the site data tree.
63
64 :return: An absolute path.
65 """
66 # NOTE(cummins): This function has a matching implementation in the C++
67 # sources, compiler_gym::service::getSiteDataPath(). Any change to behavior
68 # here must be reflected in the C++ version.
69 forced = os.environ.get("COMPILER_GYM_SITE_DATA")
70 if forced:
71 return Path(forced) / relpath
72 elif os.environ.get("HOME"):
73 return Path("~/.local/share/compiler_gym").expanduser() / relpath
74 else:
75 return Path("/tmp/compiler_gym/site_data") / relpath
76
77
78 def cache_path(relpath: str) -> Path:
79 """Return a path within the cache directory.
80
81 CompilerGym uses a directory to cache files in, such as downloaded content.
82 The default location for this cache is :code:`~/.cache/compiler_gym`. Set
83 the environment variable :code:`$COMPILER_GYM_CACHE` to override this
84 default location.
85
86 No checks are to made to ensure that the path, or the containing directory,
87 exist.
88
89 :param relpath: The relative path within the cache tree.
90
91 :return: An absolute path.
92 """
93 forced = os.environ.get("COMPILER_GYM_CACHE")
94 if forced:
95 return Path(forced) / relpath
96 elif os.environ.get("HOME"):
97 return Path("~/.cache/compiler_gym").expanduser() / relpath
98 else:
99 return Path("/tmp/compiler_gym/cache") / relpath
100
101
102 def transient_cache_path(relpath: str) -> Path:
103 """Return a path within the transient cache directory.
104
105 The transient cache is a directory used to store files that do not need to
106 persist beyond the lifetime of the current process. When available, the
107 temporary filesystem :code:`/dev/shm` will be used. Else,
108 :meth:`cache_path() <compiler_gym.cache_path>` is used as a fallback. Set
109 the environment variable :code:`$COMPILER_GYM_TRANSIENT_CACHE` to override
110 the default location.
111
112 No checks are to made to ensure that the path, or the containing directory,
113 exist.
114
115 :param relpath: The relative path within the cache tree.
116
117 :return: An absolute path.
118 """
119 forced = os.environ.get("COMPILER_GYM_TRANSIENT_CACHE")
120 if forced:
121 return Path(forced) / relpath
122 elif Path("/dev/shm").is_dir():
123 return Path(f"/dev/shm/compiler_gym_{getpass.getuser()}") / relpath
124 else:
125 # Fallback to using the regular cache.
126 return cache_path(relpath)
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/compiler_gym/util/runfiles_path.py b/compiler_gym/util/runfiles_path.py
--- a/compiler_gym/util/runfiles_path.py
+++ b/compiler_gym/util/runfiles_path.py
@@ -3,8 +3,8 @@
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Module for resolving a runfiles path."""
-import getpass
import os
+from getpass import getuser
from pathlib import Path
# NOTE(cummins): Moving this file may require updating this relative path.
@@ -72,7 +72,7 @@
elif os.environ.get("HOME"):
return Path("~/.local/share/compiler_gym").expanduser() / relpath
else:
- return Path("/tmp/compiler_gym/site_data") / relpath
+ return Path(f"/tmp/compiler_gym_{getuser()}/site_data") / relpath
def cache_path(relpath: str) -> Path:
@@ -96,7 +96,7 @@
elif os.environ.get("HOME"):
return Path("~/.cache/compiler_gym").expanduser() / relpath
else:
- return Path("/tmp/compiler_gym/cache") / relpath
+ return Path(f"/tmp/compiler_gym_{getuser()}/cache") / relpath
def transient_cache_path(relpath: str) -> Path:
@@ -120,7 +120,7 @@
if forced:
return Path(forced) / relpath
elif Path("/dev/shm").is_dir():
- return Path(f"/dev/shm/compiler_gym_{getpass.getuser()}") / relpath
+ return Path(f"/dev/shm/compiler_gym_{getuser()}") / relpath
else:
# Fallback to using the regular cache.
return cache_path(relpath)
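A minimal sketch of the fallback behaviour this diff introduces, assuming `$COMPILER_GYM_CACHE` and `$HOME` are both unset; the `downloads` relpath is illustrative:

```python
# With no cache override and no HOME, cache_path() now falls back to a per-user /tmp dir.
from getpass import getuser
from pathlib import Path

relpath = "downloads"  # illustrative relative path inside the cache tree
fallback = Path(f"/tmp/compiler_gym_{getuser()}/cache") / relpath
print(fallback)  # e.g. /tmp/compiler_gym_alice/cache/downloads
```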
| {"golden_diff": "diff --git a/compiler_gym/util/runfiles_path.py b/compiler_gym/util/runfiles_path.py\n--- a/compiler_gym/util/runfiles_path.py\n+++ b/compiler_gym/util/runfiles_path.py\n@@ -3,8 +3,8 @@\n # This source code is licensed under the MIT license found in the\n # LICENSE file in the root directory of this source tree.\n \"\"\"Module for resolving a runfiles path.\"\"\"\n-import getpass\n import os\n+from getpass import getuser\n from pathlib import Path\n \n # NOTE(cummins): Moving this file may require updating this relative path.\n@@ -72,7 +72,7 @@\n elif os.environ.get(\"HOME\"):\n return Path(\"~/.local/share/compiler_gym\").expanduser() / relpath\n else:\n- return Path(\"/tmp/compiler_gym/site_data\") / relpath\n+ return Path(f\"/tmp/compiler_gym_{getuser()}/site_data\") / relpath\n \n \n def cache_path(relpath: str) -> Path:\n@@ -96,7 +96,7 @@\n elif os.environ.get(\"HOME\"):\n return Path(\"~/.cache/compiler_gym\").expanduser() / relpath\n else:\n- return Path(\"/tmp/compiler_gym/cache\") / relpath\n+ return Path(f\"/tmp/compiler_gym_{getuser()}/cache\") / relpath\n \n \n def transient_cache_path(relpath: str) -> Path:\n@@ -120,7 +120,7 @@\n if forced:\n return Path(forced) / relpath\n elif Path(\"/dev/shm\").is_dir():\n- return Path(f\"/dev/shm/compiler_gym_{getpass.getuser()}\") / relpath\n+ return Path(f\"/dev/shm/compiler_gym_{getuser()}\") / relpath\n else:\n # Fallback to using the regular cache.\n return cache_path(relpath)\n", "issue": "CompilerGym cache directory defaults don't doesn't play nicely with shared access machines\n## \ud83d\udc1b Bug\r\n\r\nThe default locations of the CompilerGym caches are in shared folders. On multi-user machines this can cause permission errors as the directory may be created by one user without write permissions to other users. I propose switching to user-specific defaults like so:\r\n\r\n- [x] `COMPILER_GYM_CACHE` -> `/tmp/compiler_gym-$user`\r\n- [x] `COMPILER_YM_TRANSIENT_CACHE` -> `/dev/shm/compiler_gym-$user`\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"Module for resolving a runfiles path.\"\"\"\nimport getpass\nimport os\nfrom pathlib import Path\n\n# NOTE(cummins): Moving this file may require updating this relative path.\n_PACKAGE_ROOT = Path(os.path.join(os.path.dirname(__file__), \"../../\")).resolve(\n strict=True\n)\n\n\ndef runfiles_path(relpath: str) -> Path:\n \"\"\"Resolve the path to a runfiles data path.\n\n No checks are to made to ensure that the path, or the containing directory,\n exist.\n\n Use environment variable COMPILER_GYM_RUNFILES=/path/to/runfiles if running\n outside of bazel.\n\n :param relpath: The relative path within the runfiles tree.\n\n :return: An absolute path.\n \"\"\"\n # There are three ways of determining a runfiles path:\n # 1. Set the COMPILER_GYM_RUNFILES environment variable.\n # 2. Using the rules_python library that is provided by bazel. This will\n # fail if not being executed within a bazel sandbox.\n # 3. Computing the path relative to the location of this file. 
This is the\n # fallback approach that is used for when the code has been installed\n # by setuptools.\n runfiles_path = os.environ.get(\"COMPILER_GYM_RUNFILES\")\n if runfiles_path:\n return Path(runfiles_path) / relpath\n else:\n try:\n from rules_python.python.runfiles import runfiles\n\n return Path(\n runfiles.Create().Rlocation(\n \"CompilerGym\" if relpath == \".\" else f\"CompilerGym/{relpath}\"\n )\n )\n except (ModuleNotFoundError, TypeError):\n return _PACKAGE_ROOT / relpath\n\n\ndef site_data_path(relpath: str) -> Path:\n \"\"\"Return a path within the site data directory.\n\n CompilerGym uses a directory to store persistent site data files in, such as benchmark datasets.\n The default location is :code:`~/.local/share/compiler_gym`. Set the environment variable\n :code:`$COMPILER_GYM_SITE_DATA` to override this default location.\n\n No checks are to made to ensure that the path, or the containing directory,\n exist.\n\n :param relpath: The relative path within the site data tree.\n\n :return: An absolute path.\n \"\"\"\n # NOTE(cummins): This function has a matching implementation in the C++\n # sources, compiler_gym::service::getSiteDataPath(). Any change to behavior\n # here must be reflected in the C++ version.\n forced = os.environ.get(\"COMPILER_GYM_SITE_DATA\")\n if forced:\n return Path(forced) / relpath\n elif os.environ.get(\"HOME\"):\n return Path(\"~/.local/share/compiler_gym\").expanduser() / relpath\n else:\n return Path(\"/tmp/compiler_gym/site_data\") / relpath\n\n\ndef cache_path(relpath: str) -> Path:\n \"\"\"Return a path within the cache directory.\n\n CompilerGym uses a directory to cache files in, such as downloaded content.\n The default location for this cache is :code:`~/.cache/compiler_gym`. Set\n the environment variable :code:`$COMPILER_GYM_CACHE` to override this\n default location.\n\n No checks are to made to ensure that the path, or the containing directory,\n exist.\n\n :param relpath: The relative path within the cache tree.\n\n :return: An absolute path.\n \"\"\"\n forced = os.environ.get(\"COMPILER_GYM_CACHE\")\n if forced:\n return Path(forced) / relpath\n elif os.environ.get(\"HOME\"):\n return Path(\"~/.cache/compiler_gym\").expanduser() / relpath\n else:\n return Path(\"/tmp/compiler_gym/cache\") / relpath\n\n\ndef transient_cache_path(relpath: str) -> Path:\n \"\"\"Return a path within the transient cache directory.\n\n The transient cache is a directory used to store files that do not need to\n persist beyond the lifetime of the current process. When available, the\n temporary filesystem :code:`/dev/shm` will be used. Else,\n :meth:`cache_path() <compiler_gym.cache_path>` is used as a fallback. Set\n the environment variable :code:`$COMPILER_GYM_TRANSIENT_CACHE` to override\n the default location.\n\n No checks are to made to ensure that the path, or the containing directory,\n exist.\n\n :param relpath: The relative path within the cache tree.\n\n :return: An absolute path.\n \"\"\"\n forced = os.environ.get(\"COMPILER_GYM_TRANSIENT_CACHE\")\n if forced:\n return Path(forced) / relpath\n elif Path(\"/dev/shm\").is_dir():\n return Path(f\"/dev/shm/compiler_gym_{getpass.getuser()}\") / relpath\n else:\n # Fallback to using the regular cache.\n return cache_path(relpath)\n", "path": "compiler_gym/util/runfiles_path.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"Module for resolving a runfiles path.\"\"\"\nimport os\nfrom getpass import getuser\nfrom pathlib import Path\n\n# NOTE(cummins): Moving this file may require updating this relative path.\n_PACKAGE_ROOT = Path(os.path.join(os.path.dirname(__file__), \"../../\")).resolve(\n strict=True\n)\n\n\ndef runfiles_path(relpath: str) -> Path:\n \"\"\"Resolve the path to a runfiles data path.\n\n No checks are to made to ensure that the path, or the containing directory,\n exist.\n\n Use environment variable COMPILER_GYM_RUNFILES=/path/to/runfiles if running\n outside of bazel.\n\n :param relpath: The relative path within the runfiles tree.\n\n :return: An absolute path.\n \"\"\"\n # There are three ways of determining a runfiles path:\n # 1. Set the COMPILER_GYM_RUNFILES environment variable.\n # 2. Using the rules_python library that is provided by bazel. This will\n # fail if not being executed within a bazel sandbox.\n # 3. Computing the path relative to the location of this file. This is the\n # fallback approach that is used for when the code has been installed\n # by setuptools.\n runfiles_path = os.environ.get(\"COMPILER_GYM_RUNFILES\")\n if runfiles_path:\n return Path(runfiles_path) / relpath\n else:\n try:\n from rules_python.python.runfiles import runfiles\n\n return Path(\n runfiles.Create().Rlocation(\n \"CompilerGym\" if relpath == \".\" else f\"CompilerGym/{relpath}\"\n )\n )\n except (ModuleNotFoundError, TypeError):\n return _PACKAGE_ROOT / relpath\n\n\ndef site_data_path(relpath: str) -> Path:\n \"\"\"Return a path within the site data directory.\n\n CompilerGym uses a directory to store persistent site data files in, such as benchmark datasets.\n The default location is :code:`~/.local/share/compiler_gym`. Set the environment variable\n :code:`$COMPILER_GYM_SITE_DATA` to override this default location.\n\n No checks are to made to ensure that the path, or the containing directory,\n exist.\n\n :param relpath: The relative path within the site data tree.\n\n :return: An absolute path.\n \"\"\"\n # NOTE(cummins): This function has a matching implementation in the C++\n # sources, compiler_gym::service::getSiteDataPath(). Any change to behavior\n # here must be reflected in the C++ version.\n forced = os.environ.get(\"COMPILER_GYM_SITE_DATA\")\n if forced:\n return Path(forced) / relpath\n elif os.environ.get(\"HOME\"):\n return Path(\"~/.local/share/compiler_gym\").expanduser() / relpath\n else:\n return Path(f\"/tmp/compiler_gym_{getuser()}/site_data\") / relpath\n\n\ndef cache_path(relpath: str) -> Path:\n \"\"\"Return a path within the cache directory.\n\n CompilerGym uses a directory to cache files in, such as downloaded content.\n The default location for this cache is :code:`~/.cache/compiler_gym`. 
Set\n the environment variable :code:`$COMPILER_GYM_CACHE` to override this\n default location.\n\n No checks are to made to ensure that the path, or the containing directory,\n exist.\n\n :param relpath: The relative path within the cache tree.\n\n :return: An absolute path.\n \"\"\"\n forced = os.environ.get(\"COMPILER_GYM_CACHE\")\n if forced:\n return Path(forced) / relpath\n elif os.environ.get(\"HOME\"):\n return Path(\"~/.cache/compiler_gym\").expanduser() / relpath\n else:\n return Path(f\"/tmp/compiler_gym_{getuser()}/cache\") / relpath\n\n\ndef transient_cache_path(relpath: str) -> Path:\n \"\"\"Return a path within the transient cache directory.\n\n The transient cache is a directory used to store files that do not need to\n persist beyond the lifetime of the current process. When available, the\n temporary filesystem :code:`/dev/shm` will be used. Else,\n :meth:`cache_path() <compiler_gym.cache_path>` is used as a fallback. Set\n the environment variable :code:`$COMPILER_GYM_TRANSIENT_CACHE` to override\n the default location.\n\n No checks are to made to ensure that the path, or the containing directory,\n exist.\n\n :param relpath: The relative path within the cache tree.\n\n :return: An absolute path.\n \"\"\"\n forced = os.environ.get(\"COMPILER_GYM_TRANSIENT_CACHE\")\n if forced:\n return Path(forced) / relpath\n elif Path(\"/dev/shm\").is_dir():\n return Path(f\"/dev/shm/compiler_gym_{getuser()}\") / relpath\n else:\n # Fallback to using the regular cache.\n return cache_path(relpath)\n", "path": "compiler_gym/util/runfiles_path.py"}]} | 1,767 | 408 |
gh_patches_debug_11562 | rasdani/github-patches | git_diff | optuna__optuna-1627 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix docstrings
`Sphinx>=3.1.0` doesn't build our documentation for some reason (I guess our `experimental` and `deprecated` decorators are the devils, not sure though).
At first, as https://github.com/optuna/optuna/issues/1368 said, we were optimistic about this, i.e., we thought the next stable would work. However, it's not happened yet. So it's high time we dirtied our hands to enable the latest Sphinx.
~~The latest ongoing pull request is https://github.com/optuna/optuna/pull/1613.~~
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import os
2 import sys
3
4 import pkg_resources
5 from setuptools import find_packages
6 from setuptools import setup
7
8 from typing import Dict
9 from typing import List
10 from typing import Optional
11
12
13 def get_version() -> str:
14
15 version_filepath = os.path.join(os.path.dirname(__file__), "optuna", "version.py")
16 with open(version_filepath) as f:
17 for line in f:
18 if line.startswith("__version__"):
19 return line.strip().split()[-1][1:-1]
20 assert False
21
22
23 def get_long_description() -> str:
24
25 readme_filepath = os.path.join(os.path.dirname(__file__), "README.md")
26 with open(readme_filepath) as f:
27 return f.read()
28
29
30 def get_install_requires() -> List[str]:
31
32 return [
33 "alembic",
34 "cliff",
35 "cmaes>=0.5.1",
36 "colorlog",
37 "joblib",
38 "numpy",
39 "packaging>=20.0",
40 "scipy!=1.4.0",
41 "sqlalchemy>=1.1.0",
42 "tqdm",
43 ]
44
45
46 def get_tests_require() -> List[str]:
47
48 return get_extras_require()["testing"]
49
50
51 def get_extras_require() -> Dict[str, List[str]]:
52
53 requirements = {
54 "checking": ["black", "hacking", "mypy"],
55 "codecov": ["codecov", "pytest-cov"],
56 "doctest": [
57 "cma",
58 "pandas",
59 "plotly>=4.0.0",
60 "scikit-learn>=0.19.0,<0.23.0",
61 "scikit-optimize",
62 "mlflow",
63 ],
64 "document": [
65 # TODO(hvy): Unpin `sphinx` version after:
66 # https://github.com/sphinx-doc/sphinx/issues/7807.
67 "sphinx>=3.0.0,!=3.1.0,!=3.1.1,!=3.1.2,!=3.2.0",
68 # As reported in: https://github.com/readthedocs/sphinx_rtd_theme/issues/949,
69 # `sphinx_rtd_theme` 0.5.0 is still not compatible with `sphinx` >= 3.0.
70 "sphinx_rtd_theme<0.5.0",
71 "sphinx-gallery",
72 "pillow",
73 "matplotlib",
74 "scikit-learn",
75 ],
76 "example": [
77 "catboost",
78 "chainer",
79 "lightgbm",
80 "mlflow",
81 "mpi4py",
82 "mxnet",
83 "nbval",
84 "scikit-image",
85 "scikit-learn>=0.19.0,<0.23.0", # optuna/visualization/param_importances.py.
86 "xgboost",
87 "keras",
88 "tensorflow>=2.0.0",
89 "tensorflow-datasets",
90 ]
91 + (
92 (
93 ["torch==1.6.0", "torchvision==0.7.0"]
94 if sys.platform == "darwin"
95 else ["torch==1.6.0+cpu", "torchvision==0.7.0+cpu"]
96 )
97 + ["pytorch-ignite", "thop"]
98 if (3, 5) < sys.version_info[:2]
99 else []
100 )
101 + (["stable-baselines3>=0.7.0"] if (3, 5) < sys.version_info[:2] else [])
102 + (
103 ["allennlp==1.0.0", "fastai<2", "pytorch_lightning>=0.7.1"]
104 if (3, 5) < sys.version_info[:2] < (3, 8)
105 else []
106 )
107 + (["pytorch-lightning>=0.7.2"] if (3, 8) == sys.version_info[:2] else [])
108 + (
109 ["llvmlite<=0.31.0", "fsspec<0.8.0"] if (3, 5) == sys.version_info[:2] else []
110 ) # Newer `llvmlite` is not distributed with wheels for Python 3.5.
111 # Newer `fsspec` uses f-strings, which is not compatible with Python 3.5.
112 + (["dask[dataframe]", "dask-ml",] if sys.version_info[:2] < (3, 8) else [])
113 + (["catalyst"] if (3, 5) < sys.version_info[:2] else []),
114 "experimental": ["redis"],
115 "testing": [
116 # TODO(toshihikoyanase): Remove the version constraint after resolving the issue
117 # https://github.com/optuna/optuna/issues/1000.
118 "bokeh<2.0.0",
119 "chainer>=5.0.0",
120 "cma",
121 "fakeredis",
122 "lightgbm",
123 "mlflow",
124 "mpi4py",
125 "mxnet",
126 "pandas",
127 "plotly>=4.0.0",
128 "pytest",
129 "scikit-learn>=0.19.0,<0.23.0",
130 "scikit-optimize",
131 "xgboost",
132 "keras",
133 "tensorflow",
134 "tensorflow-datasets",
135 ]
136 + (
137 (
138 ["torch==1.6.0", "torchvision==0.7.0"]
139 if sys.platform == "darwin"
140 else ["torch==1.6.0+cpu", "torchvision==0.7.0+cpu"]
141 )
142 + ["pytorch-ignite"]
143 if (3, 5) < sys.version_info[:2]
144 else []
145 )
146 + (
147 ["allennlp==1.0.0", "fastai<2", "pytorch_lightning>=0.7.1"]
148 if (3, 5) < sys.version_info[:2] < (3, 8)
149 else []
150 )
151 + (["catalyst"] if (3, 5) < sys.version_info[:2] else [])
152 + (["pytorch-lightning>=0.7.2"] if (3, 8) == sys.version_info[:2] else []),
153 "tests": ["fakeredis", "pytest"],
154 "optional": [
155 "bokeh<2.0.0", # optuna/cli.py, optuna/dashboard.py.
156 "pandas", # optuna/study.py
157 "plotly>=4.0.0", # optuna/visualization.
158 "redis", # optuna/storages/redis.py.
159 "scikit-learn>=0.19.0,<0.23.0", # optuna/visualization/param_importances.py.
160 ],
161 "integration": [
162 # TODO(toshihikoyanase): Remove the version constraint after resolving the issue
163 # https://github.com/optuna/optuna/issues/1000.
164 "chainer>=5.0.0",
165 "cma",
166 "lightgbm",
167 "mlflow",
168 "mpi4py",
169 "mxnet",
170 "pandas",
171 "scikit-learn>=0.19.0,<0.23.0",
172 "scikit-optimize",
173 "xgboost",
174 "keras",
175 "tensorflow",
176 "tensorflow-datasets",
177 ]
178 + (
179 (
180 ["torch==1.6.0", "torchvision==0.7.0"]
181 if sys.platform == "darwin"
182 else ["torch==1.6.0+cpu", "torchvision==0.7.0+cpu"]
183 )
184 + ["pytorch-ignite"]
185 if (3, 5) < sys.version_info[:2]
186 else []
187 )
188 + (
189 ["allennlp==1.0.0", "fastai<2", "pytorch-lightning>=0.7.1"]
190 if (3, 5) < sys.version_info[:2] < (3, 8)
191 else []
192 )
193 + (["catalyst"] if (3, 5) < sys.version_info[:2] else [])
194 + (["pytorch-lightning>=0.7.2"] if (3, 8) == sys.version_info[:2] else []),
195 }
196
197 return requirements
198
199
200 def find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]:
201
202 for pkg in pkgs:
203 try:
204 return pkg_resources.get_distribution(pkg)
205 except pkg_resources.DistributionNotFound:
206 pass
207 return None
208
209
210 setup(
211 name="optuna",
212 version=get_version(),
213 description="A hyperparameter optimization framework",
214 long_description=get_long_description(),
215 long_description_content_type="text/markdown",
216 author="Takuya Akiba",
217 author_email="[email protected]",
218 url="https://optuna.org/",
219 packages=find_packages(),
220 package_data={
221 "optuna": [
222 "storages/_rdb/alembic.ini",
223 "storages/_rdb/alembic/*.*",
224 "storages/_rdb/alembic/versions/*.*",
225 ]
226 },
227 python_requires=">=3.5",
228 install_requires=get_install_requires(),
229 tests_require=get_tests_require(),
230 extras_require=get_extras_require(),
231 entry_points={
232 "console_scripts": ["optuna = optuna.cli:main"],
233 "optuna.command": [
234 "create-study = optuna.cli:_CreateStudy",
235 "delete-study = optuna.cli:_DeleteStudy",
236 "study set-user-attr = optuna.cli:_StudySetUserAttribute",
237 "studies = optuna.cli:_Studies",
238 "dashboard = optuna.cli:_Dashboard",
239 "study optimize = optuna.cli:_StudyOptimize",
240 "storage upgrade = optuna.cli:_StorageUpgrade",
241 ],
242 },
243 classifiers=[
244 "Development Status :: 5 - Production/Stable",
245 "Intended Audience :: Science/Research",
246 "Intended Audience :: Developers",
247 "License :: OSI Approved :: MIT License",
248 "Programming Language :: Python :: 3",
249 "Programming Language :: Python :: 3.5",
250 "Programming Language :: Python :: 3.6",
251 "Programming Language :: Python :: 3.7",
252 "Programming Language :: Python :: 3.8",
253 "Programming Language :: Python :: 3 :: Only",
254 "Topic :: Scientific/Engineering",
255 "Topic :: Scientific/Engineering :: Mathematics",
256 "Topic :: Scientific/Engineering :: Artificial Intelligence",
257 "Topic :: Software Development",
258 "Topic :: Software Development :: Libraries",
259 "Topic :: Software Development :: Libraries :: Python Modules",
260 ],
261 )
262
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -63,8 +63,8 @@
],
"document": [
# TODO(hvy): Unpin `sphinx` version after:
- # https://github.com/sphinx-doc/sphinx/issues/7807.
- "sphinx>=3.0.0,!=3.1.0,!=3.1.1,!=3.1.2,!=3.2.0",
+ # https://github.com/sphinx-doc/sphinx/issues/8105.
+ "sphinx==3.0.4",
# As reported in: https://github.com/readthedocs/sphinx_rtd_theme/issues/949,
# `sphinx_rtd_theme` 0.5.0 is still not compatible with `sphinx` >= 3.0.
"sphinx_rtd_theme<0.5.0",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -63,8 +63,8 @@\n ],\n \"document\": [\n # TODO(hvy): Unpin `sphinx` version after:\n- # https://github.com/sphinx-doc/sphinx/issues/7807.\n- \"sphinx>=3.0.0,!=3.1.0,!=3.1.1,!=3.1.2,!=3.2.0\",\n+ # https://github.com/sphinx-doc/sphinx/issues/8105.\n+ \"sphinx==3.0.4\",\n # As reported in: https://github.com/readthedocs/sphinx_rtd_theme/issues/949,\n # `sphinx_rtd_theme` 0.5.0 is still not compatible with `sphinx` >= 3.0.\n \"sphinx_rtd_theme<0.5.0\",\n", "issue": "Fix docstrings\n`Sphinx>=3.1.0` doesn't build our documentation for some reason (I guess our `experimental` and `deprecated` decorators are the devils, not sure though).\r\n\r\nAt first, as https://github.com/optuna/optuna/issues/1368 said, we were optimistic about this, i.e., we thought the next stable would work. However, it's not happened yet. So it's high time we dirtied our hands to enable the latest Sphinx.\r\n\r\n~~The latest ongoing pull request is https://github.com/optuna/optuna/pull/1613.~~\n", "before_files": [{"content": "import os\nimport sys\n\nimport pkg_resources\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\n\n\ndef get_version() -> str:\n\n version_filepath = os.path.join(os.path.dirname(__file__), \"optuna\", \"version.py\")\n with open(version_filepath) as f:\n for line in f:\n if line.startswith(\"__version__\"):\n return line.strip().split()[-1][1:-1]\n assert False\n\n\ndef get_long_description() -> str:\n\n readme_filepath = os.path.join(os.path.dirname(__file__), \"README.md\")\n with open(readme_filepath) as f:\n return f.read()\n\n\ndef get_install_requires() -> List[str]:\n\n return [\n \"alembic\",\n \"cliff\",\n \"cmaes>=0.5.1\",\n \"colorlog\",\n \"joblib\",\n \"numpy\",\n \"packaging>=20.0\",\n \"scipy!=1.4.0\",\n \"sqlalchemy>=1.1.0\",\n \"tqdm\",\n ]\n\n\ndef get_tests_require() -> List[str]:\n\n return get_extras_require()[\"testing\"]\n\n\ndef get_extras_require() -> Dict[str, List[str]]:\n\n requirements = {\n \"checking\": [\"black\", \"hacking\", \"mypy\"],\n \"codecov\": [\"codecov\", \"pytest-cov\"],\n \"doctest\": [\n \"cma\",\n \"pandas\",\n \"plotly>=4.0.0\",\n \"scikit-learn>=0.19.0,<0.23.0\",\n \"scikit-optimize\",\n \"mlflow\",\n ],\n \"document\": [\n # TODO(hvy): Unpin `sphinx` version after:\n # https://github.com/sphinx-doc/sphinx/issues/7807.\n \"sphinx>=3.0.0,!=3.1.0,!=3.1.1,!=3.1.2,!=3.2.0\",\n # As reported in: https://github.com/readthedocs/sphinx_rtd_theme/issues/949,\n # `sphinx_rtd_theme` 0.5.0 is still not compatible with `sphinx` >= 3.0.\n \"sphinx_rtd_theme<0.5.0\",\n \"sphinx-gallery\",\n \"pillow\",\n \"matplotlib\",\n \"scikit-learn\",\n ],\n \"example\": [\n \"catboost\",\n \"chainer\",\n \"lightgbm\",\n \"mlflow\",\n \"mpi4py\",\n \"mxnet\",\n \"nbval\",\n \"scikit-image\",\n \"scikit-learn>=0.19.0,<0.23.0\", # optuna/visualization/param_importances.py.\n \"xgboost\",\n \"keras\",\n \"tensorflow>=2.0.0\",\n \"tensorflow-datasets\",\n ]\n + (\n (\n [\"torch==1.6.0\", \"torchvision==0.7.0\"]\n if sys.platform == \"darwin\"\n else [\"torch==1.6.0+cpu\", \"torchvision==0.7.0+cpu\"]\n )\n + [\"pytorch-ignite\", \"thop\"]\n if (3, 5) < sys.version_info[:2]\n else []\n )\n + ([\"stable-baselines3>=0.7.0\"] if (3, 5) < sys.version_info[:2] else [])\n + (\n [\"allennlp==1.0.0\", \"fastai<2\", \"pytorch_lightning>=0.7.1\"]\n if (3, 5) < sys.version_info[:2] < (3, 8)\n else []\n )\n + 
([\"pytorch-lightning>=0.7.2\"] if (3, 8) == sys.version_info[:2] else [])\n + (\n [\"llvmlite<=0.31.0\", \"fsspec<0.8.0\"] if (3, 5) == sys.version_info[:2] else []\n ) # Newer `llvmlite` is not distributed with wheels for Python 3.5.\n # Newer `fsspec` uses f-strings, which is not compatible with Python 3.5.\n + ([\"dask[dataframe]\", \"dask-ml\",] if sys.version_info[:2] < (3, 8) else [])\n + ([\"catalyst\"] if (3, 5) < sys.version_info[:2] else []),\n \"experimental\": [\"redis\"],\n \"testing\": [\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/1000.\n \"bokeh<2.0.0\",\n \"chainer>=5.0.0\",\n \"cma\",\n \"fakeredis\",\n \"lightgbm\",\n \"mlflow\",\n \"mpi4py\",\n \"mxnet\",\n \"pandas\",\n \"plotly>=4.0.0\",\n \"pytest\",\n \"scikit-learn>=0.19.0,<0.23.0\",\n \"scikit-optimize\",\n \"xgboost\",\n \"keras\",\n \"tensorflow\",\n \"tensorflow-datasets\",\n ]\n + (\n (\n [\"torch==1.6.0\", \"torchvision==0.7.0\"]\n if sys.platform == \"darwin\"\n else [\"torch==1.6.0+cpu\", \"torchvision==0.7.0+cpu\"]\n )\n + [\"pytorch-ignite\"]\n if (3, 5) < sys.version_info[:2]\n else []\n )\n + (\n [\"allennlp==1.0.0\", \"fastai<2\", \"pytorch_lightning>=0.7.1\"]\n if (3, 5) < sys.version_info[:2] < (3, 8)\n else []\n )\n + ([\"catalyst\"] if (3, 5) < sys.version_info[:2] else [])\n + ([\"pytorch-lightning>=0.7.2\"] if (3, 8) == sys.version_info[:2] else []),\n \"tests\": [\"fakeredis\", \"pytest\"],\n \"optional\": [\n \"bokeh<2.0.0\", # optuna/cli.py, optuna/dashboard.py.\n \"pandas\", # optuna/study.py\n \"plotly>=4.0.0\", # optuna/visualization.\n \"redis\", # optuna/storages/redis.py.\n \"scikit-learn>=0.19.0,<0.23.0\", # optuna/visualization/param_importances.py.\n ],\n \"integration\": [\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/1000.\n \"chainer>=5.0.0\",\n \"cma\",\n \"lightgbm\",\n \"mlflow\",\n \"mpi4py\",\n \"mxnet\",\n \"pandas\",\n \"scikit-learn>=0.19.0,<0.23.0\",\n \"scikit-optimize\",\n \"xgboost\",\n \"keras\",\n \"tensorflow\",\n \"tensorflow-datasets\",\n ]\n + (\n (\n [\"torch==1.6.0\", \"torchvision==0.7.0\"]\n if sys.platform == \"darwin\"\n else [\"torch==1.6.0+cpu\", \"torchvision==0.7.0+cpu\"]\n )\n + [\"pytorch-ignite\"]\n if (3, 5) < sys.version_info[:2]\n else []\n )\n + (\n [\"allennlp==1.0.0\", \"fastai<2\", \"pytorch-lightning>=0.7.1\"]\n if (3, 5) < sys.version_info[:2] < (3, 8)\n else []\n )\n + ([\"catalyst\"] if (3, 5) < sys.version_info[:2] else [])\n + ([\"pytorch-lightning>=0.7.2\"] if (3, 8) == sys.version_info[:2] else []),\n }\n\n return requirements\n\n\ndef find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]:\n\n for pkg in pkgs:\n try:\n return pkg_resources.get_distribution(pkg)\n except pkg_resources.DistributionNotFound:\n pass\n return None\n\n\nsetup(\n name=\"optuna\",\n version=get_version(),\n description=\"A hyperparameter optimization framework\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Takuya Akiba\",\n author_email=\"[email protected]\",\n url=\"https://optuna.org/\",\n packages=find_packages(),\n package_data={\n \"optuna\": [\n \"storages/_rdb/alembic.ini\",\n \"storages/_rdb/alembic/*.*\",\n \"storages/_rdb/alembic/versions/*.*\",\n ]\n },\n python_requires=\">=3.5\",\n install_requires=get_install_requires(),\n tests_require=get_tests_require(),\n extras_require=get_extras_require(),\n 
entry_points={\n \"console_scripts\": [\"optuna = optuna.cli:main\"],\n \"optuna.command\": [\n \"create-study = optuna.cli:_CreateStudy\",\n \"delete-study = optuna.cli:_DeleteStudy\",\n \"study set-user-attr = optuna.cli:_StudySetUserAttribute\",\n \"studies = optuna.cli:_Studies\",\n \"dashboard = optuna.cli:_Dashboard\",\n \"study optimize = optuna.cli:_StudyOptimize\",\n \"storage upgrade = optuna.cli:_StorageUpgrade\",\n ],\n },\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Libraries\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "import os\nimport sys\n\nimport pkg_resources\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\n\n\ndef get_version() -> str:\n\n version_filepath = os.path.join(os.path.dirname(__file__), \"optuna\", \"version.py\")\n with open(version_filepath) as f:\n for line in f:\n if line.startswith(\"__version__\"):\n return line.strip().split()[-1][1:-1]\n assert False\n\n\ndef get_long_description() -> str:\n\n readme_filepath = os.path.join(os.path.dirname(__file__), \"README.md\")\n with open(readme_filepath) as f:\n return f.read()\n\n\ndef get_install_requires() -> List[str]:\n\n return [\n \"alembic\",\n \"cliff\",\n \"cmaes>=0.5.1\",\n \"colorlog\",\n \"joblib\",\n \"numpy\",\n \"packaging>=20.0\",\n \"scipy!=1.4.0\",\n \"sqlalchemy>=1.1.0\",\n \"tqdm\",\n ]\n\n\ndef get_tests_require() -> List[str]:\n\n return get_extras_require()[\"testing\"]\n\n\ndef get_extras_require() -> Dict[str, List[str]]:\n\n requirements = {\n \"checking\": [\"black\", \"hacking\", \"mypy\"],\n \"codecov\": [\"codecov\", \"pytest-cov\"],\n \"doctest\": [\n \"cma\",\n \"pandas\",\n \"plotly>=4.0.0\",\n \"scikit-learn>=0.19.0,<0.23.0\",\n \"scikit-optimize\",\n \"mlflow\",\n ],\n \"document\": [\n # TODO(hvy): Unpin `sphinx` version after:\n # https://github.com/sphinx-doc/sphinx/issues/8105.\n \"sphinx==3.0.4\",\n # As reported in: https://github.com/readthedocs/sphinx_rtd_theme/issues/949,\n # `sphinx_rtd_theme` 0.5.0 is still not compatible with `sphinx` >= 3.0.\n \"sphinx_rtd_theme<0.5.0\",\n \"sphinx-gallery\",\n \"pillow\",\n \"matplotlib\",\n \"scikit-learn\",\n ],\n \"example\": [\n \"catboost\",\n \"chainer\",\n \"lightgbm\",\n \"mlflow\",\n \"mpi4py\",\n \"mxnet\",\n \"nbval\",\n \"scikit-image\",\n \"scikit-learn>=0.19.0,<0.23.0\", # optuna/visualization/param_importances.py.\n \"xgboost\",\n \"keras\",\n \"tensorflow>=2.0.0\",\n \"tensorflow-datasets\",\n ]\n + (\n (\n [\"torch==1.6.0\", \"torchvision==0.7.0\"]\n if sys.platform == \"darwin\"\n else [\"torch==1.6.0+cpu\", \"torchvision==0.7.0+cpu\"]\n )\n + [\"pytorch-ignite\", \"thop\"]\n if (3, 5) < sys.version_info[:2]\n else []\n )\n + ([\"stable-baselines3>=0.7.0\"] if (3, 5) < 
sys.version_info[:2] else [])\n + (\n [\"allennlp==1.0.0\", \"fastai<2\", \"pytorch_lightning>=0.7.1\"]\n if (3, 5) < sys.version_info[:2] < (3, 8)\n else []\n )\n + ([\"pytorch-lightning>=0.7.2\"] if (3, 8) == sys.version_info[:2] else [])\n + (\n [\"llvmlite<=0.31.0\", \"fsspec<0.8.0\"] if (3, 5) == sys.version_info[:2] else []\n ) # Newer `llvmlite` is not distributed with wheels for Python 3.5.\n # Newer `fsspec` uses f-strings, which is not compatible with Python 3.5.\n + ([\"dask[dataframe]\", \"dask-ml\",] if sys.version_info[:2] < (3, 8) else [])\n + ([\"catalyst\"] if (3, 5) < sys.version_info[:2] else []),\n \"experimental\": [\"redis\"],\n \"testing\": [\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/1000.\n \"bokeh<2.0.0\",\n \"chainer>=5.0.0\",\n \"cma\",\n \"fakeredis\",\n \"lightgbm\",\n \"mlflow\",\n \"mpi4py\",\n \"mxnet\",\n \"pandas\",\n \"plotly>=4.0.0\",\n \"pytest\",\n \"scikit-learn>=0.19.0,<0.23.0\",\n \"scikit-optimize\",\n \"xgboost\",\n \"keras\",\n \"tensorflow\",\n \"tensorflow-datasets\",\n ]\n + (\n (\n [\"torch==1.6.0\", \"torchvision==0.7.0\"]\n if sys.platform == \"darwin\"\n else [\"torch==1.6.0+cpu\", \"torchvision==0.7.0+cpu\"]\n )\n + [\"pytorch-ignite\"]\n if (3, 5) < sys.version_info[:2]\n else []\n )\n + (\n [\"allennlp==1.0.0\", \"fastai<2\", \"pytorch_lightning>=0.7.1\"]\n if (3, 5) < sys.version_info[:2] < (3, 8)\n else []\n )\n + ([\"catalyst\"] if (3, 5) < sys.version_info[:2] else [])\n + ([\"pytorch-lightning>=0.7.2\"] if (3, 8) == sys.version_info[:2] else []),\n \"tests\": [\"fakeredis\", \"pytest\"],\n \"optional\": [\n \"bokeh<2.0.0\", # optuna/cli.py, optuna/dashboard.py.\n \"pandas\", # optuna/study.py\n \"plotly>=4.0.0\", # optuna/visualization.\n \"redis\", # optuna/storages/redis.py.\n \"scikit-learn>=0.19.0,<0.23.0\", # optuna/visualization/param_importances.py.\n ],\n \"integration\": [\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/1000.\n \"chainer>=5.0.0\",\n \"cma\",\n \"lightgbm\",\n \"mlflow\",\n \"mpi4py\",\n \"mxnet\",\n \"pandas\",\n \"scikit-learn>=0.19.0,<0.23.0\",\n \"scikit-optimize\",\n \"xgboost\",\n \"keras\",\n \"tensorflow\",\n \"tensorflow-datasets\",\n ]\n + (\n (\n [\"torch==1.6.0\", \"torchvision==0.7.0\"]\n if sys.platform == \"darwin\"\n else [\"torch==1.6.0+cpu\", \"torchvision==0.7.0+cpu\"]\n )\n + [\"pytorch-ignite\"]\n if (3, 5) < sys.version_info[:2]\n else []\n )\n + (\n [\"allennlp==1.0.0\", \"fastai<2\", \"pytorch-lightning>=0.7.1\"]\n if (3, 5) < sys.version_info[:2] < (3, 8)\n else []\n )\n + ([\"catalyst\"] if (3, 5) < sys.version_info[:2] else [])\n + ([\"pytorch-lightning>=0.7.2\"] if (3, 8) == sys.version_info[:2] else []),\n }\n\n return requirements\n\n\ndef find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]:\n\n for pkg in pkgs:\n try:\n return pkg_resources.get_distribution(pkg)\n except pkg_resources.DistributionNotFound:\n pass\n return None\n\n\nsetup(\n name=\"optuna\",\n version=get_version(),\n description=\"A hyperparameter optimization framework\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Takuya Akiba\",\n author_email=\"[email protected]\",\n url=\"https://optuna.org/\",\n packages=find_packages(),\n package_data={\n \"optuna\": [\n \"storages/_rdb/alembic.ini\",\n \"storages/_rdb/alembic/*.*\",\n 
\"storages/_rdb/alembic/versions/*.*\",\n ]\n },\n python_requires=\">=3.5\",\n install_requires=get_install_requires(),\n tests_require=get_tests_require(),\n extras_require=get_extras_require(),\n entry_points={\n \"console_scripts\": [\"optuna = optuna.cli:main\"],\n \"optuna.command\": [\n \"create-study = optuna.cli:_CreateStudy\",\n \"delete-study = optuna.cli:_DeleteStudy\",\n \"study set-user-attr = optuna.cli:_StudySetUserAttribute\",\n \"studies = optuna.cli:_Studies\",\n \"dashboard = optuna.cli:_Dashboard\",\n \"study optimize = optuna.cli:_StudyOptimize\",\n \"storage upgrade = optuna.cli:_StorageUpgrade\",\n ],\n },\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Libraries\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n)\n", "path": "setup.py"}]} | 3,435 | 212 |
gh_patches_debug_1369 | rasdani/github-patches | git_diff | Parsl__parsl-972 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix `ModuleNotFoundError: No module named 'monitoring'`
Looks like this bug was introduced with the recent merge of monitoring back into the parsl repo.
```
Traceback (most recent call last):
File "/Users/awoodard/software/miniconda3/bin/parsl-visualize", line 11, in <module>
load_entry_point('parsl==0.7.2', 'console_scripts', 'parsl-visualize')()
File "/Users/awoodard/software/miniconda3/lib/python3.7/site-packages/pkg_resources/__init__.py", line 484, in load_entry_point
return get_distribution(dist).load_entry_point(group, name)
File "/Users/awoodard/software/miniconda3/lib/python3.7/site-packages/pkg_resources/__init__.py", line 2707, in load_entry_point
return ep.load()
File "/Users/awoodard/software/miniconda3/lib/python3.7/site-packages/pkg_resources/__init__.py", line 2325, in load
return self.resolve()
File "/Users/awoodard/software/miniconda3/lib/python3.7/site-packages/pkg_resources/__init__.py", line 2331, in resolve
module = __import__(self.module_name, fromlist=['__name__'], level=0)
ModuleNotFoundError: No module named 'monitoring'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup, find_packages
2
3 with open('parsl/version.py') as f:
4 exec(f.read())
5
6 with open('requirements.txt') as f:
7 install_requires = f.readlines()
8
9 extras_require = {
10 'monitoring' : [
11 'psutil',
12 'sqlalchemy',
13 'sqlalchemy_utils',
14 'pydot',
15 'networkx',
16 'Flask',
17 'flask_sqlalchemy',
18 'pandas',
19 'plotly',
20 'python-daemon'
21 ],
22 'aws' : ['boto3'],
23 'kubernetes' : ['kubernetes'],
24 'extreme_scale' : ['mpi4py'],
25 'docs' : ['nbsphinx', 'sphinx_rtd_theme'],
26 'google_cloud' : ['google-auth', 'google-api-python-client'],
27 'gssapi' : ['python-gssapi'],
28 }
29 extras_require['all'] = sum(extras_require.values(), [])
30
31 setup(
32 name='parsl',
33 version=VERSION,
34 description='Simple data dependent workflows in Python',
35 long_description='Simple parallel workflows system for Python',
36 url='https://github.com/Parsl/parsl',
37 author='The Parsl Team',
38 author_email='[email protected]',
39 license='Apache 2.0',
40 download_url='https://github.com/Parsl/parsl/archive/{}.tar.gz'.format(VERSION),
41 include_package_data=True,
42 packages=find_packages(),
43 install_requires=install_requires,
44 scripts = ['parsl/executors/high_throughput/process_worker_pool.py',
45 'parsl/executors/extreme_scale/mpi_worker_pool.py',
46 'parsl/executors/low_latency/lowlatency_worker.py',
47 ],
48 extras_require=extras_require,
49 classifiers=[
50 # Maturity
51 'Development Status :: 3 - Alpha',
52 # Intended audience
53 'Intended Audience :: Developers',
54 # Licence, must match with licence above
55 'License :: OSI Approved :: Apache Software License',
56 # Python versions supported
57 'Programming Language :: Python :: 3.5',
58 'Programming Language :: Python :: 3.6',
59 ],
60 keywords=['Workflows', 'Scientific computing'],
61 entry_points={'console_scripts':
62 [
63 'parsl-globus-auth=parsl.data_provider.globus:cli_run',
64 'parsl-visualize=monitoring.visualization.app:cli_run',
65 ]}
66 )
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -61,6 +61,6 @@
entry_points={'console_scripts':
[
'parsl-globus-auth=parsl.data_provider.globus:cli_run',
- 'parsl-visualize=monitoring.visualization.app:cli_run',
+ 'parsl-visualize=parsl.monitoring.visualization.app:cli_run',
]}
)
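To see why the old value failed, the entry point string can be parsed the same way the console script wrapper does; this mirrors the `pkg_resources` machinery shown in the traceback and is illustrative only:

```python
# Illustrative: how setuptools console scripts resolve "name=module:attr" strings.
import pkg_resources

bad = pkg_resources.EntryPoint.parse(
    "parsl-visualize=monitoring.visualization.app:cli_run"
)
good = pkg_resources.EntryPoint.parse(
    "parsl-visualize=parsl.monitoring.visualization.app:cli_run"
)

print(bad.module_name)   # "monitoring.visualization.app" -> ModuleNotFoundError at resolve()
print(good.module_name)  # "parsl.monitoring.visualization.app" -> importable once parsl is installed
```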
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -61,6 +61,6 @@\n entry_points={'console_scripts':\n [\n 'parsl-globus-auth=parsl.data_provider.globus:cli_run',\n- 'parsl-visualize=monitoring.visualization.app:cli_run',\n+ 'parsl-visualize=parsl.monitoring.visualization.app:cli_run',\n ]}\n )\n", "issue": "Fix `ModuleNotFoundError: No module named 'monitoring'`\nLooks like this bug was introduced with the recent merge of monitoring back into the parsl repo.\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/Users/awoodard/software/miniconda3/bin/parsl-visualize\", line 11, in <module>\r\n load_entry_point('parsl==0.7.2', 'console_scripts', 'parsl-visualize')()\r\n File \"/Users/awoodard/software/miniconda3/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 484, in load_entry_point\r\n return get_distribution(dist).load_entry_point(group, name)\r\n File \"/Users/awoodard/software/miniconda3/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 2707, in load_entry_point\r\n return ep.load()\r\n File \"/Users/awoodard/software/miniconda3/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 2325, in load\r\n return self.resolve()\r\n File \"/Users/awoodard/software/miniconda3/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 2331, in resolve\r\n module = __import__(self.module_name, fromlist=['__name__'], level=0)\r\nModuleNotFoundError: No module named 'monitoring'\r\n```\n", "before_files": [{"content": "from setuptools import setup, find_packages\n\nwith open('parsl/version.py') as f:\n exec(f.read())\n\nwith open('requirements.txt') as f:\n install_requires = f.readlines()\n\nextras_require = {\n 'monitoring' : [\n 'psutil',\n 'sqlalchemy',\n 'sqlalchemy_utils',\n 'pydot',\n 'networkx',\n 'Flask',\n 'flask_sqlalchemy',\n 'pandas',\n 'plotly',\n 'python-daemon'\n ],\n 'aws' : ['boto3'],\n 'kubernetes' : ['kubernetes'],\n 'extreme_scale' : ['mpi4py'],\n 'docs' : ['nbsphinx', 'sphinx_rtd_theme'],\n 'google_cloud' : ['google-auth', 'google-api-python-client'],\n 'gssapi' : ['python-gssapi'],\n}\nextras_require['all'] = sum(extras_require.values(), [])\n\nsetup(\n name='parsl',\n version=VERSION,\n description='Simple data dependent workflows in Python',\n long_description='Simple parallel workflows system for Python',\n url='https://github.com/Parsl/parsl',\n author='The Parsl Team',\n author_email='[email protected]',\n license='Apache 2.0',\n download_url='https://github.com/Parsl/parsl/archive/{}.tar.gz'.format(VERSION),\n include_package_data=True,\n packages=find_packages(),\n install_requires=install_requires,\n scripts = ['parsl/executors/high_throughput/process_worker_pool.py',\n 'parsl/executors/extreme_scale/mpi_worker_pool.py',\n 'parsl/executors/low_latency/lowlatency_worker.py',\n ],\n extras_require=extras_require,\n classifiers=[\n # Maturity\n 'Development Status :: 3 - Alpha',\n # Intended audience\n 'Intended Audience :: Developers',\n # Licence, must match with licence above\n 'License :: OSI Approved :: Apache Software License',\n # Python versions supported\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords=['Workflows', 'Scientific computing'],\n entry_points={'console_scripts':\n [\n 'parsl-globus-auth=parsl.data_provider.globus:cli_run',\n 'parsl-visualize=monitoring.visualization.app:cli_run',\n ]}\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\n\nwith open('parsl/version.py') 
as f:\n exec(f.read())\n\nwith open('requirements.txt') as f:\n install_requires = f.readlines()\n\nextras_require = {\n 'monitoring' : [\n 'psutil',\n 'sqlalchemy',\n 'sqlalchemy_utils',\n 'pydot',\n 'networkx',\n 'Flask',\n 'flask_sqlalchemy',\n 'pandas',\n 'plotly',\n 'python-daemon'\n ],\n 'aws' : ['boto3'],\n 'kubernetes' : ['kubernetes'],\n 'extreme_scale' : ['mpi4py'],\n 'docs' : ['nbsphinx', 'sphinx_rtd_theme'],\n 'google_cloud' : ['google-auth', 'google-api-python-client'],\n 'gssapi' : ['python-gssapi'],\n}\nextras_require['all'] = sum(extras_require.values(), [])\n\nsetup(\n name='parsl',\n version=VERSION,\n description='Simple data dependent workflows in Python',\n long_description='Simple parallel workflows system for Python',\n url='https://github.com/Parsl/parsl',\n author='The Parsl Team',\n author_email='[email protected]',\n license='Apache 2.0',\n download_url='https://github.com/Parsl/parsl/archive/{}.tar.gz'.format(VERSION),\n include_package_data=True,\n packages=find_packages(),\n install_requires=install_requires,\n scripts = ['parsl/executors/high_throughput/process_worker_pool.py',\n 'parsl/executors/extreme_scale/mpi_worker_pool.py',\n 'parsl/executors/low_latency/lowlatency_worker.py',\n ],\n extras_require=extras_require,\n classifiers=[\n # Maturity\n 'Development Status :: 3 - Alpha',\n # Intended audience\n 'Intended Audience :: Developers',\n # Licence, must match with licence above\n 'License :: OSI Approved :: Apache Software License',\n # Python versions supported\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords=['Workflows', 'Scientific computing'],\n entry_points={'console_scripts':\n [\n 'parsl-globus-auth=parsl.data_provider.globus:cli_run',\n 'parsl-visualize=parsl.monitoring.visualization.app:cli_run',\n ]}\n)\n", "path": "setup.py"}]} | 1,207 | 99 |
gh_patches_debug_24962 | rasdani/github-patches | git_diff | translate__pootle-5560 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Suggestions count on stats is wrong
See https://mozilla.locamotion.org/cy/ where it says 1 pending suggestion, but clicking on the link says that there are no results.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pootle/apps/pootle_store/utils.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 from collections import OrderedDict
10
11 from django.conf import settings
12 from django.contrib.auth import get_user_model
13 from django.template import loader
14 from django.utils import timezone
15
16 from pootle.core.delegate import site
17 from pootle.core.mail import send_mail
18 from pootle.i18n.gettext import ugettext as _
19 from pootle_comment.forms import UnsecuredCommentForm
20 from pootle_statistics.models import (
21 Submission, SubmissionFields, SubmissionTypes)
22
23 from .constants import FUZZY, TRANSLATED
24 from .models import Suggestion, SuggestionStates
25
26
27 User = get_user_model()
28
29
30 class SuggestionsReview(object):
31 accept_email_template = 'editor/email/suggestions_accepted_with_comment.txt'
32 accept_email_subject = _(u"Suggestion accepted with comment")
33 reject_email_template = 'editor/email/suggestions_rejected_with_comment.txt'
34 reject_email_subject = _(u"Suggestion rejected with comment")
35
36 def __init__(self, suggestions=None, reviewer=None):
37 self.suggestions = suggestions
38 self.reviewer = reviewer
39
40 @property
41 def users_and_suggestions(self):
42 users = {}
43 for suggestion in self.suggestions:
44 users[suggestion.user] = users.get(suggestion.user, [])
45 users[suggestion.user].append(suggestion)
46 return users
47
48 def add_comments(self, comment):
49 for suggestion in self.suggestions:
50 UnsecuredCommentForm(
51 suggestion,
52 dict(comment=comment,
53 user=self.reviewer)).save()
54
55 def add(self, unit, translation, user=None, touch=True,
56 similarity=None, mt_similarity=None):
57 """Adds a new suggestion to the unit.
58
59 :param translation: suggested translation text
60 :param user: user who is making the suggestion. If it's ``None``,
61 the ``system`` user will be used.
62 :param touch: whether to update the unit's timestamp after adding
63 the suggestion or not.
64 :param similarity: human similarity for the new suggestion.
65 :param mt_similarity: MT similarity for the new suggestion.
66
67 :return: a tuple ``(suggestion, created)`` where ``created`` is a
68 boolean indicating if the suggestion was successfully added.
69 If the suggestion already exists it's returned as well.
70 """
71 dont_add = (
72 not filter(None, translation)
73 or translation == unit.target)
74 if dont_add:
75 return (None, False)
76 user = user or User.objects.get_system_user()
77 try:
78 suggestion = Suggestion.objects.pending().get(
79 unit=unit,
80 user=user,
81 target_f=translation)
82 return (suggestion, False)
83 except Suggestion.DoesNotExist:
84 suggestion = Suggestion.objects.create(
85 unit=unit,
86 user=user,
87 state=SuggestionStates.PENDING,
88 target=translation,
89 creation_time=timezone.now())
90 self.create_submission(
91 suggestion,
92 SubmissionTypes.SUGG_ADD,
93 user,
94 similarity=similarity,
95 mt_similarity=mt_similarity).save()
96 if touch:
97 unit.save()
98 return (suggestion, True)
99
100 def create_submission(self, suggestion, suggestion_type, user, **kwargs):
101 return Submission(
102 creation_time=kwargs.get("creation_time", suggestion.creation_time),
103 translation_project=suggestion.unit.store.translation_project,
104 submitter=user,
105 unit=suggestion.unit,
106 store=suggestion.unit.store,
107 type=suggestion_type,
108 suggestion=suggestion,
109 similarity=kwargs.get("similarity"),
110 mt_similarity=kwargs.get("mt_similarity"))
111
112 def accept_suggestion(self, suggestion):
113 unit = suggestion.unit
114 translation_project = unit.store.translation_project
115
116 # Save for later
117 old_state = unit.state
118 old_target = unit.target
119
120 # Update some basic attributes so we can create submissions. Note
121 # these do not conflict with `ScoreLog`'s interests, so it's safe
122 unit.target = suggestion.target
123 if unit.state == FUZZY:
124 unit.state = TRANSLATED
125
126 current_time = timezone.now()
127 suggestion.state = SuggestionStates.ACCEPTED
128 suggestion.reviewer = self.reviewer
129 suggestion.review_time = current_time
130 suggestion.save()
131 create_subs = OrderedDict()
132 if old_state != unit.state:
133 create_subs[SubmissionFields.STATE] = [old_state, unit.state]
134 create_subs[SubmissionFields.TARGET] = [old_target, unit.target]
135 subs_created = []
136 for field in create_subs:
137 kwargs = {
138 'creation_time': current_time,
139 'translation_project': translation_project,
140 'submitter': self.reviewer,
141 'unit': unit,
142 'store': unit.store,
143 'field': field,
144 'type': SubmissionTypes.SUGG_ACCEPT,
145 'old_value': create_subs[field][0],
146 'new_value': create_subs[field][1],
147 }
148 if field == SubmissionFields.TARGET:
149 kwargs['suggestion'] = suggestion
150
151 subs_created.append(Submission(**kwargs))
152 if subs_created:
153 unit.submission_set.add(*subs_created, bulk=False)
154
155 # FIXME: remove such a dependency on `ScoreLog`
156 # Update current unit instance's attributes
157 # important to set these attributes after saving Submission
158 # because in the `ScoreLog` we need to access the unit's certain
159 # attributes before it was saved
160 # THIS NEEDS TO GO ^^
161 unit.submitted_by = suggestion.user
162 unit.submitted_on = current_time
163 unit.reviewed_by = self.reviewer
164 unit.reviewed_on = unit.submitted_on
165 unit._log_user = self.reviewer
166 unit.save()
167
168 def reject_suggestion(self, suggestion):
169 suggestion.state = SuggestionStates.REJECTED
170 suggestion.review_time = timezone.now()
171 suggestion.reviewer = self.reviewer
172 suggestion.save()
173 self.create_submission(
174 suggestion,
175 SubmissionTypes.SUGG_REJECT,
176 self.reviewer,
177 creation_time=suggestion.review_time).save()
178
179 def accept_suggestions(self):
180 for suggestion in self.suggestions:
181 self.accept_suggestion(suggestion)
182
183 def accept(self, comment=""):
184 self.accept_suggestions()
185 if self.should_notify(comment):
186 self.notify_suggesters(rejected=False, comment=comment)
187 if comment:
188 self.add_comments(comment=comment)
189
190 def build_absolute_uri(self, url):
191 return site.get().build_absolute_uri(url)
192
193 def get_email_message(self, suggestions, comment, template):
194 for suggestion in suggestions:
195 suggestion.unit_url = (
196 self.build_absolute_uri(
197 suggestion.unit.get_translate_url()))
198 return loader.render_to_string(
199 template,
200 context=dict(suggestions=suggestions,
201 comment=comment))
202
203 def notify_suggesters(self, rejected=True, comment=""):
204 for suggester, suggestions in self.users_and_suggestions.items():
205 if rejected:
206 template = self.reject_email_template
207 subject = self.reject_email_subject
208 else:
209 template = self.accept_email_template
210 subject = self.accept_email_subject
211 self.send_mail(template, subject, suggester, suggestions, comment)
212
213 def reject_suggestions(self):
214 for suggestion in self.suggestions:
215 self.reject_suggestion(suggestion)
216
217 def reject(self, comment=""):
218 self.reject_suggestions()
219 if self.should_notify(comment):
220 self.notify_suggesters(rejected=True, comment=comment)
221 if comment:
222 self.add_comments(comment)
223
224 def send_mail(self, template, subject, suggester, suggestions, comment):
225 send_mail(
226 subject,
227 self.get_email_message(
228 suggestions,
229 comment,
230 template),
231 from_email=None,
232 recipient_list=[suggester.email],
233 fail_silently=True)
234
235 def should_notify(self, comment):
236 return (
237 comment
238 and settings.POOTLE_EMAIL_FEEDBACK_ENABLED)
239
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pootle/apps/pootle_store/utils.py b/pootle/apps/pootle_store/utils.py
--- a/pootle/apps/pootle_store/utils.py
+++ b/pootle/apps/pootle_store/utils.py
@@ -15,6 +15,7 @@
from pootle.core.delegate import site
from pootle.core.mail import send_mail
+from pootle.core.signals import update_data
from pootle.i18n.gettext import ugettext as _
from pootle_comment.forms import UnsecuredCommentForm
from pootle_statistics.models import (
@@ -166,6 +167,7 @@
unit.save()
def reject_suggestion(self, suggestion):
+ store = suggestion.unit.store
suggestion.state = SuggestionStates.REJECTED
suggestion.review_time = timezone.now()
suggestion.reviewer = self.reviewer
@@ -176,6 +178,8 @@
self.reviewer,
creation_time=suggestion.review_time).save()
+ update_data.send(store.__class__, instance=store)
+
def accept_suggestions(self):
for suggestion in self.suggestions:
self.accept_suggestion(suggestion)
| {"golden_diff": "diff --git a/pootle/apps/pootle_store/utils.py b/pootle/apps/pootle_store/utils.py\n--- a/pootle/apps/pootle_store/utils.py\n+++ b/pootle/apps/pootle_store/utils.py\n@@ -15,6 +15,7 @@\n \n from pootle.core.delegate import site\n from pootle.core.mail import send_mail\n+from pootle.core.signals import update_data\n from pootle.i18n.gettext import ugettext as _\n from pootle_comment.forms import UnsecuredCommentForm\n from pootle_statistics.models import (\n@@ -166,6 +167,7 @@\n unit.save()\n \n def reject_suggestion(self, suggestion):\n+ store = suggestion.unit.store\n suggestion.state = SuggestionStates.REJECTED\n suggestion.review_time = timezone.now()\n suggestion.reviewer = self.reviewer\n@@ -176,6 +178,8 @@\n self.reviewer,\n creation_time=suggestion.review_time).save()\n \n+ update_data.send(store.__class__, instance=store)\n+\n def accept_suggestions(self):\n for suggestion in self.suggestions:\n self.accept_suggestion(suggestion)\n", "issue": "Suggestions count on stats is wrong\nSee https://mozilla.locamotion.org/cy/ where it says 1 pending suggestion, but clicking on the link says that there are no results.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom collections import OrderedDict\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.template import loader\nfrom django.utils import timezone\n\nfrom pootle.core.delegate import site\nfrom pootle.core.mail import send_mail\nfrom pootle.i18n.gettext import ugettext as _\nfrom pootle_comment.forms import UnsecuredCommentForm\nfrom pootle_statistics.models import (\n Submission, SubmissionFields, SubmissionTypes)\n\nfrom .constants import FUZZY, TRANSLATED\nfrom .models import Suggestion, SuggestionStates\n\n\nUser = get_user_model()\n\n\nclass SuggestionsReview(object):\n accept_email_template = 'editor/email/suggestions_accepted_with_comment.txt'\n accept_email_subject = _(u\"Suggestion accepted with comment\")\n reject_email_template = 'editor/email/suggestions_rejected_with_comment.txt'\n reject_email_subject = _(u\"Suggestion rejected with comment\")\n\n def __init__(self, suggestions=None, reviewer=None):\n self.suggestions = suggestions\n self.reviewer = reviewer\n\n @property\n def users_and_suggestions(self):\n users = {}\n for suggestion in self.suggestions:\n users[suggestion.user] = users.get(suggestion.user, [])\n users[suggestion.user].append(suggestion)\n return users\n\n def add_comments(self, comment):\n for suggestion in self.suggestions:\n UnsecuredCommentForm(\n suggestion,\n dict(comment=comment,\n user=self.reviewer)).save()\n\n def add(self, unit, translation, user=None, touch=True,\n similarity=None, mt_similarity=None):\n \"\"\"Adds a new suggestion to the unit.\n\n :param translation: suggested translation text\n :param user: user who is making the suggestion. 
If it's ``None``,\n the ``system`` user will be used.\n :param touch: whether to update the unit's timestamp after adding\n the suggestion or not.\n :param similarity: human similarity for the new suggestion.\n :param mt_similarity: MT similarity for the new suggestion.\n\n :return: a tuple ``(suggestion, created)`` where ``created`` is a\n boolean indicating if the suggestion was successfully added.\n If the suggestion already exists it's returned as well.\n \"\"\"\n dont_add = (\n not filter(None, translation)\n or translation == unit.target)\n if dont_add:\n return (None, False)\n user = user or User.objects.get_system_user()\n try:\n suggestion = Suggestion.objects.pending().get(\n unit=unit,\n user=user,\n target_f=translation)\n return (suggestion, False)\n except Suggestion.DoesNotExist:\n suggestion = Suggestion.objects.create(\n unit=unit,\n user=user,\n state=SuggestionStates.PENDING,\n target=translation,\n creation_time=timezone.now())\n self.create_submission(\n suggestion,\n SubmissionTypes.SUGG_ADD,\n user,\n similarity=similarity,\n mt_similarity=mt_similarity).save()\n if touch:\n unit.save()\n return (suggestion, True)\n\n def create_submission(self, suggestion, suggestion_type, user, **kwargs):\n return Submission(\n creation_time=kwargs.get(\"creation_time\", suggestion.creation_time),\n translation_project=suggestion.unit.store.translation_project,\n submitter=user,\n unit=suggestion.unit,\n store=suggestion.unit.store,\n type=suggestion_type,\n suggestion=suggestion,\n similarity=kwargs.get(\"similarity\"),\n mt_similarity=kwargs.get(\"mt_similarity\"))\n\n def accept_suggestion(self, suggestion):\n unit = suggestion.unit\n translation_project = unit.store.translation_project\n\n # Save for later\n old_state = unit.state\n old_target = unit.target\n\n # Update some basic attributes so we can create submissions. 
Note\n # these do not conflict with `ScoreLog`'s interests, so it's safe\n unit.target = suggestion.target\n if unit.state == FUZZY:\n unit.state = TRANSLATED\n\n current_time = timezone.now()\n suggestion.state = SuggestionStates.ACCEPTED\n suggestion.reviewer = self.reviewer\n suggestion.review_time = current_time\n suggestion.save()\n create_subs = OrderedDict()\n if old_state != unit.state:\n create_subs[SubmissionFields.STATE] = [old_state, unit.state]\n create_subs[SubmissionFields.TARGET] = [old_target, unit.target]\n subs_created = []\n for field in create_subs:\n kwargs = {\n 'creation_time': current_time,\n 'translation_project': translation_project,\n 'submitter': self.reviewer,\n 'unit': unit,\n 'store': unit.store,\n 'field': field,\n 'type': SubmissionTypes.SUGG_ACCEPT,\n 'old_value': create_subs[field][0],\n 'new_value': create_subs[field][1],\n }\n if field == SubmissionFields.TARGET:\n kwargs['suggestion'] = suggestion\n\n subs_created.append(Submission(**kwargs))\n if subs_created:\n unit.submission_set.add(*subs_created, bulk=False)\n\n # FIXME: remove such a dependency on `ScoreLog`\n # Update current unit instance's attributes\n # important to set these attributes after saving Submission\n # because in the `ScoreLog` we need to access the unit's certain\n # attributes before it was saved\n # THIS NEEDS TO GO ^^\n unit.submitted_by = suggestion.user\n unit.submitted_on = current_time\n unit.reviewed_by = self.reviewer\n unit.reviewed_on = unit.submitted_on\n unit._log_user = self.reviewer\n unit.save()\n\n def reject_suggestion(self, suggestion):\n suggestion.state = SuggestionStates.REJECTED\n suggestion.review_time = timezone.now()\n suggestion.reviewer = self.reviewer\n suggestion.save()\n self.create_submission(\n suggestion,\n SubmissionTypes.SUGG_REJECT,\n self.reviewer,\n creation_time=suggestion.review_time).save()\n\n def accept_suggestions(self):\n for suggestion in self.suggestions:\n self.accept_suggestion(suggestion)\n\n def accept(self, comment=\"\"):\n self.accept_suggestions()\n if self.should_notify(comment):\n self.notify_suggesters(rejected=False, comment=comment)\n if comment:\n self.add_comments(comment=comment)\n\n def build_absolute_uri(self, url):\n return site.get().build_absolute_uri(url)\n\n def get_email_message(self, suggestions, comment, template):\n for suggestion in suggestions:\n suggestion.unit_url = (\n self.build_absolute_uri(\n suggestion.unit.get_translate_url()))\n return loader.render_to_string(\n template,\n context=dict(suggestions=suggestions,\n comment=comment))\n\n def notify_suggesters(self, rejected=True, comment=\"\"):\n for suggester, suggestions in self.users_and_suggestions.items():\n if rejected:\n template = self.reject_email_template\n subject = self.reject_email_subject\n else:\n template = self.accept_email_template\n subject = self.accept_email_subject\n self.send_mail(template, subject, suggester, suggestions, comment)\n\n def reject_suggestions(self):\n for suggestion in self.suggestions:\n self.reject_suggestion(suggestion)\n\n def reject(self, comment=\"\"):\n self.reject_suggestions()\n if self.should_notify(comment):\n self.notify_suggesters(rejected=True, comment=comment)\n if comment:\n self.add_comments(comment)\n\n def send_mail(self, template, subject, suggester, suggestions, comment):\n send_mail(\n subject,\n self.get_email_message(\n suggestions,\n comment,\n template),\n from_email=None,\n recipient_list=[suggester.email],\n fail_silently=True)\n\n def should_notify(self, comment):\n return (\n 
comment\n and settings.POOTLE_EMAIL_FEEDBACK_ENABLED)\n", "path": "pootle/apps/pootle_store/utils.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom collections import OrderedDict\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.template import loader\nfrom django.utils import timezone\n\nfrom pootle.core.delegate import site\nfrom pootle.core.mail import send_mail\nfrom pootle.core.signals import update_data\nfrom pootle.i18n.gettext import ugettext as _\nfrom pootle_comment.forms import UnsecuredCommentForm\nfrom pootle_statistics.models import (\n Submission, SubmissionFields, SubmissionTypes)\n\nfrom .constants import FUZZY, TRANSLATED\nfrom .models import Suggestion, SuggestionStates\n\n\nUser = get_user_model()\n\n\nclass SuggestionsReview(object):\n accept_email_template = 'editor/email/suggestions_accepted_with_comment.txt'\n accept_email_subject = _(u\"Suggestion accepted with comment\")\n reject_email_template = 'editor/email/suggestions_rejected_with_comment.txt'\n reject_email_subject = _(u\"Suggestion rejected with comment\")\n\n def __init__(self, suggestions=None, reviewer=None):\n self.suggestions = suggestions\n self.reviewer = reviewer\n\n @property\n def users_and_suggestions(self):\n users = {}\n for suggestion in self.suggestions:\n users[suggestion.user] = users.get(suggestion.user, [])\n users[suggestion.user].append(suggestion)\n return users\n\n def add_comments(self, comment):\n for suggestion in self.suggestions:\n UnsecuredCommentForm(\n suggestion,\n dict(comment=comment,\n user=self.reviewer)).save()\n\n def add(self, unit, translation, user=None, touch=True,\n similarity=None, mt_similarity=None):\n \"\"\"Adds a new suggestion to the unit.\n\n :param translation: suggested translation text\n :param user: user who is making the suggestion. 
If it's ``None``,\n the ``system`` user will be used.\n :param touch: whether to update the unit's timestamp after adding\n the suggestion or not.\n :param similarity: human similarity for the new suggestion.\n :param mt_similarity: MT similarity for the new suggestion.\n\n :return: a tuple ``(suggestion, created)`` where ``created`` is a\n boolean indicating if the suggestion was successfully added.\n If the suggestion already exists it's returned as well.\n \"\"\"\n dont_add = (\n not filter(None, translation)\n or translation == unit.target)\n if dont_add:\n return (None, False)\n user = user or User.objects.get_system_user()\n try:\n suggestion = Suggestion.objects.pending().get(\n unit=unit,\n user=user,\n target_f=translation)\n return (suggestion, False)\n except Suggestion.DoesNotExist:\n suggestion = Suggestion.objects.create(\n unit=unit,\n user=user,\n state=SuggestionStates.PENDING,\n target=translation,\n creation_time=timezone.now())\n self.create_submission(\n suggestion,\n SubmissionTypes.SUGG_ADD,\n user,\n similarity=similarity,\n mt_similarity=mt_similarity).save()\n if touch:\n unit.save()\n return (suggestion, True)\n\n def create_submission(self, suggestion, suggestion_type, user, **kwargs):\n return Submission(\n creation_time=kwargs.get(\"creation_time\", suggestion.creation_time),\n translation_project=suggestion.unit.store.translation_project,\n submitter=user,\n unit=suggestion.unit,\n store=suggestion.unit.store,\n type=suggestion_type,\n suggestion=suggestion,\n similarity=kwargs.get(\"similarity\"),\n mt_similarity=kwargs.get(\"mt_similarity\"))\n\n def accept_suggestion(self, suggestion):\n unit = suggestion.unit\n translation_project = unit.store.translation_project\n\n # Save for later\n old_state = unit.state\n old_target = unit.target\n\n # Update some basic attributes so we can create submissions. 
Note\n # these do not conflict with `ScoreLog`'s interests, so it's safe\n unit.target = suggestion.target\n if unit.state == FUZZY:\n unit.state = TRANSLATED\n\n current_time = timezone.now()\n suggestion.state = SuggestionStates.ACCEPTED\n suggestion.reviewer = self.reviewer\n suggestion.review_time = current_time\n suggestion.save()\n create_subs = OrderedDict()\n if old_state != unit.state:\n create_subs[SubmissionFields.STATE] = [old_state, unit.state]\n create_subs[SubmissionFields.TARGET] = [old_target, unit.target]\n subs_created = []\n for field in create_subs:\n kwargs = {\n 'creation_time': current_time,\n 'translation_project': translation_project,\n 'submitter': self.reviewer,\n 'unit': unit,\n 'store': unit.store,\n 'field': field,\n 'type': SubmissionTypes.SUGG_ACCEPT,\n 'old_value': create_subs[field][0],\n 'new_value': create_subs[field][1],\n }\n if field == SubmissionFields.TARGET:\n kwargs['suggestion'] = suggestion\n\n subs_created.append(Submission(**kwargs))\n if subs_created:\n unit.submission_set.add(*subs_created, bulk=False)\n\n # FIXME: remove such a dependency on `ScoreLog`\n # Update current unit instance's attributes\n # important to set these attributes after saving Submission\n # because in the `ScoreLog` we need to access the unit's certain\n # attributes before it was saved\n # THIS NEEDS TO GO ^^\n unit.submitted_by = suggestion.user\n unit.submitted_on = current_time\n unit.reviewed_by = self.reviewer\n unit.reviewed_on = unit.submitted_on\n unit._log_user = self.reviewer\n unit.save()\n\n def reject_suggestion(self, suggestion):\n store = suggestion.unit.store\n suggestion.state = SuggestionStates.REJECTED\n suggestion.review_time = timezone.now()\n suggestion.reviewer = self.reviewer\n suggestion.save()\n self.create_submission(\n suggestion,\n SubmissionTypes.SUGG_REJECT,\n self.reviewer,\n creation_time=suggestion.review_time).save()\n\n update_data.send(store.__class__, instance=store)\n\n def accept_suggestions(self):\n for suggestion in self.suggestions:\n self.accept_suggestion(suggestion)\n\n def accept(self, comment=\"\"):\n self.accept_suggestions()\n if self.should_notify(comment):\n self.notify_suggesters(rejected=False, comment=comment)\n if comment:\n self.add_comments(comment=comment)\n\n def build_absolute_uri(self, url):\n return site.get().build_absolute_uri(url)\n\n def get_email_message(self, suggestions, comment, template):\n for suggestion in suggestions:\n suggestion.unit_url = (\n self.build_absolute_uri(\n suggestion.unit.get_translate_url()))\n return loader.render_to_string(\n template,\n context=dict(suggestions=suggestions,\n comment=comment))\n\n def notify_suggesters(self, rejected=True, comment=\"\"):\n for suggester, suggestions in self.users_and_suggestions.items():\n if rejected:\n template = self.reject_email_template\n subject = self.reject_email_subject\n else:\n template = self.accept_email_template\n subject = self.accept_email_subject\n self.send_mail(template, subject, suggester, suggestions, comment)\n\n def reject_suggestions(self):\n for suggestion in self.suggestions:\n self.reject_suggestion(suggestion)\n\n def reject(self, comment=\"\"):\n self.reject_suggestions()\n if self.should_notify(comment):\n self.notify_suggesters(rejected=True, comment=comment)\n if comment:\n self.add_comments(comment)\n\n def send_mail(self, template, subject, suggester, suggestions, comment):\n send_mail(\n subject,\n self.get_email_message(\n suggestions,\n comment,\n template),\n from_email=None,\n 
recipient_list=[suggester.email],\n fail_silently=True)\n\n def should_notify(self, comment):\n return (\n comment\n and settings.POOTLE_EMAIL_FEEDBACK_ENABLED)\n", "path": "pootle/apps/pootle_store/utils.py"}]} | 2,644 | 260 |
gh_patches_debug_13031 | rasdani/github-patches | git_diff | inventree__InvenTree-6284 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Platform UI refuses to log out
### Please verify that this bug has NOT been raised before.
- [X] I checked and didn't find a similar issue
### Describe the bug*
Discovered when I was setting up Platform UI for dev: trying to log out simply sends you to the Home page and tells you that you were already logged in

### Steps to Reproduce
Not sure about the exact trigger here. It's still occurring to me as it did yesterday.
### Expected behaviour
Being able to log out
### Deployment Method
- [ ] Docker
- [ ] Bare metal
### Version Information
InvenTree - inventree.org
The Open-Source Inventory Management System
Installation paths:
Base /workspaces/InvenTree
Config /workspaces/InvenTree/dev/config.yaml
Media /workspaces/InvenTree/dev/media
Static /workspaces/InvenTree/dev/static
Versions:
Python 3.10.10
Django 3.2.23
InvenTree 0.13.0 dev
API 152
Node v20.9.0
Yarn 1.22.19
Commit hash:dabd95d
Commit date:2023-11-21
### Please verify if you can reproduce this bug on the demo site.
- [ ] I can reproduce this bug on the demo site.
### Relevant log output
```shell
Created new API token for user 'admin' (name='inventree-web-app')
[22/Nov/2023 17:23:56] "GET /api/user/token/?name=inventree-web-app HTTP/1.1" 200 114
[22/Nov/2023 17:23:56] "GET /api/user/me/ HTTP/1.1" 200 134
[22/Nov/2023 17:23:56] "GET /api/notifications/?read=false&limit=1 HTTP/1.1" 200 52
[22/Nov/2023 17:23:57] "GET /api/user/roles/ HTTP/1.1" 200 527
[22/Nov/2023 17:23:57] "GET /api/settings/global/ HTTP/1.1" 200 27344
Created new API token for user 'admin' (name='inventree-web-app')
[22/Nov/2023 17:23:57] "GET /api/user/token/?name=inventree-web-app HTTP/1.1" 200 114
Background worker check failed
Email backend not configured
InvenTree system health checks failed
[22/Nov/2023 17:23:57] "GET /api/ HTTP/1.1" 200 1145
[22/Nov/2023 17:23:57] "GET /api/user/me/ HTTP/1.1" 200 134
[22/Nov/2023 17:23:57] "GET /api/generic/status/ HTTP/1.1" 200 5851
[22/Nov/2023 17:23:57] "GET /api/user/roles/ HTTP/1.1" 200 527
Background worker check failed
Email backend not configured
InvenTree system health checks failed
[22/Nov/2023 17:23:58] "GET /api/settings/global/ HTTP/1.1" 200 27344
[22/Nov/2023 17:23:58] "GET /api/ HTTP/1.1" 200 1145
[22/Nov/2023 17:23:58] "GET /api/settings/user/ HTTP/1.1" 200 13878
[22/Nov/2023 17:23:58] "GET /api/generic/status/ HTTP/1.1" 200 5851
[22/Nov/2023 17:23:58] "GET /api/settings/user/ HTTP/1.1" 200 13878
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `InvenTree/web/urls.py`
Content:
```
1 """URLs for web app."""
2
3 from django.conf import settings
4 from django.shortcuts import redirect
5 from django.urls import include, path
6 from django.views.decorators.csrf import ensure_csrf_cookie
7 from django.views.generic import TemplateView
8
9
10 class RedirectAssetView(TemplateView):
11 """View to redirect to static asset."""
12
13 def get(self, request, *args, **kwargs):
14 """Redirect to static asset."""
15 return redirect(
16 f"{settings.STATIC_URL}web/assets/{kwargs['path']}", permanent=True
17 )
18
19
20 spa_view = ensure_csrf_cookie(TemplateView.as_view(template_name='web/index.html'))
21 assets_path = path('assets/<path:path>', RedirectAssetView.as_view())
22
23
24 urlpatterns = [
25 path(
26 f'{settings.FRONTEND_URL_BASE}/',
27 include([
28 assets_path,
29 path(
30 'set-password?uid=<uid>&token=<token>',
31 spa_view,
32 name='password_reset_confirm',
33 ),
34 path('', spa_view),
35 ]),
36 ),
37 assets_path,
38 path(settings.FRONTEND_URL_BASE, spa_view, name='platform'),
39 ]
40
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/InvenTree/web/urls.py b/InvenTree/web/urls.py
--- a/InvenTree/web/urls.py
+++ b/InvenTree/web/urls.py
@@ -2,7 +2,7 @@
from django.conf import settings
from django.shortcuts import redirect
-from django.urls import include, path
+from django.urls import include, path, re_path
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.generic import TemplateView
@@ -31,7 +31,7 @@
spa_view,
name='password_reset_confirm',
),
- path('', spa_view),
+ re_path('.*', spa_view),
]),
),
assets_path,
| {"golden_diff": "diff --git a/InvenTree/web/urls.py b/InvenTree/web/urls.py\n--- a/InvenTree/web/urls.py\n+++ b/InvenTree/web/urls.py\n@@ -2,7 +2,7 @@\n \n from django.conf import settings\n from django.shortcuts import redirect\n-from django.urls import include, path\n+from django.urls import include, path, re_path\n from django.views.decorators.csrf import ensure_csrf_cookie\n from django.views.generic import TemplateView\n \n@@ -31,7 +31,7 @@\n spa_view,\n name='password_reset_confirm',\n ),\n- path('', spa_view),\n+ re_path('.*', spa_view),\n ]),\n ),\n assets_path,\n", "issue": "Platform UI refuses to log out\n### Please verify that this bug has NOT been raised before.\n\n- [X] I checked and didn't find a similar issue\n\n### Describe the bug*\n\nDiscovered when I was setting up Platorm UI for dev, trying to log out simply sends you to the Home page and tells you that you were already logged in\r\n\r\n\n\n### Steps to Reproduce\n\nNot sure about the exact trigger here. It's still occuring to me as it did yesterday.\r\n\n\n### Expected behaviour\n\nBeing able to log out\n\n### Deployment Method\n\n- [ ] Docker\n- [ ] Bare metal\n\n### Version Information\n\nInvenTree - inventree.org\r\nThe Open-Source Inventory Management System\r\n\r\n\r\nInstallation paths:\r\nBase /workspaces/InvenTree\r\nConfig /workspaces/InvenTree/dev/config.yaml\r\nMedia /workspaces/InvenTree/dev/media\r\nStatic /workspaces/InvenTree/dev/static\r\n\r\nVersions:\r\nPython 3.10.10\r\nDjango 3.2.23\r\nInvenTree 0.13.0 dev\r\nAPI 152\r\nNode v20.9.0\r\nYarn 1.22.19\r\n\r\nCommit hash:dabd95d\r\nCommit date:2023-11-21\n\n### Please verify if you can reproduce this bug on the demo site.\n\n- [ ] I can reproduce this bug on the demo site.\n\n### Relevant log output\n\n```shell\nCreated new API token for user 'admin' (name='inventree-web-app')\r\n[22/Nov/2023 17:23:56] \"GET /api/user/token/?name=inventree-web-app HTTP/1.1\" 200 114\r\n[22/Nov/2023 17:23:56] \"GET /api/user/me/ HTTP/1.1\" 200 134\r\n[22/Nov/2023 17:23:56] \"GET /api/notifications/?read=false&limit=1 HTTP/1.1\" 200 52\r\n[22/Nov/2023 17:23:57] \"GET /api/user/roles/ HTTP/1.1\" 200 527\r\n[22/Nov/2023 17:23:57] \"GET /api/settings/global/ HTTP/1.1\" 200 27344\r\nCreated new API token for user 'admin' (name='inventree-web-app')\r\n[22/Nov/2023 17:23:57] \"GET /api/user/token/?name=inventree-web-app HTTP/1.1\" 200 114\r\nBackground worker check failed\r\nEmail backend not configured\r\nInvenTree system health checks failed\r\n[22/Nov/2023 17:23:57] \"GET /api/ HTTP/1.1\" 200 1145\r\n[22/Nov/2023 17:23:57] \"GET /api/user/me/ HTTP/1.1\" 200 134\r\n[22/Nov/2023 17:23:57] \"GET /api/generic/status/ HTTP/1.1\" 200 5851\r\n[22/Nov/2023 17:23:57] \"GET /api/user/roles/ HTTP/1.1\" 200 527\r\nBackground worker check failed\r\nEmail backend not configured\r\nInvenTree system health checks failed\r\n[22/Nov/2023 17:23:58] \"GET /api/settings/global/ HTTP/1.1\" 200 27344\r\n[22/Nov/2023 17:23:58] \"GET /api/ HTTP/1.1\" 200 1145\r\n[22/Nov/2023 17:23:58] \"GET /api/settings/user/ HTTP/1.1\" 200 13878\r\n[22/Nov/2023 17:23:58] \"GET /api/generic/status/ HTTP/1.1\" 200 5851\r\n[22/Nov/2023 17:23:58] \"GET /api/settings/user/ HTTP/1.1\" 200 13878\n```\n\n", "before_files": [{"content": "\"\"\"URLs for web app.\"\"\"\n\nfrom django.conf import settings\nfrom django.shortcuts import redirect\nfrom django.urls import include, path\nfrom django.views.decorators.csrf import ensure_csrf_cookie\nfrom django.views.generic import TemplateView\n\n\nclass 
RedirectAssetView(TemplateView):\n \"\"\"View to redirect to static asset.\"\"\"\n\n def get(self, request, *args, **kwargs):\n \"\"\"Redirect to static asset.\"\"\"\n return redirect(\n f\"{settings.STATIC_URL}web/assets/{kwargs['path']}\", permanent=True\n )\n\n\nspa_view = ensure_csrf_cookie(TemplateView.as_view(template_name='web/index.html'))\nassets_path = path('assets/<path:path>', RedirectAssetView.as_view())\n\n\nurlpatterns = [\n path(\n f'{settings.FRONTEND_URL_BASE}/',\n include([\n assets_path,\n path(\n 'set-password?uid=<uid>&token=<token>',\n spa_view,\n name='password_reset_confirm',\n ),\n path('', spa_view),\n ]),\n ),\n assets_path,\n path(settings.FRONTEND_URL_BASE, spa_view, name='platform'),\n]\n", "path": "InvenTree/web/urls.py"}], "after_files": [{"content": "\"\"\"URLs for web app.\"\"\"\n\nfrom django.conf import settings\nfrom django.shortcuts import redirect\nfrom django.urls import include, path, re_path\nfrom django.views.decorators.csrf import ensure_csrf_cookie\nfrom django.views.generic import TemplateView\n\n\nclass RedirectAssetView(TemplateView):\n \"\"\"View to redirect to static asset.\"\"\"\n\n def get(self, request, *args, **kwargs):\n \"\"\"Redirect to static asset.\"\"\"\n return redirect(\n f\"{settings.STATIC_URL}web/assets/{kwargs['path']}\", permanent=True\n )\n\n\nspa_view = ensure_csrf_cookie(TemplateView.as_view(template_name='web/index.html'))\nassets_path = path('assets/<path:path>', RedirectAssetView.as_view())\n\n\nurlpatterns = [\n path(\n f'{settings.FRONTEND_URL_BASE}/',\n include([\n assets_path,\n path(\n 'set-password?uid=<uid>&token=<token>',\n spa_view,\n name='password_reset_confirm',\n ),\n re_path('.*', spa_view),\n ]),\n ),\n assets_path,\n path(settings.FRONTEND_URL_BASE, spa_view, name='platform'),\n]\n", "path": "InvenTree/web/urls.py"}]} | 1,678 | 155 |
gh_patches_debug_2424 | rasdani/github-patches | git_diff | microsoft__ptvsd-362 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PTVSD fails to run on Windows
```
Traceback (most recent call last):
File "C:\Users\karth\.vscode\extensions\ms-python.python-2018.3.1\pythonFiles\experimental\ptvsd_launcher.py", line 96,
in <module>
vspd.debug(filename, port_num, debug_id, debug_options, run_as)
File "c:\git\ptvsd\ptvsd\debugger.py", line 36, in debug
run(address, filename, *args, **kwargs)
File "c:\git\ptvsd\ptvsd\__main__.py", line 37, in run_file
run(argv, addr, **kwargs)
File "c:\git\ptvsd\ptvsd\__main__.py", line 85, in _run
daemon = _install(_pydevd, addr, **kwargs)
File "c:\git\ptvsd\ptvsd\pydevd_hooks.py", line 52, in install
daemon = Daemon(**kwargs)
File "c:\git\ptvsd\ptvsd\daemon.py", line 53, in __init__
self.install_exit_handlers()
File "c:\git\ptvsd\ptvsd\daemon.py", line 91, in install_exit_handlers
signal.SIGHUP: [],
AttributeError: module 'signal' has no attribute 'SIGHUP'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ptvsd/daemon.py`
Content:
```
1 import atexit
2 import os
3 import platform
4 import signal
5 import sys
6
7 from ptvsd import wrapper
8 from ptvsd.socket import close_socket
9
10
11 def _wait_on_exit():
12 if sys.__stdout__ is not None:
13 try:
14 import msvcrt
15 except ImportError:
16 sys.__stdout__.write('Press Enter to continue . . . ')
17 sys.__stdout__.flush()
18 sys.__stdin__.read(1)
19 else:
20 sys.__stdout__.write('Press any key to continue . . . ')
21 sys.__stdout__.flush()
22 msvcrt.getch()
23
24
25 class DaemonClosedError(RuntimeError):
26 """Indicates that a Daemon was unexpectedly closed."""
27 def __init__(self, msg='closed'):
28 super(DaemonClosedError, self).__init__(msg)
29
30
31 class Daemon(object):
32 """The process-level manager for the VSC protocol debug adapter."""
33
34 exitcode = 0
35
36 def __init__(self, wait_on_exit=_wait_on_exit,
37 addhandlers=True, killonclose=True):
38 self.wait_on_exit = wait_on_exit
39 self.killonclose = killonclose
40
41 self._closed = False
42 self._exiting_via_atexit_handler = False
43
44 self._pydevd = None
45 self._server = None
46 self._client = None
47 self._adapter = None
48
49 self._signal_handlers = None
50 self._atexit_handlers = None
51 self._handlers_installed = False
52 if addhandlers:
53 self.install_exit_handlers()
54
55 @property
56 def pydevd(self):
57 return self._pydevd
58
59 @property
60 def server(self):
61 return self._server
62
63 @property
64 def client(self):
65 return self._client
66
67 @property
68 def adapter(self):
69 return self._adapter
70
71 def start(self, server=None):
72 """Return the "socket" to use for pydevd after setting it up."""
73 if self._closed:
74 raise DaemonClosedError()
75 if self._pydevd is not None:
76 raise RuntimeError('already started')
77 self._pydevd = wrapper.PydevdSocket(
78 self._handle_pydevd_message,
79 self._handle_pydevd_close,
80 self._getpeername,
81 self._getsockname,
82 )
83 self._server = server
84 return self._pydevd
85
86 def install_exit_handlers(self):
87 """Set the placeholder handlers."""
88 if self._signal_handlers is not None:
89 raise RuntimeError('exit handlers already installed')
90 self._signal_handlers = {
91 signal.SIGHUP: [],
92 }
93 self._atexit_handlers = []
94
95 if platform.system() != 'Windows':
96 try:
97 for sig in self._signal_handlers:
98 signal.signal(sig, self._signal_handler)
99 except ValueError:
100 # Wasn't called in main thread!
101 raise
102 atexit.register(self._atexit_handler)
103
104 def set_connection(self, client):
105 """Set the client socket to use for the debug adapter.
106
107 A VSC message loop is started for the client.
108 """
109 if self._closed:
110 raise DaemonClosedError()
111 if self._pydevd is None:
112 raise RuntimeError('not started yet')
113 if self._client is not None:
114 raise RuntimeError('connection already set')
115 self._client = client
116
117 self._adapter = wrapper.VSCodeMessageProcessor(
118 client,
119 self._pydevd.pydevd_notify,
120 self._pydevd.pydevd_request,
121 self._handle_vsc_disconnect,
122 self._handle_vsc_close,
123 )
124 name = 'ptvsd.Client' if self._server is None else 'ptvsd.Server'
125 self._adapter.start(name)
126 if self._signal_handlers is not None:
127 self._add_signal_handlers()
128 self._add_atexit_handler()
129 return self._adapter
130
131 def close(self):
132 """Stop all loops and release all resources."""
133 if self._closed:
134 raise DaemonClosedError('already closed')
135 self._closed = True
136
137 if self._adapter is not None:
138 normal, abnormal = self._adapter._wait_options()
139 if (normal and not self.exitcode) or (abnormal and self.exitcode):
140 self.wait_on_exit()
141
142 if self._pydevd is not None:
143 close_socket(self._pydevd)
144 if self._client is not None:
145 self._release_connection()
146
147 def re_build_breakpoints(self):
148 self.adapter.re_build_breakpoints()
149
150 # internal methods
151
152 def _signal_handler(self, signum, frame):
153 for handle_signal in self._signal_handlers.get(signum, ()):
154 handle_signal(signum, frame)
155
156 def _atexit_handler(self):
157 for handle_atexit in self._atexit_handlers:
158 handle_atexit()
159
160 def _add_atexit_handler(self):
161 def handler():
162 self._exiting_via_atexit_handler = True
163 if not self._closed:
164 self.close()
165 if self._adapter is not None:
166 # TODO: Do this in VSCodeMessageProcessor.close()?
167 self._adapter._wait_for_server_thread()
168 self._atexit_handlers.append(handler)
169
170 def _add_signal_handlers(self):
171 def handler(signum, frame):
172 if not self._closed:
173 self.close()
174 sys.exit(0)
175 self._signal_handlers[signal.SIGHUP].append(handler)
176
177 def _release_connection(self):
178 if self._adapter is not None:
179 # TODO: This is not correct in the "attach" case.
180 self._adapter.handle_pydevd_stopped(self.exitcode)
181 self._adapter.close()
182 close_socket(self._client)
183
184 # internal methods for PyDevdSocket().
185
186 def _handle_pydevd_message(self, cmdid, seq, text):
187 if self._adapter is not None:
188 self._adapter.on_pydevd_event(cmdid, seq, text)
189
190 def _handle_pydevd_close(self):
191 if self._closed:
192 return
193 self.close()
194
195 def _getpeername(self):
196 if self._client is None:
197 raise NotImplementedError
198 return self._client.getpeername()
199
200 def _getsockname(self):
201 if self._client is None:
202 raise NotImplementedError
203 return self._client.getsockname()
204
205 # internal methods for VSCodeMessageProcessor
206
207 def _handle_vsc_disconnect(self, kill=False):
208 if not self._closed:
209 self.close()
210 if kill and self.killonclose and not self._exiting_via_atexit_handler:
211 os.kill(os.getpid(), signal.SIGTERM)
212
213 def _handle_vsc_close(self):
214 if self._closed:
215 return
216 self.close()
217
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ptvsd/daemon.py b/ptvsd/daemon.py
--- a/ptvsd/daemon.py
+++ b/ptvsd/daemon.py
@@ -168,6 +168,9 @@
self._atexit_handlers.append(handler)
def _add_signal_handlers(self):
+ if platform.system() == 'Windows':
+ return
+
def handler(signum, frame):
if not self._closed:
self.close()
| {"golden_diff": "diff --git a/ptvsd/daemon.py b/ptvsd/daemon.py\n--- a/ptvsd/daemon.py\n+++ b/ptvsd/daemon.py\n@@ -168,6 +168,9 @@\n self._atexit_handlers.append(handler)\n \n def _add_signal_handlers(self):\n+ if platform.system() == 'Windows':\n+ return\n+\n def handler(signum, frame):\n if not self._closed:\n self.close()\n", "issue": "PTVSD fails to run on windows\n```\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\karth\\.vscode\\extensions\\ms-python.python-2018.3.1\\pythonFiles\\experimental\\ptvsd_launcher.py\", line 96,\r\nin <module>\r\n vspd.debug(filename, port_num, debug_id, debug_options, run_as)\r\n File \"c:\\git\\ptvsd\\ptvsd\\debugger.py\", line 36, in debug\r\n run(address, filename, *args, **kwargs)\r\n File \"c:\\git\\ptvsd\\ptvsd\\__main__.py\", line 37, in run_file\r\n run(argv, addr, **kwargs)\r\n File \"c:\\git\\ptvsd\\ptvsd\\__main__.py\", line 85, in _run\r\n daemon = _install(_pydevd, addr, **kwargs)\r\n File \"c:\\git\\ptvsd\\ptvsd\\pydevd_hooks.py\", line 52, in install\r\n daemon = Daemon(**kwargs)\r\n File \"c:\\git\\ptvsd\\ptvsd\\daemon.py\", line 53, in __init__\r\n self.install_exit_handlers()\r\n File \"c:\\git\\ptvsd\\ptvsd\\daemon.py\", line 91, in install_exit_handlers\r\n signal.SIGHUP: [],\r\nAttributeError: module 'signal' has no attribute 'SIGHUP'\r\n```\nPTVSD fails to run on windows\n```\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\karth\\.vscode\\extensions\\ms-python.python-2018.3.1\\pythonFiles\\experimental\\ptvsd_launcher.py\", line 96,\r\nin <module>\r\n vspd.debug(filename, port_num, debug_id, debug_options, run_as)\r\n File \"c:\\git\\ptvsd\\ptvsd\\debugger.py\", line 36, in debug\r\n run(address, filename, *args, **kwargs)\r\n File \"c:\\git\\ptvsd\\ptvsd\\__main__.py\", line 37, in run_file\r\n run(argv, addr, **kwargs)\r\n File \"c:\\git\\ptvsd\\ptvsd\\__main__.py\", line 85, in _run\r\n daemon = _install(_pydevd, addr, **kwargs)\r\n File \"c:\\git\\ptvsd\\ptvsd\\pydevd_hooks.py\", line 52, in install\r\n daemon = Daemon(**kwargs)\r\n File \"c:\\git\\ptvsd\\ptvsd\\daemon.py\", line 53, in __init__\r\n self.install_exit_handlers()\r\n File \"c:\\git\\ptvsd\\ptvsd\\daemon.py\", line 91, in install_exit_handlers\r\n signal.SIGHUP: [],\r\nAttributeError: module 'signal' has no attribute 'SIGHUP'\r\n```\n", "before_files": [{"content": "import atexit\nimport os\nimport platform\nimport signal\nimport sys\n\nfrom ptvsd import wrapper\nfrom ptvsd.socket import close_socket\n\n\ndef _wait_on_exit():\n if sys.__stdout__ is not None:\n try:\n import msvcrt\n except ImportError:\n sys.__stdout__.write('Press Enter to continue . . . ')\n sys.__stdout__.flush()\n sys.__stdin__.read(1)\n else:\n sys.__stdout__.write('Press any key to continue . . . 
')\n sys.__stdout__.flush()\n msvcrt.getch()\n\n\nclass DaemonClosedError(RuntimeError):\n \"\"\"Indicates that a Daemon was unexpectedly closed.\"\"\"\n def __init__(self, msg='closed'):\n super(DaemonClosedError, self).__init__(msg)\n\n\nclass Daemon(object):\n \"\"\"The process-level manager for the VSC protocol debug adapter.\"\"\"\n\n exitcode = 0\n\n def __init__(self, wait_on_exit=_wait_on_exit,\n addhandlers=True, killonclose=True):\n self.wait_on_exit = wait_on_exit\n self.killonclose = killonclose\n\n self._closed = False\n self._exiting_via_atexit_handler = False\n\n self._pydevd = None\n self._server = None\n self._client = None\n self._adapter = None\n\n self._signal_handlers = None\n self._atexit_handlers = None\n self._handlers_installed = False\n if addhandlers:\n self.install_exit_handlers()\n\n @property\n def pydevd(self):\n return self._pydevd\n\n @property\n def server(self):\n return self._server\n\n @property\n def client(self):\n return self._client\n\n @property\n def adapter(self):\n return self._adapter\n\n def start(self, server=None):\n \"\"\"Return the \"socket\" to use for pydevd after setting it up.\"\"\"\n if self._closed:\n raise DaemonClosedError()\n if self._pydevd is not None:\n raise RuntimeError('already started')\n self._pydevd = wrapper.PydevdSocket(\n self._handle_pydevd_message,\n self._handle_pydevd_close,\n self._getpeername,\n self._getsockname,\n )\n self._server = server\n return self._pydevd\n\n def install_exit_handlers(self):\n \"\"\"Set the placeholder handlers.\"\"\"\n if self._signal_handlers is not None:\n raise RuntimeError('exit handlers already installed')\n self._signal_handlers = {\n signal.SIGHUP: [],\n }\n self._atexit_handlers = []\n\n if platform.system() != 'Windows':\n try:\n for sig in self._signal_handlers:\n signal.signal(sig, self._signal_handler)\n except ValueError:\n # Wasn't called in main thread!\n raise\n atexit.register(self._atexit_handler)\n\n def set_connection(self, client):\n \"\"\"Set the client socket to use for the debug adapter.\n\n A VSC message loop is started for the client.\n \"\"\"\n if self._closed:\n raise DaemonClosedError()\n if self._pydevd is None:\n raise RuntimeError('not started yet')\n if self._client is not None:\n raise RuntimeError('connection already set')\n self._client = client\n\n self._adapter = wrapper.VSCodeMessageProcessor(\n client,\n self._pydevd.pydevd_notify,\n self._pydevd.pydevd_request,\n self._handle_vsc_disconnect,\n self._handle_vsc_close,\n )\n name = 'ptvsd.Client' if self._server is None else 'ptvsd.Server'\n self._adapter.start(name)\n if self._signal_handlers is not None:\n self._add_signal_handlers()\n self._add_atexit_handler()\n return self._adapter\n\n def close(self):\n \"\"\"Stop all loops and release all resources.\"\"\"\n if self._closed:\n raise DaemonClosedError('already closed')\n self._closed = True\n\n if self._adapter is not None:\n normal, abnormal = self._adapter._wait_options()\n if (normal and not self.exitcode) or (abnormal and self.exitcode):\n self.wait_on_exit()\n\n if self._pydevd is not None:\n close_socket(self._pydevd)\n if self._client is not None:\n self._release_connection()\n\n def re_build_breakpoints(self):\n self.adapter.re_build_breakpoints()\n\n # internal methods\n\n def _signal_handler(self, signum, frame):\n for handle_signal in self._signal_handlers.get(signum, ()):\n handle_signal(signum, frame)\n\n def _atexit_handler(self):\n for handle_atexit in self._atexit_handlers:\n handle_atexit()\n\n def _add_atexit_handler(self):\n 
def handler():\n self._exiting_via_atexit_handler = True\n if not self._closed:\n self.close()\n if self._adapter is not None:\n # TODO: Do this in VSCodeMessageProcessor.close()?\n self._adapter._wait_for_server_thread()\n self._atexit_handlers.append(handler)\n\n def _add_signal_handlers(self):\n def handler(signum, frame):\n if not self._closed:\n self.close()\n sys.exit(0)\n self._signal_handlers[signal.SIGHUP].append(handler)\n\n def _release_connection(self):\n if self._adapter is not None:\n # TODO: This is not correct in the \"attach\" case.\n self._adapter.handle_pydevd_stopped(self.exitcode)\n self._adapter.close()\n close_socket(self._client)\n\n # internal methods for PyDevdSocket().\n\n def _handle_pydevd_message(self, cmdid, seq, text):\n if self._adapter is not None:\n self._adapter.on_pydevd_event(cmdid, seq, text)\n\n def _handle_pydevd_close(self):\n if self._closed:\n return\n self.close()\n\n def _getpeername(self):\n if self._client is None:\n raise NotImplementedError\n return self._client.getpeername()\n\n def _getsockname(self):\n if self._client is None:\n raise NotImplementedError\n return self._client.getsockname()\n\n # internal methods for VSCodeMessageProcessor\n\n def _handle_vsc_disconnect(self, kill=False):\n if not self._closed:\n self.close()\n if kill and self.killonclose and not self._exiting_via_atexit_handler:\n os.kill(os.getpid(), signal.SIGTERM)\n\n def _handle_vsc_close(self):\n if self._closed:\n return\n self.close()\n", "path": "ptvsd/daemon.py"}], "after_files": [{"content": "import atexit\nimport os\nimport platform\nimport signal\nimport sys\n\nfrom ptvsd import wrapper\nfrom ptvsd.socket import close_socket\n\n\ndef _wait_on_exit():\n if sys.__stdout__ is not None:\n try:\n import msvcrt\n except ImportError:\n sys.__stdout__.write('Press Enter to continue . . . ')\n sys.__stdout__.flush()\n sys.__stdin__.read(1)\n else:\n sys.__stdout__.write('Press any key to continue . . . 
')\n sys.__stdout__.flush()\n msvcrt.getch()\n\n\nclass DaemonClosedError(RuntimeError):\n \"\"\"Indicates that a Daemon was unexpectedly closed.\"\"\"\n def __init__(self, msg='closed'):\n super(DaemonClosedError, self).__init__(msg)\n\n\nclass Daemon(object):\n \"\"\"The process-level manager for the VSC protocol debug adapter.\"\"\"\n\n exitcode = 0\n\n def __init__(self, wait_on_exit=_wait_on_exit,\n addhandlers=True, killonclose=True):\n self.wait_on_exit = wait_on_exit\n self.killonclose = killonclose\n\n self._closed = False\n self._exiting_via_atexit_handler = False\n\n self._pydevd = None\n self._server = None\n self._client = None\n self._adapter = None\n\n self._signal_handlers = None\n self._atexit_handlers = None\n self._handlers_installed = False\n if addhandlers:\n self.install_exit_handlers()\n\n @property\n def pydevd(self):\n return self._pydevd\n\n @property\n def server(self):\n return self._server\n\n @property\n def client(self):\n return self._client\n\n @property\n def adapter(self):\n return self._adapter\n\n def start(self, server=None):\n \"\"\"Return the \"socket\" to use for pydevd after setting it up.\"\"\"\n if self._closed:\n raise DaemonClosedError()\n if self._pydevd is not None:\n raise RuntimeError('already started')\n self._pydevd = wrapper.PydevdSocket(\n self._handle_pydevd_message,\n self._handle_pydevd_close,\n self._getpeername,\n self._getsockname,\n )\n self._server = server\n return self._pydevd\n\n def install_exit_handlers(self):\n \"\"\"Set the placeholder handlers.\"\"\"\n if self._signal_handlers is not None:\n raise RuntimeError('exit handlers already installed')\n self._signal_handlers = {\n signal.SIGHUP: [],\n }\n self._atexit_handlers = []\n\n if platform.system() != 'Windows':\n try:\n for sig in self._signal_handlers:\n signal.signal(sig, self._signal_handler)\n except ValueError:\n # Wasn't called in main thread!\n raise\n atexit.register(self._atexit_handler)\n\n def set_connection(self, client):\n \"\"\"Set the client socket to use for the debug adapter.\n\n A VSC message loop is started for the client.\n \"\"\"\n if self._closed:\n raise DaemonClosedError()\n if self._pydevd is None:\n raise RuntimeError('not started yet')\n if self._client is not None:\n raise RuntimeError('connection already set')\n self._client = client\n\n self._adapter = wrapper.VSCodeMessageProcessor(\n client,\n self._pydevd.pydevd_notify,\n self._pydevd.pydevd_request,\n self._handle_vsc_disconnect,\n self._handle_vsc_close,\n )\n name = 'ptvsd.Client' if self._server is None else 'ptvsd.Server'\n self._adapter.start(name)\n if self._signal_handlers is not None:\n self._add_signal_handlers()\n self._add_atexit_handler()\n return self._adapter\n\n def close(self):\n \"\"\"Stop all loops and release all resources.\"\"\"\n if self._closed:\n raise DaemonClosedError('already closed')\n self._closed = True\n\n if self._adapter is not None:\n normal, abnormal = self._adapter._wait_options()\n if (normal and not self.exitcode) or (abnormal and self.exitcode):\n self.wait_on_exit()\n\n if self._pydevd is not None:\n close_socket(self._pydevd)\n if self._client is not None:\n self._release_connection()\n\n def re_build_breakpoints(self):\n self.adapter.re_build_breakpoints()\n\n # internal methods\n\n def _signal_handler(self, signum, frame):\n for handle_signal in self._signal_handlers.get(signum, ()):\n handle_signal(signum, frame)\n\n def _atexit_handler(self):\n for handle_atexit in self._atexit_handlers:\n handle_atexit()\n\n def _add_atexit_handler(self):\n 
def handler():\n self._exiting_via_atexit_handler = True\n if not self._closed:\n self.close()\n if self._adapter is not None:\n # TODO: Do this in VSCodeMessageProcessor.close()?\n self._adapter._wait_for_server_thread()\n self._atexit_handlers.append(handler)\n\n def _add_signal_handlers(self):\n if platform.system() == 'Windows':\n return\n\n def handler(signum, frame):\n if not self._closed:\n self.close()\n sys.exit(0)\n self._signal_handlers[signal.SIGHUP].append(handler)\n\n def _release_connection(self):\n if self._adapter is not None:\n # TODO: This is not correct in the \"attach\" case.\n self._adapter.handle_pydevd_stopped(self.exitcode)\n self._adapter.close()\n close_socket(self._client)\n\n # internal methods for PyDevdSocket().\n\n def _handle_pydevd_message(self, cmdid, seq, text):\n if self._adapter is not None:\n self._adapter.on_pydevd_event(cmdid, seq, text)\n\n def _handle_pydevd_close(self):\n if self._closed:\n return\n self.close()\n\n def _getpeername(self):\n if self._client is None:\n raise NotImplementedError\n return self._client.getpeername()\n\n def _getsockname(self):\n if self._client is None:\n raise NotImplementedError\n return self._client.getsockname()\n\n # internal methods for VSCodeMessageProcessor\n\n def _handle_vsc_disconnect(self, kill=False):\n if not self._closed:\n self.close()\n if kill and self.killonclose and not self._exiting_via_atexit_handler:\n os.kill(os.getpid(), signal.SIGTERM)\n\n def _handle_vsc_close(self):\n if self._closed:\n return\n self.close()\n", "path": "ptvsd/daemon.py"}]} | 2,922 | 106 |
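An editorial aside on the record that closes above: the visible slice of `ptvsd/daemon.py` is organised around one idea — an `atexit` hook and a `SIGHUP` handler that both funnel into a single idempotent close path. The sketch below is a minimal, hypothetical illustration of that pattern in plain Python; the class and names are invented for illustration and are not ptvsd's API.

```python
import atexit
import platform
import signal


class Closer:
    """Illustrative only: close a resource exactly once, from any exit path."""

    def __init__(self):
        self._closed = False
        atexit.register(self._handle_atexit)
        if platform.system() != "Windows":
            # SIGHUP does not exist on Windows; also, signal.signal() must be
            # called from the main thread or it raises ValueError.
            signal.signal(signal.SIGHUP, self._handle_signal)

    def close(self):
        if self._closed:        # idempotent, so overlapping hooks are harmless
            return
        self._closed = True
        print("resources released")

    def _handle_atexit(self):
        self.close()

    def _handle_signal(self, signum, frame):
        self.close()


if __name__ == "__main__":
    c = Closer()
    c.close()                   # explicit close; the atexit hook becomes a no-op
```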
gh_patches_debug_24482 | rasdani/github-patches | git_diff | sunpy__sunpy-3515 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error in documentation for "Finding bright regions with ndimage" example.
<!-- This comments are hidden when you submit the issue so you do not need to remove them!
Please be sure to check out our contributing guidelines: https://github.com/sunpy/sunpy/blob/master/CONTRIBUTING.rst
Please be sure to check out our code of conduct:
https://github.com/sunpy/sunpy/blob/master/CODE_OF_CONDUCT.rst -->
<!-- Please have a search on our GitHub repository to see if a similar issue has already been posted.
If a similar issue is closed, have a quick look to see if you are satisfied by the resolution.
If not please go ahead and open an issue! -->
### Description
<!-- Provide a general description of the bug. -->
There seems to be an error in the documentation for the "Finding bright regions with ndimage" example.
In the part where a mask is made, the surrounding text states: " We choose the criterion that the data should be at least 5% of the maximum value. " However, if you look at the code immediately below, the threshold is based off 10% the max value:
`mask = aiamap.data < aiamap.max() * 0.10`
### Expected behavior
<!-- What did you expect to happen. -->
Documentation needs to be modified to reflect that the threshold is based off a 10% threshold.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/map/image_bright_regions_gallery_example.py`
Content:
```
1 # coding: utf-8
2 """
3 ===================================
4 Finding bright regions with ndimage
5 ===================================
6
7 How you can to find the brightest regions in an AIA image and
8 count the approximate number of regions of interest using ndimage.
9 """
10 # sphinx_gallery_thumbnail_number = 2
11
12 from scipy import ndimage
13 import matplotlib.pyplot as plt
14
15 import sunpy.map
16 from sunpy.data.sample import AIA_193_IMAGE
17
18 ###############################################################################
19 # We start with the sample data
20 aiamap_mask = sunpy.map.Map(AIA_193_IMAGE)
21 aiamap = sunpy.map.Map(AIA_193_IMAGE)
22
23 ##############################################################################
24 # First we make a mask, which tells us which regions are bright. We
25 # choose the criterion that the data should be at least 5% of the maximum
26 # value. Pixels with intensity values greater than this are included in the
27 # mask, while all other pixels are excluded.
28 mask = aiamap.data < aiamap.max() * 0.10
29
30 ##############################################################################
31 # Mask is a `boolean` array. It can be used to modify the original map object
32 # without modifying the data. Once this mask attribute is set, we can plot the
33 # image again.
34 aiamap_mask.mask = mask
35 plt.figure()
36 aiamap.plot()
37 plt.colorbar()
38 plt.show()
39
40 ##############################################################################
41 # Only the brightest pixels remain in the image.
42 # However, these areas are artificially broken up into small regions.
43 # We can solve this by applying some smoothing to the image data.
44 # Here we apply a 2D Gaussian smoothing function to the data.
45 data2 = ndimage.gaussian_filter(aiamap.data * ~mask, 14)
46
47 ##############################################################################
48 # The issue with the filtering is that it create pixels where the values are
49 # small (<100), so when we go on later to label this array,
50 # we get one large region which encompasses the entire array.
51 # If you want to see, just remove this line.
52 data2[data2 < 100] = 0
53
54 ##############################################################################
55 # Now we will make a second SunPy map with this smoothed data.
56 aiamap2 = sunpy.map.Map(data2, aiamap.meta)
57
58 ##############################################################################
59 # The function `label` from the `scipy.ndimage` module, counts the number of
60 # contiguous regions in an image.
61 labels, n = ndimage.label(aiamap2.data)
62
63 ##############################################################################
64 # Finally, we plot the smoothed bright image data, along with the estimate of
65 # the number of distinct regions. We can see that approximately 6 distinct hot
66 # regions are present above the 5% of the maximum level.
67 plt.figure()
68 ax = plt.subplot(projection=aiamap)
69 aiamap.plot()
70 plt.contour(labels)
71 plt.figtext(0.3, 0.2, f'Number of regions = {n}', color='white')
72 plt.show()
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/map/image_bright_regions_gallery_example.py b/examples/map/image_bright_regions_gallery_example.py
--- a/examples/map/image_bright_regions_gallery_example.py
+++ b/examples/map/image_bright_regions_gallery_example.py
@@ -22,7 +22,7 @@
##############################################################################
# First we make a mask, which tells us which regions are bright. We
-# choose the criterion that the data should be at least 5% of the maximum
+# choose the criterion that the data should be at least 10% of the maximum
# value. Pixels with intensity values greater than this are included in the
# mask, while all other pixels are excluded.
mask = aiamap.data < aiamap.max() * 0.10
@@ -63,7 +63,7 @@
##############################################################################
# Finally, we plot the smoothed bright image data, along with the estimate of
# the number of distinct regions. We can see that approximately 6 distinct hot
-# regions are present above the 5% of the maximum level.
+# regions are present above the 10% of the maximum level.
plt.figure()
ax = plt.subplot(projection=aiamap)
aiamap.plot()
| {"golden_diff": "diff --git a/examples/map/image_bright_regions_gallery_example.py b/examples/map/image_bright_regions_gallery_example.py\n--- a/examples/map/image_bright_regions_gallery_example.py\n+++ b/examples/map/image_bright_regions_gallery_example.py\n@@ -22,7 +22,7 @@\n \n ##############################################################################\n # First we make a mask, which tells us which regions are bright. We\n-# choose the criterion that the data should be at least 5% of the maximum\n+# choose the criterion that the data should be at least 10% of the maximum\n # value. Pixels with intensity values greater than this are included in the\n # mask, while all other pixels are excluded.\n mask = aiamap.data < aiamap.max() * 0.10\n@@ -63,7 +63,7 @@\n ##############################################################################\n # Finally, we plot the smoothed bright image data, along with the estimate of\n # the number of distinct regions. We can see that approximately 6 distinct hot\n-# regions are present above the 5% of the maximum level.\n+# regions are present above the 10% of the maximum level.\n plt.figure()\n ax = plt.subplot(projection=aiamap)\n aiamap.plot()\n", "issue": "Error in documentation for \"Finding bright regions with ndimage\" example.\n<!-- This comments are hidden when you submit the issue so you do not need to remove them!\r\nPlease be sure to check out our contributing guidelines: https://github.com/sunpy/sunpy/blob/master/CONTRIBUTING.rst\r\nPlease be sure to check out our code of conduct:\r\nhttps://github.com/sunpy/sunpy/blob/master/CODE_OF_CONDUCT.rst -->\r\n\r\n<!-- Please have a search on our GitHub repository to see if a similar issue has already been posted.\r\nIf a similar issue is closed, have a quick look to see if you are satisfied by the resolution.\r\nIf not please go ahead and open an issue! -->\r\n\r\n### Description\r\n<!-- Provide a general description of the bug. -->\r\nThere seems to be an error in the documentation for the \"Finding bright regions with ndimage\" example.\r\n\r\nIn the part where a mask is made, the surrounding text states: \" We choose the criterion that the data should be at least 5% of the maximum value. \" However, if you look at the code immediately below, the threshold is based off 10% the max value:\r\n`mask = aiamap.data < aiamap.max() * 0.10`\r\n\r\n### Expected behavior\r\n<!-- What did you expect to happen. -->\r\n\r\nDocumentation needs to be modified to reflect that the threshold is based off a 10% threshold. \n", "before_files": [{"content": "# coding: utf-8\n\"\"\"\n===================================\nFinding bright regions with ndimage\n===================================\n\nHow you can to find the brightest regions in an AIA image and\ncount the approximate number of regions of interest using ndimage.\n\"\"\"\n# sphinx_gallery_thumbnail_number = 2\n\nfrom scipy import ndimage\nimport matplotlib.pyplot as plt\n\nimport sunpy.map\nfrom sunpy.data.sample import AIA_193_IMAGE\n\n###############################################################################\n# We start with the sample data\naiamap_mask = sunpy.map.Map(AIA_193_IMAGE)\naiamap = sunpy.map.Map(AIA_193_IMAGE)\n\n##############################################################################\n# First we make a mask, which tells us which regions are bright. We\n# choose the criterion that the data should be at least 5% of the maximum\n# value. 
Pixels with intensity values greater than this are included in the\n# mask, while all other pixels are excluded.\nmask = aiamap.data < aiamap.max() * 0.10\n\n##############################################################################\n# Mask is a `boolean` array. It can be used to modify the original map object\n# without modifying the data. Once this mask attribute is set, we can plot the\n# image again.\naiamap_mask.mask = mask\nplt.figure()\naiamap.plot()\nplt.colorbar()\nplt.show()\n\n##############################################################################\n# Only the brightest pixels remain in the image.\n# However, these areas are artificially broken up into small regions.\n# We can solve this by applying some smoothing to the image data.\n# Here we apply a 2D Gaussian smoothing function to the data.\ndata2 = ndimage.gaussian_filter(aiamap.data * ~mask, 14)\n\n##############################################################################\n# The issue with the filtering is that it create pixels where the values are\n# small (<100), so when we go on later to label this array,\n# we get one large region which encompasses the entire array.\n# If you want to see, just remove this line.\ndata2[data2 < 100] = 0\n\n##############################################################################\n# Now we will make a second SunPy map with this smoothed data.\naiamap2 = sunpy.map.Map(data2, aiamap.meta)\n\n##############################################################################\n# The function `label` from the `scipy.ndimage` module, counts the number of\n# contiguous regions in an image.\nlabels, n = ndimage.label(aiamap2.data)\n\n##############################################################################\n# Finally, we plot the smoothed bright image data, along with the estimate of\n# the number of distinct regions. We can see that approximately 6 distinct hot\n# regions are present above the 5% of the maximum level.\nplt.figure()\nax = plt.subplot(projection=aiamap)\naiamap.plot()\nplt.contour(labels)\nplt.figtext(0.3, 0.2, f'Number of regions = {n}', color='white')\nplt.show()\n", "path": "examples/map/image_bright_regions_gallery_example.py"}], "after_files": [{"content": "# coding: utf-8\n\"\"\"\n===================================\nFinding bright regions with ndimage\n===================================\n\nHow you can to find the brightest regions in an AIA image and\ncount the approximate number of regions of interest using ndimage.\n\"\"\"\n# sphinx_gallery_thumbnail_number = 2\n\nfrom scipy import ndimage\nimport matplotlib.pyplot as plt\n\nimport sunpy.map\nfrom sunpy.data.sample import AIA_193_IMAGE\n\n###############################################################################\n# We start with the sample data\naiamap_mask = sunpy.map.Map(AIA_193_IMAGE)\naiamap = sunpy.map.Map(AIA_193_IMAGE)\n\n##############################################################################\n# First we make a mask, which tells us which regions are bright. We\n# choose the criterion that the data should be at least 10% of the maximum\n# value. Pixels with intensity values greater than this are included in the\n# mask, while all other pixels are excluded.\nmask = aiamap.data < aiamap.max() * 0.10\n\n##############################################################################\n# Mask is a `boolean` array. It can be used to modify the original map object\n# without modifying the data. 
Once this mask attribute is set, we can plot the\n# image again.\naiamap_mask.mask = mask\nplt.figure()\naiamap.plot()\nplt.colorbar()\nplt.show()\n\n##############################################################################\n# Only the brightest pixels remain in the image.\n# However, these areas are artificially broken up into small regions.\n# We can solve this by applying some smoothing to the image data.\n# Here we apply a 2D Gaussian smoothing function to the data.\ndata2 = ndimage.gaussian_filter(aiamap.data * ~mask, 14)\n\n##############################################################################\n# The issue with the filtering is that it create pixels where the values are\n# small (<100), so when we go on later to label this array,\n# we get one large region which encompasses the entire array.\n# If you want to see, just remove this line.\ndata2[data2 < 100] = 0\n\n##############################################################################\n# Now we will make a second SunPy map with this smoothed data.\naiamap2 = sunpy.map.Map(data2, aiamap.meta)\n\n##############################################################################\n# The function `label` from the `scipy.ndimage` module, counts the number of\n# contiguous regions in an image.\nlabels, n = ndimage.label(aiamap2.data)\n\n##############################################################################\n# Finally, we plot the smoothed bright image data, along with the estimate of\n# the number of distinct regions. We can see that approximately 6 distinct hot\n# regions are present above the 10% of the maximum level.\nplt.figure()\nax = plt.subplot(projection=aiamap)\naiamap.plot()\nplt.contour(labels)\nplt.figtext(0.3, 0.2, f'Number of regions = {n}', color='white')\nplt.show()\n", "path": "examples/map/image_bright_regions_gallery_example.py"}]} | 1,311 | 259 |
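A short note on the record above: the bug was purely a prose/code mismatch (the text said 5%, the code used 10%). One way to make that kind of drift harder, sketched here with synthetic data instead of the AIA sample image so the snippet stays self-contained, is to keep the fraction in a single named constant and interpolate it wherever it is printed or described.

```python
import numpy as np
from scipy import ndimage

THRESHOLD_FRACTION = 0.10          # single source of truth: "10% of the maximum"

data = np.random.default_rng(0).random((64, 64))
mask = data < data.max() * THRESHOLD_FRACTION     # True where the pixel is too dim
smoothed = ndimage.gaussian_filter(data * ~mask, 3)
labels, n = ndimage.label(smoothed > 0)

print(f"regions above {THRESHOLD_FRACTION:.0%} of the maximum: {n}")
```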
gh_patches_debug_38902 | rasdani/github-patches | git_diff | pypi__warehouse-3352 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unnecessary purges on `User` model change
Currently right now any time the `User` model changes, we purge all the cache keys for that user's project.
This includes attribute changes that don't actually affect the project pages, like `last_login`, `password` etc.
We should filter out "purge-able" attribute changes and only issue purges when necessary. Said attributes include:
* `username`
* `name`
* `emails`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/packaging/__init__.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 from celery.schedules import crontab
14
15 from warehouse.accounts.models import User
16 from warehouse.cache.origin import key_factory
17 from warehouse.packaging.interfaces import IFileStorage
18 from warehouse.packaging.models import Project, Release
19 from warehouse.packaging.tasks import compute_trending
20
21
22 def includeme(config):
23 # Register whatever file storage backend has been configured for storing
24 # our package files.
25 storage_class = config.maybe_dotted(
26 config.registry.settings["files.backend"],
27 )
28 config.register_service_factory(storage_class.create_service, IFileStorage)
29
30 # Register our origin cache keys
31 config.register_origin_cache_keys(
32 Project,
33 cache_keys=["project/{obj.normalized_name}"],
34 purge_keys=[
35 key_factory("project/{obj.normalized_name}"),
36 key_factory("user/{itr.username}", iterate_on='users'),
37 key_factory("all-projects"),
38 ],
39 )
40 config.register_origin_cache_keys(
41 Release,
42 cache_keys=["project/{obj.project.normalized_name}"],
43 purge_keys=[
44 key_factory("project/{obj.project.normalized_name}"),
45 key_factory("user/{itr.username}", iterate_on='project.users'),
46 key_factory("all-projects"),
47 ],
48 )
49 config.register_origin_cache_keys(
50 User,
51 cache_keys=["user/{obj.username}"],
52 purge_keys=[
53 key_factory("user/{obj.username}"),
54 key_factory("project/{itr.normalized_name}", iterate_on='projects')
55 ],
56 )
57
58 # Add a periodic task to compute trending once a day, assuming we have
59 # been configured to be able to access BigQuery.
60 if config.get_settings().get("warehouse.trending_table"):
61 config.add_periodic_task(crontab(minute=0, hour=3), compute_trending)
62
```
Path: `warehouse/cache/origin/__init__.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import collections
14 import functools
15 import operator
16 from itertools import chain
17
18 from warehouse import db
19 from warehouse.cache.origin.interfaces import IOriginCache
20
21
22 @db.listens_for(db.Session, "after_flush")
23 def store_purge_keys(config, session, flush_context):
24 cache_keys = config.registry["cache_keys"]
25
26 # We'll (ab)use the session.info dictionary to store a list of pending
27 # purges to the session.
28 purges = session.info.setdefault("warehouse.cache.origin.purges", set())
29
30 # Go through each new, changed, and deleted object and attempt to store
31 # a cache key that we'll want to purge when the session has been committed.
32 for obj in (session.new | session.dirty | session.deleted):
33 try:
34 key_maker = cache_keys[obj.__class__]
35 except KeyError:
36 continue
37
38 purges.update(key_maker(obj).purge)
39
40
41 @db.listens_for(db.Session, "after_commit")
42 def execute_purge(config, session):
43 purges = session.info.pop("warehouse.cache.origin.purges", set())
44
45 try:
46 cacher_factory = config.find_service_factory(IOriginCache)
47 except ValueError:
48 return
49
50 cacher = cacher_factory(None, config)
51 cacher.purge(purges)
52
53
54 def origin_cache(seconds, keys=None, stale_while_revalidate=None,
55 stale_if_error=None):
56 if keys is None:
57 keys = []
58
59 def inner(view):
60 @functools.wraps(view)
61 def wrapped(context, request):
62 cache_keys = request.registry["cache_keys"]
63
64 context_keys = []
65 if context.__class__ in cache_keys:
66 context_keys = cache_keys[context.__class__](context).cache
67
68 try:
69 cacher = request.find_service(IOriginCache)
70 except ValueError:
71 pass
72 else:
73 request.add_response_callback(
74 functools.partial(
75 cacher.cache,
76 sorted(context_keys + keys),
77 seconds=seconds,
78 stale_while_revalidate=stale_while_revalidate,
79 stale_if_error=stale_if_error,
80 )
81 )
82
83 return view(context, request)
84 return wrapped
85
86 return inner
87
88
89 CacheKeys = collections.namedtuple("CacheKeys", ["cache", "purge"])
90
91
92 def key_factory(keystring, iterate_on=None):
93
94 def generate_key(obj):
95 if iterate_on:
96 for itr in operator.attrgetter(iterate_on)(obj):
97 yield keystring.format(itr=itr, obj=obj)
98 else:
99 yield keystring.format(obj=obj)
100
101 return generate_key
102
103
104 def key_maker_factory(cache_keys, purge_keys):
105 if cache_keys is None:
106 cache_keys = []
107
108 if purge_keys is None:
109 purge_keys = []
110
111 def key_maker(obj):
112 return CacheKeys(
113 # Note: this does not support setting the `cache` argument via
114 # multiple `key_factories` as we do with `purge` because there is
115 # a limit to how many surrogate keys we can attach to a single HTTP
116 # response, and being able to use use `iterate_on` would allow this
117 # size to be unbounded.
118 # ref: https://github.com/pypa/warehouse/pull/3189
119 cache=[k.format(obj=obj) for k in cache_keys],
120 purge=chain.from_iterable(key(obj) for key in purge_keys),
121 )
122
123 return key_maker
124
125
126 def register_origin_cache_keys(config, klass, cache_keys=None,
127 purge_keys=None):
128 key_makers = config.registry.setdefault("cache_keys", {})
129 key_makers[klass] = key_maker_factory(
130 cache_keys=cache_keys,
131 purge_keys=purge_keys,
132 )
133
134
135 def includeme(config):
136 if "origin_cache.backend" in config.registry.settings:
137 cache_class = config.maybe_dotted(
138 config.registry.settings["origin_cache.backend"],
139 )
140 config.register_service_factory(
141 cache_class.create_service,
142 IOriginCache,
143 )
144
145 config.add_directive(
146 "register_origin_cache_keys",
147 register_origin_cache_keys,
148 )
149
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/warehouse/cache/origin/__init__.py b/warehouse/cache/origin/__init__.py
--- a/warehouse/cache/origin/__init__.py
+++ b/warehouse/cache/origin/__init__.py
@@ -15,6 +15,8 @@
import operator
from itertools import chain
+from sqlalchemy.orm.session import Session
+
from warehouse import db
from warehouse.cache.origin.interfaces import IOriginCache
@@ -132,6 +134,19 @@
)
+def receive_set(attribute, config, target):
+ cache_keys = config.registry["cache_keys"]
+ session = Session.object_session(target)
+ if session:
+ purges = session.info.setdefault(
+ "warehouse.cache.origin.purges",
+ set()
+ )
+ key_maker = cache_keys[attribute]
+ keys = key_maker(target).purge
+ purges.update(list(keys))
+
+
def includeme(config):
if "origin_cache.backend" in config.registry.settings:
cache_class = config.maybe_dotted(
diff --git a/warehouse/packaging/__init__.py b/warehouse/packaging/__init__.py
--- a/warehouse/packaging/__init__.py
+++ b/warehouse/packaging/__init__.py
@@ -11,14 +11,25 @@
# limitations under the License.
from celery.schedules import crontab
+from warehouse import db
-from warehouse.accounts.models import User
-from warehouse.cache.origin import key_factory
+from warehouse.accounts.models import User, Email
+from warehouse.cache.origin import key_factory, receive_set
from warehouse.packaging.interfaces import IFileStorage
from warehouse.packaging.models import Project, Release
from warehouse.packaging.tasks import compute_trending
+@db.listens_for(User.name, 'set')
+def user_name_receive_set(config, target, value, oldvalue, initiator):
+ receive_set(User.name, config, target)
+
+
+@db.listens_for(Email.primary, 'set')
+def email_primary_receive_set(config, target, value, oldvalue, initiator):
+ receive_set(Email.primary, config, target)
+
+
def includeme(config):
# Register whatever file storage backend has been configured for storing
# our package files.
@@ -49,11 +60,24 @@
config.register_origin_cache_keys(
User,
cache_keys=["user/{obj.username}"],
+ )
+ config.register_origin_cache_keys(
+ User.name,
purge_keys=[
key_factory("user/{obj.username}"),
key_factory("project/{itr.normalized_name}", iterate_on='projects')
],
)
+ config.register_origin_cache_keys(
+ Email.primary,
+ purge_keys=[
+ key_factory("user/{obj.user.username}"),
+ key_factory(
+ "project/{itr.normalized_name}",
+ iterate_on='user.projects',
+ )
+ ],
+ )
# Add a periodic task to compute trending once a day, assuming we have
# been configured to be able to access BigQuery.
| {"golden_diff": "diff --git a/warehouse/cache/origin/__init__.py b/warehouse/cache/origin/__init__.py\n--- a/warehouse/cache/origin/__init__.py\n+++ b/warehouse/cache/origin/__init__.py\n@@ -15,6 +15,8 @@\n import operator\n from itertools import chain\n \n+from sqlalchemy.orm.session import Session\n+\n from warehouse import db\n from warehouse.cache.origin.interfaces import IOriginCache\n \n@@ -132,6 +134,19 @@\n )\n \n \n+def receive_set(attribute, config, target):\n+ cache_keys = config.registry[\"cache_keys\"]\n+ session = Session.object_session(target)\n+ if session:\n+ purges = session.info.setdefault(\n+ \"warehouse.cache.origin.purges\",\n+ set()\n+ )\n+ key_maker = cache_keys[attribute]\n+ keys = key_maker(target).purge\n+ purges.update(list(keys))\n+\n+\n def includeme(config):\n if \"origin_cache.backend\" in config.registry.settings:\n cache_class = config.maybe_dotted(\ndiff --git a/warehouse/packaging/__init__.py b/warehouse/packaging/__init__.py\n--- a/warehouse/packaging/__init__.py\n+++ b/warehouse/packaging/__init__.py\n@@ -11,14 +11,25 @@\n # limitations under the License.\n \n from celery.schedules import crontab\n+from warehouse import db\n \n-from warehouse.accounts.models import User\n-from warehouse.cache.origin import key_factory\n+from warehouse.accounts.models import User, Email\n+from warehouse.cache.origin import key_factory, receive_set\n from warehouse.packaging.interfaces import IFileStorage\n from warehouse.packaging.models import Project, Release\n from warehouse.packaging.tasks import compute_trending\n \n \[email protected]_for(User.name, 'set')\n+def user_name_receive_set(config, target, value, oldvalue, initiator):\n+ receive_set(User.name, config, target)\n+\n+\[email protected]_for(Email.primary, 'set')\n+def email_primary_receive_set(config, target, value, oldvalue, initiator):\n+ receive_set(Email.primary, config, target)\n+\n+\n def includeme(config):\n # Register whatever file storage backend has been configured for storing\n # our package files.\n@@ -49,11 +60,24 @@\n config.register_origin_cache_keys(\n User,\n cache_keys=[\"user/{obj.username}\"],\n+ )\n+ config.register_origin_cache_keys(\n+ User.name,\n purge_keys=[\n key_factory(\"user/{obj.username}\"),\n key_factory(\"project/{itr.normalized_name}\", iterate_on='projects')\n ],\n )\n+ config.register_origin_cache_keys(\n+ Email.primary,\n+ purge_keys=[\n+ key_factory(\"user/{obj.user.username}\"),\n+ key_factory(\n+ \"project/{itr.normalized_name}\",\n+ iterate_on='user.projects',\n+ )\n+ ],\n+ )\n \n # Add a periodic task to compute trending once a day, assuming we have\n # been configured to be able to access BigQuery.\n", "issue": "Unnecessary purges on `User` model change\nCurrently right now any time the `User` model changes, we purge all the cache keys for that user's project.\r\n\r\nThis includes attribute changes that don't actually affect the project pages, like `last_login`, `password` etc.\r\n\r\nWe should filter out \"purge-able\" attribute changes and only issue purges when necessary. 
Said attributes include:\r\n* `username`\r\n* `name`\r\n* `emails`\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom celery.schedules import crontab\n\nfrom warehouse.accounts.models import User\nfrom warehouse.cache.origin import key_factory\nfrom warehouse.packaging.interfaces import IFileStorage\nfrom warehouse.packaging.models import Project, Release\nfrom warehouse.packaging.tasks import compute_trending\n\n\ndef includeme(config):\n # Register whatever file storage backend has been configured for storing\n # our package files.\n storage_class = config.maybe_dotted(\n config.registry.settings[\"files.backend\"],\n )\n config.register_service_factory(storage_class.create_service, IFileStorage)\n\n # Register our origin cache keys\n config.register_origin_cache_keys(\n Project,\n cache_keys=[\"project/{obj.normalized_name}\"],\n purge_keys=[\n key_factory(\"project/{obj.normalized_name}\"),\n key_factory(\"user/{itr.username}\", iterate_on='users'),\n key_factory(\"all-projects\"),\n ],\n )\n config.register_origin_cache_keys(\n Release,\n cache_keys=[\"project/{obj.project.normalized_name}\"],\n purge_keys=[\n key_factory(\"project/{obj.project.normalized_name}\"),\n key_factory(\"user/{itr.username}\", iterate_on='project.users'),\n key_factory(\"all-projects\"),\n ],\n )\n config.register_origin_cache_keys(\n User,\n cache_keys=[\"user/{obj.username}\"],\n purge_keys=[\n key_factory(\"user/{obj.username}\"),\n key_factory(\"project/{itr.normalized_name}\", iterate_on='projects')\n ],\n )\n\n # Add a periodic task to compute trending once a day, assuming we have\n # been configured to be able to access BigQuery.\n if config.get_settings().get(\"warehouse.trending_table\"):\n config.add_periodic_task(crontab(minute=0, hour=3), compute_trending)\n", "path": "warehouse/packaging/__init__.py"}, {"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\nimport functools\nimport operator\nfrom itertools import chain\n\nfrom warehouse import db\nfrom warehouse.cache.origin.interfaces import IOriginCache\n\n\[email protected]_for(db.Session, \"after_flush\")\ndef store_purge_keys(config, session, flush_context):\n cache_keys = config.registry[\"cache_keys\"]\n\n # We'll (ab)use the session.info dictionary to store a list of pending\n # purges to the session.\n purges = session.info.setdefault(\"warehouse.cache.origin.purges\", set())\n\n # Go through each new, changed, and deleted object and attempt to store\n # a cache key that we'll 
want to purge when the session has been committed.\n for obj in (session.new | session.dirty | session.deleted):\n try:\n key_maker = cache_keys[obj.__class__]\n except KeyError:\n continue\n\n purges.update(key_maker(obj).purge)\n\n\[email protected]_for(db.Session, \"after_commit\")\ndef execute_purge(config, session):\n purges = session.info.pop(\"warehouse.cache.origin.purges\", set())\n\n try:\n cacher_factory = config.find_service_factory(IOriginCache)\n except ValueError:\n return\n\n cacher = cacher_factory(None, config)\n cacher.purge(purges)\n\n\ndef origin_cache(seconds, keys=None, stale_while_revalidate=None,\n stale_if_error=None):\n if keys is None:\n keys = []\n\n def inner(view):\n @functools.wraps(view)\n def wrapped(context, request):\n cache_keys = request.registry[\"cache_keys\"]\n\n context_keys = []\n if context.__class__ in cache_keys:\n context_keys = cache_keys[context.__class__](context).cache\n\n try:\n cacher = request.find_service(IOriginCache)\n except ValueError:\n pass\n else:\n request.add_response_callback(\n functools.partial(\n cacher.cache,\n sorted(context_keys + keys),\n seconds=seconds,\n stale_while_revalidate=stale_while_revalidate,\n stale_if_error=stale_if_error,\n )\n )\n\n return view(context, request)\n return wrapped\n\n return inner\n\n\nCacheKeys = collections.namedtuple(\"CacheKeys\", [\"cache\", \"purge\"])\n\n\ndef key_factory(keystring, iterate_on=None):\n\n def generate_key(obj):\n if iterate_on:\n for itr in operator.attrgetter(iterate_on)(obj):\n yield keystring.format(itr=itr, obj=obj)\n else:\n yield keystring.format(obj=obj)\n\n return generate_key\n\n\ndef key_maker_factory(cache_keys, purge_keys):\n if cache_keys is None:\n cache_keys = []\n\n if purge_keys is None:\n purge_keys = []\n\n def key_maker(obj):\n return CacheKeys(\n # Note: this does not support setting the `cache` argument via\n # multiple `key_factories` as we do with `purge` because there is\n # a limit to how many surrogate keys we can attach to a single HTTP\n # response, and being able to use use `iterate_on` would allow this\n # size to be unbounded.\n # ref: https://github.com/pypa/warehouse/pull/3189\n cache=[k.format(obj=obj) for k in cache_keys],\n purge=chain.from_iterable(key(obj) for key in purge_keys),\n )\n\n return key_maker\n\n\ndef register_origin_cache_keys(config, klass, cache_keys=None,\n purge_keys=None):\n key_makers = config.registry.setdefault(\"cache_keys\", {})\n key_makers[klass] = key_maker_factory(\n cache_keys=cache_keys,\n purge_keys=purge_keys,\n )\n\n\ndef includeme(config):\n if \"origin_cache.backend\" in config.registry.settings:\n cache_class = config.maybe_dotted(\n config.registry.settings[\"origin_cache.backend\"],\n )\n config.register_service_factory(\n cache_class.create_service,\n IOriginCache,\n )\n\n config.add_directive(\n \"register_origin_cache_keys\",\n register_origin_cache_keys,\n )\n", "path": "warehouse/cache/origin/__init__.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom 
celery.schedules import crontab\nfrom warehouse import db\n\nfrom warehouse.accounts.models import User, Email\nfrom warehouse.cache.origin import key_factory, receive_set\nfrom warehouse.packaging.interfaces import IFileStorage\nfrom warehouse.packaging.models import Project, Release\nfrom warehouse.packaging.tasks import compute_trending\n\n\[email protected]_for(User.name, 'set')\ndef user_name_receive_set(config, target, value, oldvalue, initiator):\n receive_set(User.name, config, target)\n\n\[email protected]_for(Email.primary, 'set')\ndef email_primary_receive_set(config, target, value, oldvalue, initiator):\n receive_set(Email.primary, config, target)\n\n\ndef includeme(config):\n # Register whatever file storage backend has been configured for storing\n # our package files.\n storage_class = config.maybe_dotted(\n config.registry.settings[\"files.backend\"],\n )\n config.register_service_factory(storage_class.create_service, IFileStorage)\n\n # Register our origin cache keys\n config.register_origin_cache_keys(\n Project,\n cache_keys=[\"project/{obj.normalized_name}\"],\n purge_keys=[\n key_factory(\"project/{obj.normalized_name}\"),\n key_factory(\"user/{itr.username}\", iterate_on='users'),\n key_factory(\"all-projects\"),\n ],\n )\n config.register_origin_cache_keys(\n Release,\n cache_keys=[\"project/{obj.project.normalized_name}\"],\n purge_keys=[\n key_factory(\"project/{obj.project.normalized_name}\"),\n key_factory(\"user/{itr.username}\", iterate_on='project.users'),\n key_factory(\"all-projects\"),\n ],\n )\n config.register_origin_cache_keys(\n User,\n cache_keys=[\"user/{obj.username}\"],\n )\n config.register_origin_cache_keys(\n User.name,\n purge_keys=[\n key_factory(\"user/{obj.username}\"),\n key_factory(\"project/{itr.normalized_name}\", iterate_on='projects')\n ],\n )\n config.register_origin_cache_keys(\n Email.primary,\n purge_keys=[\n key_factory(\"user/{obj.user.username}\"),\n key_factory(\n \"project/{itr.normalized_name}\",\n iterate_on='user.projects',\n )\n ],\n )\n\n # Add a periodic task to compute trending once a day, assuming we have\n # been configured to be able to access BigQuery.\n if config.get_settings().get(\"warehouse.trending_table\"):\n config.add_periodic_task(crontab(minute=0, hour=3), compute_trending)\n", "path": "warehouse/packaging/__init__.py"}, {"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\nimport functools\nimport operator\nfrom itertools import chain\n\nfrom sqlalchemy.orm.session import Session\n\nfrom warehouse import db\nfrom warehouse.cache.origin.interfaces import IOriginCache\n\n\[email protected]_for(db.Session, \"after_flush\")\ndef store_purge_keys(config, session, flush_context):\n cache_keys = config.registry[\"cache_keys\"]\n\n # We'll (ab)use the session.info dictionary to store a list of pending\n # purges to the session.\n purges = session.info.setdefault(\"warehouse.cache.origin.purges\", set())\n\n # Go through each new, changed, and deleted object and attempt to store\n # a 
cache key that we'll want to purge when the session has been committed.\n for obj in (session.new | session.dirty | session.deleted):\n try:\n key_maker = cache_keys[obj.__class__]\n except KeyError:\n continue\n\n purges.update(key_maker(obj).purge)\n\n\[email protected]_for(db.Session, \"after_commit\")\ndef execute_purge(config, session):\n purges = session.info.pop(\"warehouse.cache.origin.purges\", set())\n\n try:\n cacher_factory = config.find_service_factory(IOriginCache)\n except ValueError:\n return\n\n cacher = cacher_factory(None, config)\n cacher.purge(purges)\n\n\ndef origin_cache(seconds, keys=None, stale_while_revalidate=None,\n stale_if_error=None):\n if keys is None:\n keys = []\n\n def inner(view):\n @functools.wraps(view)\n def wrapped(context, request):\n cache_keys = request.registry[\"cache_keys\"]\n\n context_keys = []\n if context.__class__ in cache_keys:\n context_keys = cache_keys[context.__class__](context).cache\n\n try:\n cacher = request.find_service(IOriginCache)\n except ValueError:\n pass\n else:\n request.add_response_callback(\n functools.partial(\n cacher.cache,\n sorted(context_keys + keys),\n seconds=seconds,\n stale_while_revalidate=stale_while_revalidate,\n stale_if_error=stale_if_error,\n )\n )\n\n return view(context, request)\n return wrapped\n\n return inner\n\n\nCacheKeys = collections.namedtuple(\"CacheKeys\", [\"cache\", \"purge\"])\n\n\ndef key_factory(keystring, iterate_on=None):\n\n def generate_key(obj):\n if iterate_on:\n for itr in operator.attrgetter(iterate_on)(obj):\n yield keystring.format(itr=itr, obj=obj)\n else:\n yield keystring.format(obj=obj)\n\n return generate_key\n\n\ndef key_maker_factory(cache_keys, purge_keys):\n if cache_keys is None:\n cache_keys = []\n\n if purge_keys is None:\n purge_keys = []\n\n def key_maker(obj):\n return CacheKeys(\n # Note: this does not support setting the `cache` argument via\n # multiple `key_factories` as we do with `purge` because there is\n # a limit to how many surrogate keys we can attach to a single HTTP\n # response, and being able to use use `iterate_on` would allow this\n # size to be unbounded.\n # ref: https://github.com/pypa/warehouse/pull/3189\n cache=[k.format(obj=obj) for k in cache_keys],\n purge=chain.from_iterable(key(obj) for key in purge_keys),\n )\n\n return key_maker\n\n\ndef register_origin_cache_keys(config, klass, cache_keys=None,\n purge_keys=None):\n key_makers = config.registry.setdefault(\"cache_keys\", {})\n key_makers[klass] = key_maker_factory(\n cache_keys=cache_keys,\n purge_keys=purge_keys,\n )\n\n\ndef receive_set(attribute, config, target):\n cache_keys = config.registry[\"cache_keys\"]\n session = Session.object_session(target)\n if session:\n purges = session.info.setdefault(\n \"warehouse.cache.origin.purges\",\n set()\n )\n key_maker = cache_keys[attribute]\n keys = key_maker(target).purge\n purges.update(list(keys))\n\n\ndef includeme(config):\n if \"origin_cache.backend\" in config.registry.settings:\n cache_class = config.maybe_dotted(\n config.registry.settings[\"origin_cache.backend\"],\n )\n config.register_service_factory(\n cache_class.create_service,\n IOriginCache,\n )\n\n config.add_directive(\n \"register_origin_cache_keys\",\n register_origin_cache_keys,\n )\n", "path": "warehouse/cache/origin/__init__.py"}]} | 2,319 | 665 |
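The patch in the record above hinges on SQLAlchemy attribute events: instead of purging on any change to `User`, it listens for `set` events on the specific columns that affect rendered pages. Below is a minimal, self-contained sketch of that mechanism using plain SQLAlchemy and in-memory SQLite — it is not Warehouse's wiring (Warehouse's `db.listens_for` wrapper also injects the configurator), just the underlying technique.

```python
from sqlalchemy import Column, Integer, String, create_engine, event
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()


class User(Base):
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    name = Column(String)
    last_login = Column(String)      # changes here should NOT trigger a purge


purges = set()


@event.listens_for(User.name, "set")
def name_changed(target, value, oldvalue, initiator):
    # Reached only when User.name itself is assigned (including at construction).
    purges.add(f"user/{value}")


engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    user = User(name="alice", last_login="never")   # records 'user/alice'
    session.add(user)
    session.commit()
    user.last_login = "today"                       # no purge recorded
    user.name = "alice2"                            # records 'user/alice2'
    session.commit()

print(purges)                                       # both names, nothing for last_login
```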
gh_patches_debug_7546 | rasdani/github-patches | git_diff | akvo__akvo-rsr-1594 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add content_owner to organisation REST API filters
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rest/views/organisation.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 from django.conf import settings
8
9 from rest_framework.compat import etree, six
10 from rest_framework.exceptions import ParseError
11 from rest_framework.parsers import XMLParser, JSONParser
12
13 from akvo.rsr.models import Organisation, Country
14
15 from ..serializers import OrganisationSerializer
16 from ..viewsets import BaseRSRViewSet
17
18
19 class AkvoOrganisationParser(XMLParser):
20 def parse(self, stream, media_type=None, parser_context=None):
21 assert etree, 'XMLParser requires defusedxml to be installed'
22
23 parser_context = parser_context or {}
24 encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
25 parser = etree.DefusedXMLParser(encoding=encoding)
26 try:
27 tree = etree.parse(stream, parser=parser, forbid_dtd=True)
28 except (etree.ParseError, ValueError) as exc:
29 raise ParseError('XML parse error - %s' % six.text_type(exc))
30 return self.organisation_data_from_etree(tree.getroot())
31
32 def organisation_data_from_etree(self, tree):
33 def find_text(tree, str):
34 element = tree.find(str)
35 if element is None:
36 return ''
37 return element.text.strip() if element.text else ""
38
39 def location_data(location_tree):
40 if location_tree is None:
41 return []
42 iso_code = find_text(location_tree, 'iso_code').lower()
43 country, created = Country.objects.get_or_create(**Country.fields_from_iso_code(iso_code))
44 country = country.id
45 latitude = find_text(location_tree, 'latitude') or 0
46 longitude = find_text(location_tree, 'longitude') or 0
47 primary = True
48 return [dict(latitude=latitude, longitude=longitude, country=country, primary=primary)]
49
50 #id = find_text(tree, 'org_id')
51 long_name = find_text(tree, 'name')
52 name = long_name[:25]
53 description = find_text(tree, 'description')
54 url = find_text(tree, 'url')
55 iati_type = find_text(tree, 'iati_organisation_type')
56 new_organisation_type = int(iati_type) if iati_type else 22
57 organisation_type = Organisation.org_type_from_iati_type(new_organisation_type)
58 locations = location_data(tree.find('location/object'))
59 return dict(
60 name=name, long_name=long_name, description=description, url=url,
61 organisation_type=organisation_type, new_organisation_type=new_organisation_type,
62 locations=locations
63 )
64
65
66 class OrganisationViewSet(BaseRSRViewSet):
67 """
68 API endpoint that allows organisations to be viewed or edited.
69 """
70 queryset = Organisation.objects.all()
71 serializer_class = OrganisationSerializer
72 parser_classes = (AkvoOrganisationParser, JSONParser,)
73 filter_fields = ('name', 'long_name', 'iati_org_id', )
74
75 def get_queryset(self):
76 """ Enable filtering of Organisations on iati_org_id or name
77 """
78 queryset = super(OrganisationViewSet, self).get_queryset()
79 pk = self.request.QUERY_PARAMS.get('id', None)
80 if pk is not None:
81 try:
82 queryset = queryset.filter(pk=pk)
83 except ValueError:
84 pass
85 iati_org_id = self.request.QUERY_PARAMS.get('iati_org_id', None)
86 if iati_org_id is not None:
87 queryset = queryset.filter(iati_org_id=iati_org_id)
88 name = self.request.QUERY_PARAMS.get('name', None)
89 if name is not None:
90 queryset = queryset.filter(name=name)
91 long_name = self.request.QUERY_PARAMS.get('long_name', None)
92 if long_name is not None:
93 queryset = queryset.filter(long_name=long_name)
94 return queryset
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/akvo/rest/views/organisation.py b/akvo/rest/views/organisation.py
--- a/akvo/rest/views/organisation.py
+++ b/akvo/rest/views/organisation.py
@@ -70,7 +70,7 @@
queryset = Organisation.objects.all()
serializer_class = OrganisationSerializer
parser_classes = (AkvoOrganisationParser, JSONParser,)
- filter_fields = ('name', 'long_name', 'iati_org_id', )
+ filter_fields = ('name', 'long_name', 'iati_org_id', 'content_owner')
def get_queryset(self):
""" Enable filtering of Organisations on iati_org_id or name
| {"golden_diff": "diff --git a/akvo/rest/views/organisation.py b/akvo/rest/views/organisation.py\n--- a/akvo/rest/views/organisation.py\n+++ b/akvo/rest/views/organisation.py\n@@ -70,7 +70,7 @@\n queryset = Organisation.objects.all()\n serializer_class = OrganisationSerializer\n parser_classes = (AkvoOrganisationParser, JSONParser,)\n- filter_fields = ('name', 'long_name', 'iati_org_id', )\n+ filter_fields = ('name', 'long_name', 'iati_org_id', 'content_owner')\n \n def get_queryset(self):\n \"\"\" Enable filtering of Organisations on iati_org_id or name\n", "issue": "Add content_owner to organisation REST API filters\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.conf import settings\n\nfrom rest_framework.compat import etree, six\nfrom rest_framework.exceptions import ParseError\nfrom rest_framework.parsers import XMLParser, JSONParser\n\nfrom akvo.rsr.models import Organisation, Country\n\nfrom ..serializers import OrganisationSerializer\nfrom ..viewsets import BaseRSRViewSet\n\n\nclass AkvoOrganisationParser(XMLParser):\n def parse(self, stream, media_type=None, parser_context=None):\n assert etree, 'XMLParser requires defusedxml to be installed'\n\n parser_context = parser_context or {}\n encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)\n parser = etree.DefusedXMLParser(encoding=encoding)\n try:\n tree = etree.parse(stream, parser=parser, forbid_dtd=True)\n except (etree.ParseError, ValueError) as exc:\n raise ParseError('XML parse error - %s' % six.text_type(exc))\n return self.organisation_data_from_etree(tree.getroot())\n\n def organisation_data_from_etree(self, tree):\n def find_text(tree, str):\n element = tree.find(str)\n if element is None:\n return ''\n return element.text.strip() if element.text else \"\"\n\n def location_data(location_tree):\n if location_tree is None:\n return []\n iso_code = find_text(location_tree, 'iso_code').lower()\n country, created = Country.objects.get_or_create(**Country.fields_from_iso_code(iso_code))\n country = country.id\n latitude = find_text(location_tree, 'latitude') or 0\n longitude = find_text(location_tree, 'longitude') or 0\n primary = True\n return [dict(latitude=latitude, longitude=longitude, country=country, primary=primary)]\n\n #id = find_text(tree, 'org_id')\n long_name = find_text(tree, 'name')\n name = long_name[:25]\n description = find_text(tree, 'description')\n url = find_text(tree, 'url')\n iati_type = find_text(tree, 'iati_organisation_type')\n new_organisation_type = int(iati_type) if iati_type else 22\n organisation_type = Organisation.org_type_from_iati_type(new_organisation_type)\n locations = location_data(tree.find('location/object'))\n return dict(\n name=name, long_name=long_name, description=description, url=url,\n organisation_type=organisation_type, new_organisation_type=new_organisation_type,\n locations=locations\n )\n\n\nclass OrganisationViewSet(BaseRSRViewSet):\n \"\"\"\n API endpoint that allows organisations to be viewed or edited.\n \"\"\"\n queryset = Organisation.objects.all()\n serializer_class = OrganisationSerializer\n parser_classes = (AkvoOrganisationParser, JSONParser,)\n filter_fields = ('name', 'long_name', 'iati_org_id', )\n\n def get_queryset(self):\n \"\"\" Enable filtering of 
Organisations on iati_org_id or name\n \"\"\"\n queryset = super(OrganisationViewSet, self).get_queryset()\n pk = self.request.QUERY_PARAMS.get('id', None)\n if pk is not None:\n try:\n queryset = queryset.filter(pk=pk)\n except ValueError:\n pass\n iati_org_id = self.request.QUERY_PARAMS.get('iati_org_id', None)\n if iati_org_id is not None:\n queryset = queryset.filter(iati_org_id=iati_org_id)\n name = self.request.QUERY_PARAMS.get('name', None)\n if name is not None:\n queryset = queryset.filter(name=name)\n long_name = self.request.QUERY_PARAMS.get('long_name', None)\n if long_name is not None:\n queryset = queryset.filter(long_name=long_name)\n return queryset\n", "path": "akvo/rest/views/organisation.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.conf import settings\n\nfrom rest_framework.compat import etree, six\nfrom rest_framework.exceptions import ParseError\nfrom rest_framework.parsers import XMLParser, JSONParser\n\nfrom akvo.rsr.models import Organisation, Country\n\nfrom ..serializers import OrganisationSerializer\nfrom ..viewsets import BaseRSRViewSet\n\n\nclass AkvoOrganisationParser(XMLParser):\n def parse(self, stream, media_type=None, parser_context=None):\n assert etree, 'XMLParser requires defusedxml to be installed'\n\n parser_context = parser_context or {}\n encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)\n parser = etree.DefusedXMLParser(encoding=encoding)\n try:\n tree = etree.parse(stream, parser=parser, forbid_dtd=True)\n except (etree.ParseError, ValueError) as exc:\n raise ParseError('XML parse error - %s' % six.text_type(exc))\n return self.organisation_data_from_etree(tree.getroot())\n\n def organisation_data_from_etree(self, tree):\n def find_text(tree, str):\n element = tree.find(str)\n if element is None:\n return ''\n return element.text.strip() if element.text else \"\"\n\n def location_data(location_tree):\n if location_tree is None:\n return []\n iso_code = find_text(location_tree, 'iso_code').lower()\n country, created = Country.objects.get_or_create(**Country.fields_from_iso_code(iso_code))\n country = country.id\n latitude = find_text(location_tree, 'latitude') or 0\n longitude = find_text(location_tree, 'longitude') or 0\n primary = True\n return [dict(latitude=latitude, longitude=longitude, country=country, primary=primary)]\n\n #id = find_text(tree, 'org_id')\n long_name = find_text(tree, 'name')\n name = long_name[:25]\n description = find_text(tree, 'description')\n url = find_text(tree, 'url')\n iati_type = find_text(tree, 'iati_organisation_type')\n new_organisation_type = int(iati_type) if iati_type else 22\n organisation_type = Organisation.org_type_from_iati_type(new_organisation_type)\n locations = location_data(tree.find('location/object'))\n return dict(\n name=name, long_name=long_name, description=description, url=url,\n organisation_type=organisation_type, new_organisation_type=new_organisation_type,\n locations=locations\n )\n\n\nclass OrganisationViewSet(BaseRSRViewSet):\n \"\"\"\n API endpoint that allows organisations to be viewed or edited.\n \"\"\"\n queryset = Organisation.objects.all()\n serializer_class = OrganisationSerializer\n parser_classes = (AkvoOrganisationParser, JSONParser,)\n filter_fields = ('name', 
'long_name', 'iati_org_id', 'content_owner')\n\n def get_queryset(self):\n \"\"\" Enable filtering of Organisations on iati_org_id or name\n \"\"\"\n queryset = super(OrganisationViewSet, self).get_queryset()\n pk = self.request.QUERY_PARAMS.get('id', None)\n if pk is not None:\n try:\n queryset = queryset.filter(pk=pk)\n except ValueError:\n pass\n iati_org_id = self.request.QUERY_PARAMS.get('iati_org_id', None)\n if iati_org_id is not None:\n queryset = queryset.filter(iati_org_id=iati_org_id)\n name = self.request.QUERY_PARAMS.get('name', None)\n if name is not None:\n queryset = queryset.filter(name=name)\n long_name = self.request.QUERY_PARAMS.get('long_name', None)\n if long_name is not None:\n queryset = queryset.filter(long_name=long_name)\n return queryset\n", "path": "akvo/rest/views/organisation.py"}]} | 1,306 | 147 |
gh_patches_debug_10083 | rasdani/github-patches | git_diff | Pyomo__pyomo-1806 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pyomo.common.tempfiles.TempfileManager raises FileNotFoundError
I've recently started using pyomo.common.tempfiles.TempfileManager to set the temporary directory in a package, as in https://pyomo.readthedocs.io/en/stable/working_models.html#changing-the-temporary-directory. I was previously using TempfileManager from PyUtilib for about 4 years.
My tests now fail. Here is some of the trace:
File "/home/user/anaconda38/lib/python3.8/site-packages/pyomo/opt/base/solvers.py", line 571, in solve
self._presolve(*args, **kwds)
File "/home/user/anaconda38/lib/python3.8/site-packages/pyomo/solvers/plugins/solvers/CPLEX.py", line 349, in _presolve
ILMLicensedSystemCallSolver._presolve(self, *args, **kwds)
File "/home/user/anaconda38/lib/python3.8/site-packages/pyomo/opt/solver/shellcmd.py", line 197, in _presolve
OptSolver._presolve(self, *args, **kwds)
File "/home/user/anaconda38/lib/python3.8/site-packages/pyomo/opt/base/solvers.py", line 668, in _presolve
self._convert_problem(args,
File "/home/user/anaconda38/lib/python3.8/site-packages/pyomo/opt/base/solvers.py", line 738, in _convert_problem
return convert_problem(args,
File "/home/user/anaconda38/lib/python3.8/site-packages/pyomo/opt/base/convert.py", line 105, in convert_problem
problem_files, symbol_map = converter.apply(*tmp, **tmpkw)
File "/home/user/anaconda38/lib/python3.8/site-packages/pyomo/solvers/plugins/converter/model.py", line 72, in apply
problem_filename = TempfileManager.\
File "/home/user/anaconda38/lib/python3.8/site-packages/pyomo/common/tempfiles.py", line 67, in create_tempfile
ans = tempfile.mkstemp(suffix=suffix, prefix=prefix, text=text, dir=dir)
File "/home/user/anaconda38/lib/python3.8/tempfile.py", line 332, in mkstemp
return _mkstemp_inner(dir, prefix, suffix, flags, output_type)
File "/home/user/anaconda38/lib/python3.8/tempfile.py", line 250, in _mkstemp_inner
fd = _os.open(file, flags, 0o600)
FileNotFoundError: [Errno 2] No such file or directory: '/tmp/tmpc6s9g6hf/tmpjoxugx27.pyomo.lp'
If I run the tests individually on the commandline, then the FileNotFoundError does not occur.
If I run all the tests from a script, then the FileNotFoundError does occur.
If I run all the tests from the same script, but change the order of the tests, then the FileNotFoundError still occurs but during a different test.
Note that in all tests, I'm not actually setting TempfileManager.tempdir. It appears in a method, but this method is not called during these tests. So just the import "from pyomo.common.tempfiles import TempfileManager" is being run.
Now if I change my code so that "TempfileManager.tempdir = None" is always called for each test, then the FileNotFoundError no longer occurs.
Can you help?
I'm using Python 3.8.5 from Anaconda, Pyomo 5.7.2 and Ubuntu 18.04 LTS.
Thanks,
Jason
--- END ISSUE ---
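
A minimal sketch of the per-test workaround the reporter describes, i.e. resetting the shared default directory before each test; the test class and method names below are hypothetical, not taken from the report:

```python
# Hypothetical test scaffolding illustrating the reported workaround:
# explicitly resetting the shared default temporary directory before each
# test avoids the FileNotFoundError described above.
import unittest

from pyomo.common.tempfiles import TempfileManager


class SolverSmokeTests(unittest.TestCase):
    def setUp(self):
        # Reset the default so a stale temporary directory left behind by an
        # earlier test cannot be reused as the location for new temp files.
        TempfileManager.tempdir = None
```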
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyomo/common/tempfiles.py`
Content:
```
1 # ___________________________________________________________________________
2 #
3 # Pyomo: Python Optimization Modeling Objects
4 # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
5 # Under the terms of Contract DE-NA0003525 with National Technology and
6 # Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
7 # rights in this software.
8 # This software is distributed under the 3-clause BSD License.
9 # ___________________________________________________________________________
10 #
11 # This module was originally developed as part of the PyUtilib project
12 # Copyright (c) 2008 Sandia Corporation.
13 # This software is distributed under the BSD License.
14 # Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
15 # the U.S. Government retains certain rights in this software.
16 # ___________________________________________________________________________
17
18 import os
19 import time
20 import tempfile
21 import logging
22 import shutil
23 from pyomo.common.deprecation import deprecation_warning
24 try:
25 from pyutilib.component.config.tempfiles import (
26 TempfileManager as pyutilib_mngr
27 )
28 except ImportError:
29 pyutilib_mngr = None
30
31 deletion_errors_are_fatal = True
32
33
34 class TempfileManagerClass:
35 """A class that manages temporary files."""
36
37 tempdir = None
38
39 def __init__(self, **kwds):
40 self._tempfiles = [[]]
41 self._ctr = -1
42
43 def create_tempfile(self, suffix=None, prefix=None, text=False, dir=None):
44 """Create a unique temporary file
45
46 Returns the absolute path of a temporary filename that is
47 guaranteed to be unique. This function generates the file and
48 returns the filename.
49
50 """
51 if suffix is None:
52 suffix = ''
53 if prefix is None:
54 prefix = 'tmp'
55 if dir is None:
56 dir = self.tempdir
57 if dir is None and pyutilib_mngr is not None:
58 dir = pyutilib_mngr.tempdir
59 if dir is not None:
60 deprecation_warning(
61 "The use of the PyUtilib TempfileManager.tempdir "
62 "to specify the default location for Pyomo "
63 "temporary files has been deprecated. "
64 "Please set TempfileManager.tempdir in "
65 "pyomo.common.tempfiles", version='5.7.2')
66
67 ans = tempfile.mkstemp(suffix=suffix, prefix=prefix, text=text, dir=dir)
68 ans = list(ans)
69 if not os.path.isabs(ans[1]): #pragma:nocover
70 fname = os.path.join(dir, ans[1])
71 else:
72 fname = ans[1]
73 os.close(ans[0])
74 if self._ctr >= 0:
75 new_fname = os.path.join(dir, prefix + str(self._ctr) + suffix)
76 # Delete any file having the sequential name and then
77 # rename
78 if os.path.exists(new_fname):
79 os.remove(new_fname)
80 shutil.move(fname, new_fname)
81 fname = new_fname
82 self._ctr += 1
83 self._tempfiles[-1].append(fname)
84 return fname
85
86 def create_tempdir(self, suffix=None, prefix=None, dir=None):
87 """Create a unique temporary directory
88
89 Returns the absolute path of a temporary directory that is
90 guaranteed to be unique. This function generates the directory
91 and returns the directory name.
92
93 """
94 if suffix is None:
95 suffix = ''
96 if prefix is None:
97 prefix = 'tmp'
98 if dir is None:
99 dir = self.tempdir
100 if dir is None and pyutilib_mngr is not None:
101 dir = pyutilib_mngr.tempdir
102 if dir is not None:
103 deprecation_warning(
104 "The use of the PyUtilib TempfileManager.tempdir "
105 "to specify the default location for Pyomo "
106 "temporary directories has been deprecated. "
107 "Please set TempfileManager.tempdir in "
108 "pyomo.common.tempfiles", version='5.7.2')
109
110 dirname = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir)
111 if self._ctr >= 0:
112 new_dirname = os.path.join(dir, prefix + str(self._ctr) + suffix)
113 # Delete any directory having the sequential name and then
114 # rename
115 if os.path.exists(new_dirname):
116 shutil.rmtree(new_dirname)
117 shutil.move(dirname, new_dirname)
118 dirname = new_dirname
119 self._ctr += 1
120
121 self._tempfiles[-1].append(dirname)
122 return dirname
123
124 def add_tempfile(self, filename, exists=True):
125 """Declare this file to be temporary."""
126 tmp = os.path.abspath(filename)
127 if exists and not os.path.exists(tmp):
128 raise IOError("Temporary file does not exist: " + tmp)
129 self._tempfiles[-1].append(tmp)
130
131 def clear_tempfiles(self, remove=True):
132 """Delete all temporary files."""
133 while len(self._tempfiles) > 1:
134 self.pop(remove)
135 self.pop(remove)
136
137 def sequential_files(self, ctr=0):
138 """Start generating sequential files, using the specified counter"""
139 self._ctr = ctr
140
141 def unique_files(self):
142 """Stop generating sequential files, using the specified counter"""
143 self._ctr = -1
144
145 #
146 # Support "with" statements, where the pop automatically
147 # takes place on exit.
148 #
149 def push(self):
150 self._tempfiles.append([])
151 return self
152
153 def __enter__(self):
154 self.push()
155
156 def __exit__(self, type, value, traceback):
157 self.pop(remove=True)
158
159 def pop(self, remove=True):
160 files = self._tempfiles.pop()
161 if remove:
162 for filename in files:
163 if os.path.exists(filename):
164 if os.path.isdir(filename):
165 shutil.rmtree(
166 filename,
167 ignore_errors=not deletion_errors_are_fatal)
168 else:
169 try:
170 os.remove(filename)
171 except WindowsError:
172 # Sometimes Windows doesn't release the
173 # file lock immediately when the process
174 # terminates. If we get an error, wait a
175 # second and try again.
176 try:
177 time.sleep(1)
178 os.remove(filename)
179 except WindowsError:
180 if deletion_errors_are_fatal:
181 raise
182 else:
183 # Failure to delete a tempfile
184 # should NOT be fatal
185 logger = logging.getLogger(__name__)
186 logger.warning("Unable to delete temporary "
187 "file %s" % (filename,))
188
189 if len(self._tempfiles) == 0:
190 self._tempfiles = [[]]
191
192 TempfileManager = TempfileManagerClass()
193
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pyomo/common/tempfiles.py b/pyomo/common/tempfiles.py
--- a/pyomo/common/tempfiles.py
+++ b/pyomo/common/tempfiles.py
@@ -34,9 +34,8 @@
class TempfileManagerClass:
"""A class that manages temporary files."""
- tempdir = None
-
def __init__(self, **kwds):
+ self.tempdir = None
self._tempfiles = [[]]
self._ctr = -1
@@ -189,4 +188,5 @@
if len(self._tempfiles) == 0:
self._tempfiles = [[]]
+
TempfileManager = TempfileManagerClass()
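
The change above makes `tempdir` per-instance state (set in `__init__`) instead of a class attribute shared by every `TempfileManagerClass` instance, which is consistent with the reporter's observation that explicitly resetting `TempfileManager.tempdir` for each test avoids the error. A rough illustration of the difference, using toy stand-in classes rather than the pyomo code itself:

```python
# Toy stand-ins, not the pyomo implementation: they only show how a
# class-level default differs from a per-instance default.
class SharedDefault:
    tempdir = None              # one value shared by every instance


class PerInstanceDefault:
    def __init__(self):
        self.tempdir = None     # each instance starts with its own clean value


a, b = SharedDefault(), SharedDefault()
SharedDefault.tempdir = "/tmp/stale-dir"       # hypothetical stale path
assert a.tempdir == b.tempdir == "/tmp/stale-dir"

c, d = PerInstanceDefault(), PerInstanceDefault()
c.tempdir = "/tmp/stale-dir"
assert d.tempdir is None                       # unaffected by c
```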
| {"golden_diff": "diff --git a/pyomo/common/tempfiles.py b/pyomo/common/tempfiles.py\n--- a/pyomo/common/tempfiles.py\n+++ b/pyomo/common/tempfiles.py\n@@ -34,9 +34,8 @@\n class TempfileManagerClass:\n \"\"\"A class that manages temporary files.\"\"\"\n \n- tempdir = None\n-\n def __init__(self, **kwds):\n+ self.tempdir = None\n self._tempfiles = [[]]\n self._ctr = -1\n \n@@ -189,4 +188,5 @@\n if len(self._tempfiles) == 0:\n self._tempfiles = [[]]\n \n+\n TempfileManager = TempfileManagerClass()\n", "issue": "pyomo.common.tempfiles.TempfileManager raises FileNotFoundError\nI've recently started using pyomo.common.tempfiles.TempfileManager to set the temporary directory in a package, as in https://pyomo.readthedocs.io/en/stable/working_models.html#changing-the-temporary-directory. I was previously using TempfileManager from PyUtilib for about 4 years. \r\n\r\nMy tests now fail. Here is some of the trace:\r\n\r\n File \"/home/user/anaconda38/lib/python3.8/site-packages/pyomo/opt/base/solvers.py\", line 571, in solve\r\n self._presolve(*args, **kwds)\r\n File \"/home/user/anaconda38/lib/python3.8/site-packages/pyomo/solvers/plugins/solvers/CPLEX.py\", line 349, in _presolve\r\n ILMLicensedSystemCallSolver._presolve(self, *args, **kwds)\r\n File \"/home/user/anaconda38/lib/python3.8/site-packages/pyomo/opt/solver/shellcmd.py\", line 197, in _presolve\r\n OptSolver._presolve(self, *args, **kwds)\r\n File \"/home/user/anaconda38/lib/python3.8/site-packages/pyomo/opt/base/solvers.py\", line 668, in _presolve\r\n self._convert_problem(args,\r\n File \"/home/user/anaconda38/lib/python3.8/site-packages/pyomo/opt/base/solvers.py\", line 738, in _convert_problem\r\n return convert_problem(args,\r\n File \"/home/user/anaconda38/lib/python3.8/site-packages/pyomo/opt/base/convert.py\", line 105, in convert_problem\r\n problem_files, symbol_map = converter.apply(*tmp, **tmpkw)\r\n File \"/home/user/anaconda38/lib/python3.8/site-packages/pyomo/solvers/plugins/converter/model.py\", line 72, in apply\r\n problem_filename = TempfileManager.\\\r\n File \"/home/user/anaconda38/lib/python3.8/site-packages/pyomo/common/tempfiles.py\", line 67, in create_tempfile\r\n ans = tempfile.mkstemp(suffix=suffix, prefix=prefix, text=text, dir=dir)\r\n File \"/home/user/anaconda38/lib/python3.8/tempfile.py\", line 332, in mkstemp\r\n return _mkstemp_inner(dir, prefix, suffix, flags, output_type)\r\n File \"/home/user/anaconda38/lib/python3.8/tempfile.py\", line 250, in _mkstemp_inner\r\n fd = _os.open(file, flags, 0o600)\r\nFileNotFoundError: [Errno 2] No such file or directory: '/tmp/tmpc6s9g6hf/tmpjoxugx27.pyomo.lp'\r\n\r\nIf I run the tests individually on the commandline, then the FileNotFoundError does not occur.\r\n\r\nIf I run all the tests from a script, then the FileNotFoundError does occur.\r\n\r\nIf I run all the tests from the same script, but change the order of the tests, then the FileNotFoundError still occurs but during a different test.\r\n\r\nNote that in all tests, I'm not acutally setting TempfileManager.tempdir. It appears in a method, but this method is not called during these tests. 
So just the import \"from pyomo.common.tempfiles import TempfileManager\" is being run.\r\n\r\nNow if I change my code so that \"TempfileManager.tempdir = None\" is always called for each test, then the FileNotFoundError no longer occurs.\r\n\r\nCan you help?\r\n\r\nI'm using Python 3.8.5 from Anaconda, Pyomo 5.7.2 and Ubuntu 18.04 LTS.\r\n\r\nThanks,\r\nJason\n", "before_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n#\n# This module was originally developed as part of the PyUtilib project\n# Copyright (c) 2008 Sandia Corporation.\n# This software is distributed under the BSD License.\n# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,\n# the U.S. Government retains certain rights in this software.\n# ___________________________________________________________________________\n\nimport os\nimport time\nimport tempfile\nimport logging\nimport shutil\nfrom pyomo.common.deprecation import deprecation_warning\ntry:\n from pyutilib.component.config.tempfiles import (\n TempfileManager as pyutilib_mngr\n )\nexcept ImportError:\n pyutilib_mngr = None\n\ndeletion_errors_are_fatal = True\n\n\nclass TempfileManagerClass:\n \"\"\"A class that manages temporary files.\"\"\"\n\n tempdir = None\n\n def __init__(self, **kwds):\n self._tempfiles = [[]]\n self._ctr = -1\n\n def create_tempfile(self, suffix=None, prefix=None, text=False, dir=None):\n \"\"\"Create a unique temporary file\n\n Returns the absolute path of a temporary filename that is\n guaranteed to be unique. This function generates the file and\n returns the filename.\n\n \"\"\"\n if suffix is None:\n suffix = ''\n if prefix is None:\n prefix = 'tmp'\n if dir is None:\n dir = self.tempdir\n if dir is None and pyutilib_mngr is not None:\n dir = pyutilib_mngr.tempdir\n if dir is not None:\n deprecation_warning(\n \"The use of the PyUtilib TempfileManager.tempdir \"\n \"to specify the default location for Pyomo \"\n \"temporary files has been deprecated. \"\n \"Please set TempfileManager.tempdir in \"\n \"pyomo.common.tempfiles\", version='5.7.2')\n\n ans = tempfile.mkstemp(suffix=suffix, prefix=prefix, text=text, dir=dir)\n ans = list(ans)\n if not os.path.isabs(ans[1]): #pragma:nocover\n fname = os.path.join(dir, ans[1])\n else:\n fname = ans[1]\n os.close(ans[0])\n if self._ctr >= 0:\n new_fname = os.path.join(dir, prefix + str(self._ctr) + suffix)\n # Delete any file having the sequential name and then\n # rename\n if os.path.exists(new_fname):\n os.remove(new_fname)\n shutil.move(fname, new_fname)\n fname = new_fname\n self._ctr += 1\n self._tempfiles[-1].append(fname)\n return fname\n\n def create_tempdir(self, suffix=None, prefix=None, dir=None):\n \"\"\"Create a unique temporary directory\n\n Returns the absolute path of a temporary directory that is\n guaranteed to be unique. 
This function generates the directory\n and returns the directory name.\n\n \"\"\"\n if suffix is None:\n suffix = ''\n if prefix is None:\n prefix = 'tmp'\n if dir is None:\n dir = self.tempdir\n if dir is None and pyutilib_mngr is not None:\n dir = pyutilib_mngr.tempdir\n if dir is not None:\n deprecation_warning(\n \"The use of the PyUtilib TempfileManager.tempdir \"\n \"to specify the default location for Pyomo \"\n \"temporary directories has been deprecated. \"\n \"Please set TempfileManager.tempdir in \"\n \"pyomo.common.tempfiles\", version='5.7.2')\n\n dirname = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir)\n if self._ctr >= 0:\n new_dirname = os.path.join(dir, prefix + str(self._ctr) + suffix)\n # Delete any directory having the sequential name and then\n # rename\n if os.path.exists(new_dirname):\n shutil.rmtree(new_dirname)\n shutil.move(dirname, new_dirname)\n dirname = new_dirname\n self._ctr += 1\n\n self._tempfiles[-1].append(dirname)\n return dirname\n\n def add_tempfile(self, filename, exists=True):\n \"\"\"Declare this file to be temporary.\"\"\"\n tmp = os.path.abspath(filename)\n if exists and not os.path.exists(tmp):\n raise IOError(\"Temporary file does not exist: \" + tmp)\n self._tempfiles[-1].append(tmp)\n\n def clear_tempfiles(self, remove=True):\n \"\"\"Delete all temporary files.\"\"\"\n while len(self._tempfiles) > 1:\n self.pop(remove)\n self.pop(remove)\n\n def sequential_files(self, ctr=0):\n \"\"\"Start generating sequential files, using the specified counter\"\"\"\n self._ctr = ctr\n\n def unique_files(self):\n \"\"\"Stop generating sequential files, using the specified counter\"\"\"\n self._ctr = -1\n\n #\n # Support \"with\" statements, where the pop automatically\n # takes place on exit.\n #\n def push(self):\n self._tempfiles.append([])\n return self\n\n def __enter__(self):\n self.push()\n\n def __exit__(self, type, value, traceback):\n self.pop(remove=True)\n\n def pop(self, remove=True):\n files = self._tempfiles.pop()\n if remove:\n for filename in files:\n if os.path.exists(filename):\n if os.path.isdir(filename):\n shutil.rmtree(\n filename,\n ignore_errors=not deletion_errors_are_fatal)\n else:\n try:\n os.remove(filename)\n except WindowsError:\n # Sometimes Windows doesn't release the\n # file lock immediately when the process\n # terminates. If we get an error, wait a\n # second and try again.\n try:\n time.sleep(1)\n os.remove(filename)\n except WindowsError:\n if deletion_errors_are_fatal:\n raise\n else:\n # Failure to delete a tempfile\n # should NOT be fatal\n logger = logging.getLogger(__name__)\n logger.warning(\"Unable to delete temporary \"\n \"file %s\" % (filename,))\n\n if len(self._tempfiles) == 0:\n self._tempfiles = [[]]\n\nTempfileManager = TempfileManagerClass()\n", "path": "pyomo/common/tempfiles.py"}], "after_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. 
Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n#\n# This module was originally developed as part of the PyUtilib project\n# Copyright (c) 2008 Sandia Corporation.\n# This software is distributed under the BSD License.\n# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,\n# the U.S. Government retains certain rights in this software.\n# ___________________________________________________________________________\n\nimport os\nimport time\nimport tempfile\nimport logging\nimport shutil\nfrom pyomo.common.deprecation import deprecation_warning\ntry:\n from pyutilib.component.config.tempfiles import (\n TempfileManager as pyutilib_mngr\n )\nexcept ImportError:\n pyutilib_mngr = None\n\ndeletion_errors_are_fatal = True\n\n\nclass TempfileManagerClass:\n \"\"\"A class that manages temporary files.\"\"\"\n\n def __init__(self, **kwds):\n self.tempdir = None\n self._tempfiles = [[]]\n self._ctr = -1\n\n def create_tempfile(self, suffix=None, prefix=None, text=False, dir=None):\n \"\"\"Create a unique temporary file\n\n Returns the absolute path of a temporary filename that is\n guaranteed to be unique. This function generates the file and\n returns the filename.\n\n \"\"\"\n if suffix is None:\n suffix = ''\n if prefix is None:\n prefix = 'tmp'\n if dir is None:\n dir = self.tempdir\n if dir is None and pyutilib_mngr is not None:\n dir = pyutilib_mngr.tempdir\n if dir is not None:\n deprecation_warning(\n \"The use of the PyUtilib TempfileManager.tempdir \"\n \"to specify the default location for Pyomo \"\n \"temporary files has been deprecated. \"\n \"Please set TempfileManager.tempdir in \"\n \"pyomo.common.tempfiles\", version='5.7.2')\n\n ans = tempfile.mkstemp(suffix=suffix, prefix=prefix, text=text, dir=dir)\n ans = list(ans)\n if not os.path.isabs(ans[1]): #pragma:nocover\n fname = os.path.join(dir, ans[1])\n else:\n fname = ans[1]\n os.close(ans[0])\n if self._ctr >= 0:\n new_fname = os.path.join(dir, prefix + str(self._ctr) + suffix)\n # Delete any file having the sequential name and then\n # rename\n if os.path.exists(new_fname):\n os.remove(new_fname)\n shutil.move(fname, new_fname)\n fname = new_fname\n self._ctr += 1\n self._tempfiles[-1].append(fname)\n return fname\n\n def create_tempdir(self, suffix=None, prefix=None, dir=None):\n \"\"\"Create a unique temporary directory\n\n Returns the absolute path of a temporary directory that is\n guaranteed to be unique. This function generates the directory\n and returns the directory name.\n\n \"\"\"\n if suffix is None:\n suffix = ''\n if prefix is None:\n prefix = 'tmp'\n if dir is None:\n dir = self.tempdir\n if dir is None and pyutilib_mngr is not None:\n dir = pyutilib_mngr.tempdir\n if dir is not None:\n deprecation_warning(\n \"The use of the PyUtilib TempfileManager.tempdir \"\n \"to specify the default location for Pyomo \"\n \"temporary directories has been deprecated. 
\"\n \"Please set TempfileManager.tempdir in \"\n \"pyomo.common.tempfiles\", version='5.7.2')\n\n dirname = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir)\n if self._ctr >= 0:\n new_dirname = os.path.join(dir, prefix + str(self._ctr) + suffix)\n # Delete any directory having the sequential name and then\n # rename\n if os.path.exists(new_dirname):\n shutil.rmtree(new_dirname)\n shutil.move(dirname, new_dirname)\n dirname = new_dirname\n self._ctr += 1\n\n self._tempfiles[-1].append(dirname)\n return dirname\n\n def add_tempfile(self, filename, exists=True):\n \"\"\"Declare this file to be temporary.\"\"\"\n tmp = os.path.abspath(filename)\n if exists and not os.path.exists(tmp):\n raise IOError(\"Temporary file does not exist: \" + tmp)\n self._tempfiles[-1].append(tmp)\n\n def clear_tempfiles(self, remove=True):\n \"\"\"Delete all temporary files.\"\"\"\n while len(self._tempfiles) > 1:\n self.pop(remove)\n self.pop(remove)\n\n def sequential_files(self, ctr=0):\n \"\"\"Start generating sequential files, using the specified counter\"\"\"\n self._ctr = ctr\n\n def unique_files(self):\n \"\"\"Stop generating sequential files, using the specified counter\"\"\"\n self._ctr = -1\n\n #\n # Support \"with\" statements, where the pop automatically\n # takes place on exit.\n #\n def push(self):\n self._tempfiles.append([])\n return self\n\n def __enter__(self):\n self.push()\n\n def __exit__(self, type, value, traceback):\n self.pop(remove=True)\n\n def pop(self, remove=True):\n files = self._tempfiles.pop()\n if remove:\n for filename in files:\n if os.path.exists(filename):\n if os.path.isdir(filename):\n shutil.rmtree(\n filename,\n ignore_errors=not deletion_errors_are_fatal)\n else:\n try:\n os.remove(filename)\n except WindowsError:\n # Sometimes Windows doesn't release the\n # file lock immediately when the process\n # terminates. If we get an error, wait a\n # second and try again.\n try:\n time.sleep(1)\n os.remove(filename)\n except WindowsError:\n if deletion_errors_are_fatal:\n raise\n else:\n # Failure to delete a tempfile\n # should NOT be fatal\n logger = logging.getLogger(__name__)\n logger.warning(\"Unable to delete temporary \"\n \"file %s\" % (filename,))\n\n if len(self._tempfiles) == 0:\n self._tempfiles = [[]]\n\n\nTempfileManager = TempfileManagerClass()\n", "path": "pyomo/common/tempfiles.py"}]} | 2,996 | 152 |
gh_patches_debug_19298 | rasdani/github-patches | git_diff | pyca__cryptography-6865 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can load PKCS12 with ED25519 Keys but cannot Serialize them
Why does pkcs12.serialize_key_and_certificates() still reject Ed25519 private keys? cryptography has no problem loading PKCS#12 files which contain Ed25519 private keys and related certificates.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cryptography/hazmat/primitives/serialization/pkcs12.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 import typing
6
7 from cryptography import x509
8 from cryptography.hazmat.primitives import serialization
9 from cryptography.hazmat.primitives.asymmetric import (
10 dsa,
11 ec,
12 ed25519,
13 ed448,
14 rsa,
15 )
16 from cryptography.hazmat.primitives.asymmetric.types import (
17 PRIVATE_KEY_TYPES,
18 )
19
20
21 _ALLOWED_PKCS12_TYPES = typing.Union[
22 rsa.RSAPrivateKey,
23 dsa.DSAPrivateKey,
24 ec.EllipticCurvePrivateKey,
25 ]
26
27
28 class PKCS12Certificate:
29 def __init__(
30 self,
31 cert: x509.Certificate,
32 friendly_name: typing.Optional[bytes],
33 ):
34 if not isinstance(cert, x509.Certificate):
35 raise TypeError("Expecting x509.Certificate object")
36 if friendly_name is not None and not isinstance(friendly_name, bytes):
37 raise TypeError("friendly_name must be bytes or None")
38 self._cert = cert
39 self._friendly_name = friendly_name
40
41 @property
42 def friendly_name(self) -> typing.Optional[bytes]:
43 return self._friendly_name
44
45 @property
46 def certificate(self) -> x509.Certificate:
47 return self._cert
48
49 def __eq__(self, other: object) -> bool:
50 if not isinstance(other, PKCS12Certificate):
51 return NotImplemented
52
53 return (
54 self.certificate == other.certificate
55 and self.friendly_name == other.friendly_name
56 )
57
58 def __ne__(self, other: object) -> bool:
59 return not self == other
60
61 def __hash__(self) -> int:
62 return hash((self.certificate, self.friendly_name))
63
64 def __repr__(self) -> str:
65 return "<PKCS12Certificate({}, friendly_name={!r})>".format(
66 self.certificate, self.friendly_name
67 )
68
69
70 class PKCS12KeyAndCertificates:
71 def __init__(
72 self,
73 key: typing.Optional[PRIVATE_KEY_TYPES],
74 cert: typing.Optional[PKCS12Certificate],
75 additional_certs: typing.List[PKCS12Certificate],
76 ):
77 if key is not None and not isinstance(
78 key,
79 (
80 rsa.RSAPrivateKey,
81 dsa.DSAPrivateKey,
82 ec.EllipticCurvePrivateKey,
83 ed25519.Ed25519PrivateKey,
84 ed448.Ed448PrivateKey,
85 ),
86 ):
87 raise TypeError(
88 "Key must be RSA, DSA, EllipticCurve, ED25519, or ED448"
89 " private key, or None."
90 )
91 if cert is not None and not isinstance(cert, PKCS12Certificate):
92 raise TypeError("cert must be a PKCS12Certificate object or None")
93 if not all(
94 isinstance(add_cert, PKCS12Certificate)
95 for add_cert in additional_certs
96 ):
97 raise TypeError(
98 "all values in additional_certs must be PKCS12Certificate"
99 " objects"
100 )
101 self._key = key
102 self._cert = cert
103 self._additional_certs = additional_certs
104
105 @property
106 def key(self) -> typing.Optional[PRIVATE_KEY_TYPES]:
107 return self._key
108
109 @property
110 def cert(self) -> typing.Optional[PKCS12Certificate]:
111 return self._cert
112
113 @property
114 def additional_certs(self) -> typing.List[PKCS12Certificate]:
115 return self._additional_certs
116
117 def __eq__(self, other: object) -> bool:
118 if not isinstance(other, PKCS12KeyAndCertificates):
119 return NotImplemented
120
121 return (
122 self.key == other.key
123 and self.cert == other.cert
124 and self.additional_certs == other.additional_certs
125 )
126
127 def __ne__(self, other: object) -> bool:
128 return not self == other
129
130 def __hash__(self) -> int:
131 return hash((self.key, self.cert, tuple(self.additional_certs)))
132
133 def __repr__(self) -> str:
134 fmt = (
135 "<PKCS12KeyAndCertificates(key={}, cert={}, additional_certs={})>"
136 )
137 return fmt.format(self.key, self.cert, self.additional_certs)
138
139
140 def load_key_and_certificates(
141 data: bytes,
142 password: typing.Optional[bytes],
143 backend: typing.Any = None,
144 ) -> typing.Tuple[
145 typing.Optional[PRIVATE_KEY_TYPES],
146 typing.Optional[x509.Certificate],
147 typing.List[x509.Certificate],
148 ]:
149 from cryptography.hazmat.backends.openssl.backend import backend as ossl
150
151 return ossl.load_key_and_certificates_from_pkcs12(data, password)
152
153
154 def load_pkcs12(
155 data: bytes,
156 password: typing.Optional[bytes],
157 backend: typing.Any = None,
158 ) -> PKCS12KeyAndCertificates:
159 from cryptography.hazmat.backends.openssl.backend import backend as ossl
160
161 return ossl.load_pkcs12(data, password)
162
163
164 def serialize_key_and_certificates(
165 name: typing.Optional[bytes],
166 key: typing.Optional[_ALLOWED_PKCS12_TYPES],
167 cert: typing.Optional[x509.Certificate],
168 cas: typing.Optional[typing.Iterable[x509.Certificate]],
169 encryption_algorithm: serialization.KeySerializationEncryption,
170 ) -> bytes:
171 if key is not None and not isinstance(
172 key,
173 (
174 rsa.RSAPrivateKey,
175 dsa.DSAPrivateKey,
176 ec.EllipticCurvePrivateKey,
177 ),
178 ):
179 raise TypeError(
180 "Key must be RSA, DSA, or EllipticCurve private key or None."
181 )
182 if cert is not None and not isinstance(cert, x509.Certificate):
183 raise TypeError("cert must be a certificate or None")
184
185 if cas is not None:
186 cas = list(cas)
187 if not all(isinstance(val, x509.Certificate) for val in cas):
188 raise TypeError("all values in cas must be certificates")
189
190 if not isinstance(
191 encryption_algorithm, serialization.KeySerializationEncryption
192 ):
193 raise TypeError(
194 "Key encryption algorithm must be a "
195 "KeySerializationEncryption instance"
196 )
197
198 if key is None and cert is None and not cas:
199 raise ValueError("You must supply at least one of key, cert, or cas")
200
201 from cryptography.hazmat.backends.openssl.backend import backend
202
203 return backend.serialize_key_and_certificates_to_pkcs12(
204 name, key, cert, cas, encryption_algorithm
205 )
206
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cryptography/hazmat/primitives/serialization/pkcs12.py b/src/cryptography/hazmat/primitives/serialization/pkcs12.py
--- a/src/cryptography/hazmat/primitives/serialization/pkcs12.py
+++ b/src/cryptography/hazmat/primitives/serialization/pkcs12.py
@@ -22,6 +22,8 @@
rsa.RSAPrivateKey,
dsa.DSAPrivateKey,
ec.EllipticCurvePrivateKey,
+ ed25519.Ed25519PrivateKey,
+ ed448.Ed448PrivateKey,
]
@@ -174,10 +176,13 @@
rsa.RSAPrivateKey,
dsa.DSAPrivateKey,
ec.EllipticCurvePrivateKey,
+ ed25519.Ed25519PrivateKey,
+ ed448.Ed448PrivateKey,
),
):
raise TypeError(
- "Key must be RSA, DSA, or EllipticCurve private key or None."
+ "Key must be RSA, DSA, EllipticCurve, ED25519, or ED448"
+ " private key, or None."
)
if cert is not None and not isinstance(cert, x509.Certificate):
raise TypeError("cert must be a certificate or None")
| {"golden_diff": "diff --git a/src/cryptography/hazmat/primitives/serialization/pkcs12.py b/src/cryptography/hazmat/primitives/serialization/pkcs12.py\n--- a/src/cryptography/hazmat/primitives/serialization/pkcs12.py\n+++ b/src/cryptography/hazmat/primitives/serialization/pkcs12.py\n@@ -22,6 +22,8 @@\n rsa.RSAPrivateKey,\n dsa.DSAPrivateKey,\n ec.EllipticCurvePrivateKey,\n+ ed25519.Ed25519PrivateKey,\n+ ed448.Ed448PrivateKey,\n ]\n \n \n@@ -174,10 +176,13 @@\n rsa.RSAPrivateKey,\n dsa.DSAPrivateKey,\n ec.EllipticCurvePrivateKey,\n+ ed25519.Ed25519PrivateKey,\n+ ed448.Ed448PrivateKey,\n ),\n ):\n raise TypeError(\n- \"Key must be RSA, DSA, or EllipticCurve private key or None.\"\n+ \"Key must be RSA, DSA, EllipticCurve, ED25519, or ED448\"\n+ \" private key, or None.\"\n )\n if cert is not None and not isinstance(cert, x509.Certificate):\n raise TypeError(\"cert must be a certificate or None\")\n", "issue": "Can load PKCS12 with ED25519 Keys but cannot Serialize them\nWhy does the pkcs12.serialize_key_and_certificates() still sanitize against ed private keys? cryptography has no problem loading pkcs12 files which contain ed25519 private keys and related certificates.\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nimport typing\n\nfrom cryptography import x509\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.primitives.asymmetric import (\n dsa,\n ec,\n ed25519,\n ed448,\n rsa,\n)\nfrom cryptography.hazmat.primitives.asymmetric.types import (\n PRIVATE_KEY_TYPES,\n)\n\n\n_ALLOWED_PKCS12_TYPES = typing.Union[\n rsa.RSAPrivateKey,\n dsa.DSAPrivateKey,\n ec.EllipticCurvePrivateKey,\n]\n\n\nclass PKCS12Certificate:\n def __init__(\n self,\n cert: x509.Certificate,\n friendly_name: typing.Optional[bytes],\n ):\n if not isinstance(cert, x509.Certificate):\n raise TypeError(\"Expecting x509.Certificate object\")\n if friendly_name is not None and not isinstance(friendly_name, bytes):\n raise TypeError(\"friendly_name must be bytes or None\")\n self._cert = cert\n self._friendly_name = friendly_name\n\n @property\n def friendly_name(self) -> typing.Optional[bytes]:\n return self._friendly_name\n\n @property\n def certificate(self) -> x509.Certificate:\n return self._cert\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, PKCS12Certificate):\n return NotImplemented\n\n return (\n self.certificate == other.certificate\n and self.friendly_name == other.friendly_name\n )\n\n def __ne__(self, other: object) -> bool:\n return not self == other\n\n def __hash__(self) -> int:\n return hash((self.certificate, self.friendly_name))\n\n def __repr__(self) -> str:\n return \"<PKCS12Certificate({}, friendly_name={!r})>\".format(\n self.certificate, self.friendly_name\n )\n\n\nclass PKCS12KeyAndCertificates:\n def __init__(\n self,\n key: typing.Optional[PRIVATE_KEY_TYPES],\n cert: typing.Optional[PKCS12Certificate],\n additional_certs: typing.List[PKCS12Certificate],\n ):\n if key is not None and not isinstance(\n key,\n (\n rsa.RSAPrivateKey,\n dsa.DSAPrivateKey,\n ec.EllipticCurvePrivateKey,\n ed25519.Ed25519PrivateKey,\n ed448.Ed448PrivateKey,\n ),\n ):\n raise TypeError(\n \"Key must be RSA, DSA, EllipticCurve, ED25519, or ED448\"\n \" private key, or None.\"\n )\n if cert is not None and not isinstance(cert, PKCS12Certificate):\n raise TypeError(\"cert must be a PKCS12Certificate 
object or None\")\n if not all(\n isinstance(add_cert, PKCS12Certificate)\n for add_cert in additional_certs\n ):\n raise TypeError(\n \"all values in additional_certs must be PKCS12Certificate\"\n \" objects\"\n )\n self._key = key\n self._cert = cert\n self._additional_certs = additional_certs\n\n @property\n def key(self) -> typing.Optional[PRIVATE_KEY_TYPES]:\n return self._key\n\n @property\n def cert(self) -> typing.Optional[PKCS12Certificate]:\n return self._cert\n\n @property\n def additional_certs(self) -> typing.List[PKCS12Certificate]:\n return self._additional_certs\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, PKCS12KeyAndCertificates):\n return NotImplemented\n\n return (\n self.key == other.key\n and self.cert == other.cert\n and self.additional_certs == other.additional_certs\n )\n\n def __ne__(self, other: object) -> bool:\n return not self == other\n\n def __hash__(self) -> int:\n return hash((self.key, self.cert, tuple(self.additional_certs)))\n\n def __repr__(self) -> str:\n fmt = (\n \"<PKCS12KeyAndCertificates(key={}, cert={}, additional_certs={})>\"\n )\n return fmt.format(self.key, self.cert, self.additional_certs)\n\n\ndef load_key_and_certificates(\n data: bytes,\n password: typing.Optional[bytes],\n backend: typing.Any = None,\n) -> typing.Tuple[\n typing.Optional[PRIVATE_KEY_TYPES],\n typing.Optional[x509.Certificate],\n typing.List[x509.Certificate],\n]:\n from cryptography.hazmat.backends.openssl.backend import backend as ossl\n\n return ossl.load_key_and_certificates_from_pkcs12(data, password)\n\n\ndef load_pkcs12(\n data: bytes,\n password: typing.Optional[bytes],\n backend: typing.Any = None,\n) -> PKCS12KeyAndCertificates:\n from cryptography.hazmat.backends.openssl.backend import backend as ossl\n\n return ossl.load_pkcs12(data, password)\n\n\ndef serialize_key_and_certificates(\n name: typing.Optional[bytes],\n key: typing.Optional[_ALLOWED_PKCS12_TYPES],\n cert: typing.Optional[x509.Certificate],\n cas: typing.Optional[typing.Iterable[x509.Certificate]],\n encryption_algorithm: serialization.KeySerializationEncryption,\n) -> bytes:\n if key is not None and not isinstance(\n key,\n (\n rsa.RSAPrivateKey,\n dsa.DSAPrivateKey,\n ec.EllipticCurvePrivateKey,\n ),\n ):\n raise TypeError(\n \"Key must be RSA, DSA, or EllipticCurve private key or None.\"\n )\n if cert is not None and not isinstance(cert, x509.Certificate):\n raise TypeError(\"cert must be a certificate or None\")\n\n if cas is not None:\n cas = list(cas)\n if not all(isinstance(val, x509.Certificate) for val in cas):\n raise TypeError(\"all values in cas must be certificates\")\n\n if not isinstance(\n encryption_algorithm, serialization.KeySerializationEncryption\n ):\n raise TypeError(\n \"Key encryption algorithm must be a \"\n \"KeySerializationEncryption instance\"\n )\n\n if key is None and cert is None and not cas:\n raise ValueError(\"You must supply at least one of key, cert, or cas\")\n\n from cryptography.hazmat.backends.openssl.backend import backend\n\n return backend.serialize_key_and_certificates_to_pkcs12(\n name, key, cert, cas, encryption_algorithm\n )\n", "path": "src/cryptography/hazmat/primitives/serialization/pkcs12.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nimport typing\n\nfrom cryptography import x509\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.primitives.asymmetric import (\n dsa,\n ec,\n ed25519,\n ed448,\n rsa,\n)\nfrom cryptography.hazmat.primitives.asymmetric.types import (\n PRIVATE_KEY_TYPES,\n)\n\n\n_ALLOWED_PKCS12_TYPES = typing.Union[\n rsa.RSAPrivateKey,\n dsa.DSAPrivateKey,\n ec.EllipticCurvePrivateKey,\n ed25519.Ed25519PrivateKey,\n ed448.Ed448PrivateKey,\n]\n\n\nclass PKCS12Certificate:\n def __init__(\n self,\n cert: x509.Certificate,\n friendly_name: typing.Optional[bytes],\n ):\n if not isinstance(cert, x509.Certificate):\n raise TypeError(\"Expecting x509.Certificate object\")\n if friendly_name is not None and not isinstance(friendly_name, bytes):\n raise TypeError(\"friendly_name must be bytes or None\")\n self._cert = cert\n self._friendly_name = friendly_name\n\n @property\n def friendly_name(self) -> typing.Optional[bytes]:\n return self._friendly_name\n\n @property\n def certificate(self) -> x509.Certificate:\n return self._cert\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, PKCS12Certificate):\n return NotImplemented\n\n return (\n self.certificate == other.certificate\n and self.friendly_name == other.friendly_name\n )\n\n def __ne__(self, other: object) -> bool:\n return not self == other\n\n def __hash__(self) -> int:\n return hash((self.certificate, self.friendly_name))\n\n def __repr__(self) -> str:\n return \"<PKCS12Certificate({}, friendly_name={!r})>\".format(\n self.certificate, self.friendly_name\n )\n\n\nclass PKCS12KeyAndCertificates:\n def __init__(\n self,\n key: typing.Optional[PRIVATE_KEY_TYPES],\n cert: typing.Optional[PKCS12Certificate],\n additional_certs: typing.List[PKCS12Certificate],\n ):\n if key is not None and not isinstance(\n key,\n (\n rsa.RSAPrivateKey,\n dsa.DSAPrivateKey,\n ec.EllipticCurvePrivateKey,\n ed25519.Ed25519PrivateKey,\n ed448.Ed448PrivateKey,\n ),\n ):\n raise TypeError(\n \"Key must be RSA, DSA, EllipticCurve, ED25519, or ED448\"\n \" private key, or None.\"\n )\n if cert is not None and not isinstance(cert, PKCS12Certificate):\n raise TypeError(\"cert must be a PKCS12Certificate object or None\")\n if not all(\n isinstance(add_cert, PKCS12Certificate)\n for add_cert in additional_certs\n ):\n raise TypeError(\n \"all values in additional_certs must be PKCS12Certificate\"\n \" objects\"\n )\n self._key = key\n self._cert = cert\n self._additional_certs = additional_certs\n\n @property\n def key(self) -> typing.Optional[PRIVATE_KEY_TYPES]:\n return self._key\n\n @property\n def cert(self) -> typing.Optional[PKCS12Certificate]:\n return self._cert\n\n @property\n def additional_certs(self) -> typing.List[PKCS12Certificate]:\n return self._additional_certs\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, PKCS12KeyAndCertificates):\n return NotImplemented\n\n return (\n self.key == other.key\n and self.cert == other.cert\n and self.additional_certs == other.additional_certs\n )\n\n def __ne__(self, other: object) -> bool:\n return not self == other\n\n def __hash__(self) -> int:\n return hash((self.key, self.cert, tuple(self.additional_certs)))\n\n def __repr__(self) -> str:\n fmt = (\n \"<PKCS12KeyAndCertificates(key={}, cert={}, additional_certs={})>\"\n )\n return fmt.format(self.key, self.cert, self.additional_certs)\n\n\ndef load_key_and_certificates(\n data: bytes,\n password: typing.Optional[bytes],\n 
backend: typing.Any = None,\n) -> typing.Tuple[\n typing.Optional[PRIVATE_KEY_TYPES],\n typing.Optional[x509.Certificate],\n typing.List[x509.Certificate],\n]:\n from cryptography.hazmat.backends.openssl.backend import backend as ossl\n\n return ossl.load_key_and_certificates_from_pkcs12(data, password)\n\n\ndef load_pkcs12(\n data: bytes,\n password: typing.Optional[bytes],\n backend: typing.Any = None,\n) -> PKCS12KeyAndCertificates:\n from cryptography.hazmat.backends.openssl.backend import backend as ossl\n\n return ossl.load_pkcs12(data, password)\n\n\ndef serialize_key_and_certificates(\n name: typing.Optional[bytes],\n key: typing.Optional[_ALLOWED_PKCS12_TYPES],\n cert: typing.Optional[x509.Certificate],\n cas: typing.Optional[typing.Iterable[x509.Certificate]],\n encryption_algorithm: serialization.KeySerializationEncryption,\n) -> bytes:\n if key is not None and not isinstance(\n key,\n (\n rsa.RSAPrivateKey,\n dsa.DSAPrivateKey,\n ec.EllipticCurvePrivateKey,\n ed25519.Ed25519PrivateKey,\n ed448.Ed448PrivateKey,\n ),\n ):\n raise TypeError(\n \"Key must be RSA, DSA, EllipticCurve, ED25519, or ED448\"\n \" private key, or None.\"\n )\n if cert is not None and not isinstance(cert, x509.Certificate):\n raise TypeError(\"cert must be a certificate or None\")\n\n if cas is not None:\n cas = list(cas)\n if not all(isinstance(val, x509.Certificate) for val in cas):\n raise TypeError(\"all values in cas must be certificates\")\n\n if not isinstance(\n encryption_algorithm, serialization.KeySerializationEncryption\n ):\n raise TypeError(\n \"Key encryption algorithm must be a \"\n \"KeySerializationEncryption instance\"\n )\n\n if key is None and cert is None and not cas:\n raise ValueError(\"You must supply at least one of key, cert, or cas\")\n\n from cryptography.hazmat.backends.openssl.backend import backend\n\n return backend.serialize_key_and_certificates_to_pkcs12(\n name, key, cert, cas, encryption_algorithm\n )\n", "path": "src/cryptography/hazmat/primitives/serialization/pkcs12.py"}]} | 2,345 | 317 |
gh_patches_debug_31439 | rasdani/github-patches | git_diff | ethereum__consensus-specs-758 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Miscellaneous beacon chain changes—take 4
(See #128, #218, #322 for takes 1, 2, 3.)
Below is a list of miscellaneous suggestions for phase 0, most of which were discussed on the researcher's call on Feb 19. This issue keeps track of some of the phase 0 work remaining.
- [x] 1. **Friendlier GENESIS_SLOT**: Implemented in #655.
- [x] 2. **Granular state roots**: Expose state roots at every slot. Implemented in #649.
- [x] 3. **Previous block root reconstruction**: Provide enough information in `state` to reconstruct the current block's `previous_block_root`. Implemented in #649.
- [x] 4. **Define genesis Eth1 data**: Implemented in #649.
- [x] 5. **Mandatory deposits**: Mandatory processing of pending deposits.
- [x] 6. **Transfers during pre-activation**: Allow not-yet-activated validators to make transfers.
- [x] 7. **LMD GHOST tie breaker**: Compare block hashes to tie-break LMD GHOST.
- [ ] 8. **Maximum reversions**: Enshrine dynamic weak subjectivity revert period. See #577.
- [x] 9. **Double justifications**: Specify fork choice rule when there are two justified blocks at the same height. (Possible solution: ignore both and fall back to the previous highest justified block.)
- [x] 10. **Move to SHA256**: See #612.
- [ ] 11. **Standardise BLS12-381**: See #605.
- [ ] 12. **Performance parameters tuning**: Fine-tune `SECONDS_PER_SLOT`, `SHARD_COUNT`, etc. based on benchmarks.
- [ ] 13a. **Increase proposer rewards**: See #621. Need to check incentive compatibility with inclusion distance reward.
- [x] 13b. **Incentive-compatible proposer rewards**: Make proposer rewards proportional to balance.
- [ ] 14. **Increase rewards in general**: Calculate theoretical max issuance rate and work backwards to expected issuance.
- [x] 15. **Reduce SSZ_CHUNK_SIZE to 32**: See #603 and #696.
- [x] 16. **SSZ tuples**: See #665 and #696.
- [x] <s>17. **Immediately withdrawable if bad proof of possession**: See #657.</s>
- [x] 18. **4-byte working balance**: See #685.
- [x] 19. **Merkleisation-friendly pending attestations**: See #697.
- [ ] 20. **Fine-tune container field ordering**: To do with constants fine-tuning.
- [x] 21. **Minimum activation period**: See [here](https://github.com/ethereum/eth2.0-specs/issues/675#issuecomment-468159678) and [here](https://github.com/ethereum/eth2.0-specs/pull/746).
- [x] 22. **Milder ejections**: Replace `exit_validator` by `initiate_validator_exit` in `process_ejections`.
- [x] 23. **Improved rate limiting**: Change the rate limiting logic (for entry/exit/withdrawal) based on [this Ethresear.ch post](https://ethresear.ch/t/rate-limiting-entry-exits-not-withdrawals/4942).
- [x] 24. **Epoch transitions at start of epoch**: Instead of at the very end of the epoch.
- [x] 25. **Epoch-based proposer slashing**: As opposed to slot-based.
- [x] 26. **Genesis epochs**: Use `GENESIS_EPOCH - 1` for `previous_shuffling_epoch` and maybe `previous_shuffling_epoch`.
- [x] <s>27. **No backfilling of latest_active_index_roots**: Only set the active index root for the first slot.</s>
- [x] 28. <s>**`start_shard` offsets**: For fairer crosslinking latency across shards.</s>
- [x] 29. **Remove deposit timestamps and `DepositData`**: See #760.
- [x] 30. **Fair proposer sampling**: See #733.
- [ ] 31. **Slashed validators and LMD GHOST**: Should attestations from slashed validators be ignored in LMD GHOST?
- [x] 32. **Incentives simplification**: Simplification of the rewards and penalties.
- [ ] 33. **Exit fee**: See [here](https://github.com/ethereum/eth2.0-specs/pull/850#issuecomment-478068655).
- [x] 34. **GENESIS_SLOT == 0**: From Danny.
- [ ] 35. **Incentive-compatible crosslink rewards**: Proportional to amount of crosslink data.
- [ ] 36. **No phase 0 transfers**: Push transfers to phase 1 so that no economically meaningful activity happens during phase 0. This allows for phase 0 (a "testnet") to be rebooted if things go horribly wrong.
- [ ] 37. **Explicit genesis deposits**: Put genesis deposits in `block.body.deposits`.
- [x] 38. **Remove serialization from consensus**: See #924.
- [ ] 39. **Do not store withdrawal credentials**: See #937.
- [ ] 40. **Increase SECONDS_PER_SLOT and remove MIN_ATTESTATION_INCLUSION_DELAY**: The idea is to set different `SECONDS_PER_BEACON_SLOT` and `SECONDS_PER_SHARD_SLOT`, e.g. to 8/4, 12/3 or 16/4.
- [ ] 41. **The slotering**: Remove various unnecessary slots and replace by epochs where appropriate. (Justin surprise cleanup.)
- [x] 42. **Graffiti**: 32-byte arbitrary data in blocks
- [ ] 43. **Merge historical stats**: In particular, merge constants under "State list lengths".
- [ ] 44. **Improve epoch processing**: See #1043.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `utils/phase0/state_transition.py`
Content:
```
1 from . import spec
2
3
4 from typing import ( # noqa: F401
5 Any,
6 Callable,
7 List,
8 NewType,
9 Tuple,
10 )
11
12 from .spec import (
13 BeaconState,
14 BeaconBlock,
15 )
16
17
18 def process_transaction_type(state: BeaconState,
19 transactions: List[Any],
20 max_transactions: int,
21 tx_fn: Callable[[BeaconState, Any], None]) -> None:
22 assert len(transactions) <= max_transactions
23 for transaction in transactions:
24 tx_fn(state, transaction)
25
26
27 def process_transactions(state: BeaconState, block: BeaconBlock) -> None:
28 process_transaction_type(
29 state,
30 block.body.proposer_slashings,
31 spec.MAX_PROPOSER_SLASHINGS,
32 spec.process_proposer_slashing,
33 )
34 process_transaction_type(
35 state,
36 block.body.attester_slashings,
37 spec.MAX_ATTESTER_SLASHINGS,
38 spec.process_attester_slashing,
39 )
40 process_transaction_type(
41 state,
42 block.body.attestations,
43 spec.MAX_ATTESTATIONS,
44 spec.process_attestation,
45 )
46 process_transaction_type(
47 state,
48 block.body.deposits,
49 spec.MAX_DEPOSITS,
50 spec.process_deposit,
51 )
52 process_transaction_type(
53 state,
54 block.body.voluntary_exits,
55 spec.MAX_VOLUNTARY_EXITS,
56 spec.process_voluntary_exit,
57 )
58 assert len(block.body.transfers) == len(set(block.body.transfers))
59 process_transaction_type(
60 state,
61 block.body.transfers,
62 spec.MAX_TRANSFERS,
63 spec.process_transfer,
64 )
65
66
67 def process_block(state: BeaconState,
68 block: BeaconBlock,
69 verify_state_root: bool=False) -> None:
70 spec.process_block_header(state, block)
71 spec.process_randao(state, block)
72 spec.process_eth1_data(state, block)
73
74 process_transactions(state, block)
75 if verify_state_root:
76 spec.verify_block_state_root(state, block)
77
78
79 def process_epoch_transition(state: BeaconState) -> None:
80 spec.update_justification_and_finalization(state)
81 spec.process_crosslinks(state)
82 spec.maybe_reset_eth1_period(state)
83 spec.apply_rewards(state)
84 spec.process_ejections(state)
85 spec.update_registry_and_shuffling_data(state)
86 spec.process_slashings(state)
87 spec.process_exit_queue(state)
88 spec.finish_epoch_update(state)
89
90
91 def state_transition(state: BeaconState,
92 block: BeaconBlock,
93 verify_state_root: bool=False) -> BeaconState:
94 while state.slot < block.slot:
95 spec.cache_state(state)
96 if (state.slot + 1) % spec.SLOTS_PER_EPOCH == 0:
97 process_epoch_transition(state)
98 spec.advance_slot(state)
99 if block.slot == state.slot:
100 process_block(state, block, verify_state_root)
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/utils/phase0/state_transition.py b/utils/phase0/state_transition.py
--- a/utils/phase0/state_transition.py
+++ b/utils/phase0/state_transition.py
@@ -15,6 +15,13 @@
)
+def expected_deposit_count(state: BeaconState) -> int:
+ return min(
+ spec.MAX_DEPOSITS,
+ state.latest_eth1_data.deposit_count - state.deposit_index
+ )
+
+
def process_transaction_type(state: BeaconState,
transactions: List[Any],
max_transactions: int,
@@ -31,30 +38,36 @@
spec.MAX_PROPOSER_SLASHINGS,
spec.process_proposer_slashing,
)
+
process_transaction_type(
state,
block.body.attester_slashings,
spec.MAX_ATTESTER_SLASHINGS,
spec.process_attester_slashing,
)
+
process_transaction_type(
state,
block.body.attestations,
spec.MAX_ATTESTATIONS,
spec.process_attestation,
)
+
+ assert len(block.body.deposits) == expected_deposit_count(state)
process_transaction_type(
state,
block.body.deposits,
spec.MAX_DEPOSITS,
spec.process_deposit,
)
+
process_transaction_type(
state,
block.body.voluntary_exits,
spec.MAX_VOLUNTARY_EXITS,
spec.process_voluntary_exit,
)
+
assert len(block.body.transfers) == len(set(block.body.transfers))
process_transaction_type(
state,
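
The `expected_deposit_count` helper added above enforces item 5 of the checklist (mandatory processing of pending deposits): a block must carry every deposit the chain knows about but has not yet processed, capped at `MAX_DEPOSITS`. A small worked illustration of the formula, with made-up numbers rather than the real spec constants:

```python
# Illustrative only: MAX_DEPOSITS and the counters are invented to show how
# the cap in expected_deposit_count behaves; they are not the spec values.
MAX_DEPOSITS = 16


def expected_deposit_count(eth1_deposit_count: int, state_deposit_index: int) -> int:
    # Deposits known from the Eth1 data but not yet processed, capped per block.
    return min(MAX_DEPOSITS, eth1_deposit_count - state_deposit_index)


assert expected_deposit_count(eth1_deposit_count=40, state_deposit_index=30) == 10
assert expected_deposit_count(eth1_deposit_count=100, state_deposit_index=30) == 16  # capped
```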
| {"golden_diff": "diff --git a/utils/phase0/state_transition.py b/utils/phase0/state_transition.py\n--- a/utils/phase0/state_transition.py\n+++ b/utils/phase0/state_transition.py\n@@ -15,6 +15,13 @@\n )\n \n \n+def expected_deposit_count(state: BeaconState) -> int:\n+ return min(\n+ spec.MAX_DEPOSITS,\n+ state.latest_eth1_data.deposit_count - state.deposit_index\n+ )\n+\n+\n def process_transaction_type(state: BeaconState,\n transactions: List[Any],\n max_transactions: int,\n@@ -31,30 +38,36 @@\n spec.MAX_PROPOSER_SLASHINGS,\n spec.process_proposer_slashing,\n )\n+\n process_transaction_type(\n state,\n block.body.attester_slashings,\n spec.MAX_ATTESTER_SLASHINGS,\n spec.process_attester_slashing,\n )\n+\n process_transaction_type(\n state,\n block.body.attestations,\n spec.MAX_ATTESTATIONS,\n spec.process_attestation,\n )\n+\n+ assert len(block.body.deposits) == expected_deposit_count(state)\n process_transaction_type(\n state,\n block.body.deposits,\n spec.MAX_DEPOSITS,\n spec.process_deposit,\n )\n+\n process_transaction_type(\n state,\n block.body.voluntary_exits,\n spec.MAX_VOLUNTARY_EXITS,\n spec.process_voluntary_exit,\n )\n+\n assert len(block.body.transfers) == len(set(block.body.transfers))\n process_transaction_type(\n state,\n", "issue": "Miscellaneous beacon chain changes\u2014take 4\n(See #128, #218, #322 for takes 1, 2, 3.)\r\n\r\nBelow is a list of miscellaneous suggestions for phase 0, most of which were discussed on the researcher's call on Feb 19. This issue keeps track of some of the phase 0 work remaining.\r\n\r\n- [x] 1. **Friendlier GENESIS_SLOT**: Implemented in #655.\r\n- [x] 2. **Granular state roots**: Expose state roots at every slot. Implemented in #649.\r\n- [x] 3. **Previous block root reconstruction**: Provide enough information in `state` to reconstruct the current block's `previous_block_root`. Implemented in #649.\r\n- [x] 4. **Define genesis Eth1 data**: Implemented in #649.\r\n- [x] 5. **Mandatory deposits**: Mandatory processing of pending deposits.\r\n- [x] 6. **Transfers during pre-activation**: Allow not-yet-activated validators to make transfers.\r\n- [x] 7. **LMD GHOST tie breaker**: Compare block hashes to tie-break LMD GHOST.\r\n- [ ] 8. **Maximum reversions**: Enshrine dynamic weak subjectivity revert period. See #577.\r\n- [x] 9. **Double justifications**: Specify fork choice rule when there are two justified blocks at the same height. (Possible solution: ignore both and fallback to the previous highest justified block.)\r\n- [x] 10. **Move to SHA256**: See #612.\r\n- [ ] 11. **Standardise BLS12-381**: See #605.\r\n- [ ] 12. **Performance parameters tuning**: Fine-tune `SECONDS_PER_SLOT`, `SHARD_COUNT`, etc. based on benchmarks.\r\n- [ ] 13a. **Increase proposer rewards**: See #621. Need to check incentive compatibility with inclusion distance reward.\r\n- [x] 13b. **Incentive-compatible proposer rewards**: Make proposer rewards proportional to balance. \r\n- [ ] 14. **Increase rewards in general**: Calculate theoretical max issuance rate and work backwards to expected issuance.\r\n- [x] 15. **Reduce SSZ_CHUNK_SIZE to 32**: See #603 and #696.\r\n- [x] 16. **SSZ tuples**: See #665 and #696.\r\n- [x] <s>17. **Immediately withdrawable if bad proof of possession**: See #657.</s>\r\n- [x] 18. **4-byte working balance**: See #685.\r\n- [x] 19. **Merkleisation-friendly pending attestations**: See #697.\r\n- [ ] 20. **Fine-tune container field ordering**: To do with constants fine-tuning.\r\n- [x] 21. 
**Minimum activation period**: See [here](https://github.com/ethereum/eth2.0-specs/issues/675#issuecomment-468159678) and [here](https://github.com/ethereum/eth2.0-specs/pull/746).\r\n- [x] 22. **Milder ejections**: Replace `exit_validator` by `initiate_validator_exit` in `process_ejections`.\r\n- [x] 23. **Improved rate limiting**: Change the rate limiting logic (for entry/exit/withdrawal) based on [this Ethresear.ch post](https://ethresear.ch/t/rate-limiting-entry-exits-not-withdrawals/4942).\r\n- [x] 24. **Epoch transitions at start of epoch**: Instead of at the very end of the epoch.\r\n- [x] 25. **Epoch-based proposer slashing**: As opposed to slot-based.\r\n- [x] 26. **Genesis epochs**: Use `GENESIS_EPOCH - 1` for `previous_shuffling_epoch` and maybe `previous_shuffling_epoch`.\r\n- [x] <s>27. **No backfilling of latest_active_index_roots**: Only set the active index root for the first slot.</s>\r\n- [x] 28. <s>**`start_shard` offsets**: For fairer crosslinking latency across shards.</s>\r\n- [x] 29. **Remove deposit timestamps and `DepositData`**: See #760.\r\n- [x] 30. **Fair proposer sampling**: See #733.\r\n- [ ] 31. **Slashed validators and LMD GHOST**: Should attestations from slashed validators be ignored in LMD GHOST?\r\n- [x] 32. **Incentives simplification**: Simplification of the rewards and penalties.\r\n- [ ] 33. **Exit fee**: See [here](https://github.com/ethereum/eth2.0-specs/pull/850#issuecomment-478068655).\r\n- [x] 34. **GENESIS_SLOT == 0**: From Danny.\r\n- [ ] 35. **Incentive-compatible crosslink rewards**: Proportional to amount of crosslink data.\r\n- [ ] 36. **No phase 0 transfers**: Push transfers to phase 1 so that no economically meaningful activity happens during phase 0. This allows for phase 0 (a \"testnet\") to be rebooted if things go horribly wrong.\r\n- [ ] 37. **Explicit genesis deposits**: Put genesis deposits in `block.body.deposits`.\r\n- [x] 38. **Remove serialization from consensus**: See #924.\r\n- [ ] 39. **Do not store withdrawal credentials**: See #937.\r\n- [ ] 40. **Increase SECONDS_PER_SLOT and remove MIN_ATTESTATION_INCLUSION_DELAY**: The idea is to set different `SECONDS_PER_BEACON_SLOT`and `SECONDS_PER_SHARD_SLOT`, e.g. to 8/4, 12/3 or 16/4.\r\n- [ ] 41. **The slotering**: Remove various unnecessary slots and replace by epochs where appropriate. (Justin surprise cleanup.)\r\n- [x] 42. **Graffiti**: 32-byte arbitrary data in blocks\r\n- [ ] 43. **Merge historical stats**: In particular, merge constants under \"State list lengths\".\r\n- [ ] 44. **Improve epoch processing**: See #1043.\n", "before_files": [{"content": "from . 
import spec\n\n\nfrom typing import ( # noqa: F401\n Any,\n Callable,\n List,\n NewType,\n Tuple,\n)\n\nfrom .spec import (\n BeaconState,\n BeaconBlock,\n)\n\n\ndef process_transaction_type(state: BeaconState,\n transactions: List[Any],\n max_transactions: int,\n tx_fn: Callable[[BeaconState, Any], None]) -> None:\n assert len(transactions) <= max_transactions\n for transaction in transactions:\n tx_fn(state, transaction)\n\n\ndef process_transactions(state: BeaconState, block: BeaconBlock) -> None:\n process_transaction_type(\n state,\n block.body.proposer_slashings,\n spec.MAX_PROPOSER_SLASHINGS,\n spec.process_proposer_slashing,\n )\n process_transaction_type(\n state,\n block.body.attester_slashings,\n spec.MAX_ATTESTER_SLASHINGS,\n spec.process_attester_slashing,\n )\n process_transaction_type(\n state,\n block.body.attestations,\n spec.MAX_ATTESTATIONS,\n spec.process_attestation,\n )\n process_transaction_type(\n state,\n block.body.deposits,\n spec.MAX_DEPOSITS,\n spec.process_deposit,\n )\n process_transaction_type(\n state,\n block.body.voluntary_exits,\n spec.MAX_VOLUNTARY_EXITS,\n spec.process_voluntary_exit,\n )\n assert len(block.body.transfers) == len(set(block.body.transfers))\n process_transaction_type(\n state,\n block.body.transfers,\n spec.MAX_TRANSFERS,\n spec.process_transfer,\n )\n\n\ndef process_block(state: BeaconState,\n block: BeaconBlock,\n verify_state_root: bool=False) -> None:\n spec.process_block_header(state, block)\n spec.process_randao(state, block)\n spec.process_eth1_data(state, block)\n\n process_transactions(state, block)\n if verify_state_root:\n spec.verify_block_state_root(state, block)\n\n\ndef process_epoch_transition(state: BeaconState) -> None:\n spec.update_justification_and_finalization(state)\n spec.process_crosslinks(state)\n spec.maybe_reset_eth1_period(state)\n spec.apply_rewards(state)\n spec.process_ejections(state)\n spec.update_registry_and_shuffling_data(state)\n spec.process_slashings(state)\n spec.process_exit_queue(state)\n spec.finish_epoch_update(state)\n\n\ndef state_transition(state: BeaconState,\n block: BeaconBlock,\n verify_state_root: bool=False) -> BeaconState:\n while state.slot < block.slot:\n spec.cache_state(state)\n if (state.slot + 1) % spec.SLOTS_PER_EPOCH == 0:\n process_epoch_transition(state)\n spec.advance_slot(state)\n if block.slot == state.slot:\n process_block(state, block, verify_state_root)\n", "path": "utils/phase0/state_transition.py"}], "after_files": [{"content": "from . 
import spec\n\n\nfrom typing import ( # noqa: F401\n Any,\n Callable,\n List,\n NewType,\n Tuple,\n)\n\nfrom .spec import (\n BeaconState,\n BeaconBlock,\n)\n\n\ndef expected_deposit_count(state: BeaconState) -> int:\n return min(\n spec.MAX_DEPOSITS,\n state.latest_eth1_data.deposit_count - state.deposit_index\n )\n\n\ndef process_transaction_type(state: BeaconState,\n transactions: List[Any],\n max_transactions: int,\n tx_fn: Callable[[BeaconState, Any], None]) -> None:\n assert len(transactions) <= max_transactions\n for transaction in transactions:\n tx_fn(state, transaction)\n\n\ndef process_transactions(state: BeaconState, block: BeaconBlock) -> None:\n process_transaction_type(\n state,\n block.body.proposer_slashings,\n spec.MAX_PROPOSER_SLASHINGS,\n spec.process_proposer_slashing,\n )\n\n process_transaction_type(\n state,\n block.body.attester_slashings,\n spec.MAX_ATTESTER_SLASHINGS,\n spec.process_attester_slashing,\n )\n\n process_transaction_type(\n state,\n block.body.attestations,\n spec.MAX_ATTESTATIONS,\n spec.process_attestation,\n )\n\n assert len(block.body.deposits) == expected_deposit_count(state)\n process_transaction_type(\n state,\n block.body.deposits,\n spec.MAX_DEPOSITS,\n spec.process_deposit,\n )\n\n process_transaction_type(\n state,\n block.body.voluntary_exits,\n spec.MAX_VOLUNTARY_EXITS,\n spec.process_voluntary_exit,\n )\n\n assert len(block.body.transfers) == len(set(block.body.transfers))\n process_transaction_type(\n state,\n block.body.transfers,\n spec.MAX_TRANSFERS,\n spec.process_transfer,\n )\n\n\ndef process_block(state: BeaconState,\n block: BeaconBlock,\n verify_state_root: bool=False) -> None:\n spec.process_block_header(state, block)\n spec.process_randao(state, block)\n spec.process_eth1_data(state, block)\n\n process_transactions(state, block)\n if verify_state_root:\n spec.verify_block_state_root(state, block)\n\n\ndef process_epoch_transition(state: BeaconState) -> None:\n spec.update_justification_and_finalization(state)\n spec.process_crosslinks(state)\n spec.maybe_reset_eth1_period(state)\n spec.apply_rewards(state)\n spec.process_ejections(state)\n spec.update_registry_and_shuffling_data(state)\n spec.process_slashings(state)\n spec.process_exit_queue(state)\n spec.finish_epoch_update(state)\n\n\ndef state_transition(state: BeaconState,\n block: BeaconBlock,\n verify_state_root: bool=False) -> BeaconState:\n while state.slot < block.slot:\n spec.cache_state(state)\n if (state.slot + 1) % spec.SLOTS_PER_EPOCH == 0:\n process_epoch_transition(state)\n spec.advance_slot(state)\n if block.slot == state.slot:\n process_block(state, block, verify_state_root)\n", "path": "utils/phase0/state_transition.py"}]} | 2,487 | 332 |
gh_patches_debug_26667 | rasdani/github-patches | git_diff | getsentry__sentry-24151 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Accessibility Issues with User Feedback Widget
<!-- Requirements: please go through this checklist before opening a new issue -->
- [x] Review the documentation: https://docs.sentry.io/
- [x] Search for existing issues: https://github.com/getsentry/sentry-javascript/issues
- [x] Use the latest release: https://github.com/getsentry/sentry-javascript/releases
- [x] Provide a link to the affected event from your Sentry account <- Not applicable
## Package + Version
- [x] `@sentry/browser`
- [ ] `@sentry/node`
- [ ] `raven-js`
- [ ] `raven-node` _(raven for node)_
- [ ] other:
### Version:
```
5.29.2
```
## Description
The dialog opened by Sentry.showReportDialog is not accessible. See the [WAI-ARIA Authoring Practices guidance on accessible modals](https://www.w3.org/TR/wai-aria-practices-1.1/#dialog_modal).
Some specific issues which need to be fixed:
- `Tab` and `Shift` + `Tab` should not move focus to elements outside the modal (they currently can)
- When the modal loads, the first input of the form should receive focus (currently nothing in the modal is focused when the modal loads)
- The "Close" button is rendered using an anchor tag without an `href` attribute. As a result it is not in the tab sequence and keyboard users are not able to use it. To fix this, a `button` element should be used instead. Since the element does not cause navigation, the `button` element will also have the proper semantics and will improve the experience for users of assistive technology.
- The outermost div of the dialog needs `role` set to `dialog`
- The outermost div of the dialog needs `aria-modal` set to `true`
- The outermost div of the dialog needs `aria-labelledby` set to the `id` of the modal's h2
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/web/frontend/error_page_embed.py`
Content:
```
1 from django import forms
2 from django.db import IntegrityError, transaction
3 from django.http import HttpResponse
4 from django.views.generic import View
5 from django.utils import timezone
6 from django.utils.safestring import mark_safe
7 from django.utils.translation import ugettext_lazy as _
8 from django.views.decorators.csrf import csrf_exempt
9
10 from sentry import eventstore
11 from sentry.models import Project, ProjectKey, ProjectOption, UserReport
12 from sentry.web.helpers import render_to_response, render_to_string
13 from sentry.signals import user_feedback_received
14 from sentry.utils import json
15 from sentry.utils.http import absolute_uri, is_valid_origin, origin_from_request
16 from sentry.utils.validators import normalize_event_id
17
18 GENERIC_ERROR = _("An unknown error occurred while submitting your report. Please try again.")
19 FORM_ERROR = _("Some fields were invalid. Please correct the errors and try again.")
20 SENT_MESSAGE = _("Your feedback has been sent. Thank you!")
21
22 DEFAULT_TITLE = _("It looks like we're having issues.")
23 DEFAULT_SUBTITLE = _("Our team has been notified.")
24 DEFAULT_SUBTITLE2 = _("If you'd like to help, tell us what happened below.")
25
26 DEFAULT_NAME_LABEL = _("Name")
27 DEFAULT_EMAIL_LABEL = _("Email")
28 DEFAULT_COMMENTS_LABEL = _("What happened?")
29
30 DEFAULT_CLOSE_LABEL = _("Close")
31 DEFAULT_SUBMIT_LABEL = _("Submit Crash Report")
32
33 DEFAULT_OPTIONS = {
34 "title": DEFAULT_TITLE,
35 "subtitle": DEFAULT_SUBTITLE,
36 "subtitle2": DEFAULT_SUBTITLE2,
37 "labelName": DEFAULT_NAME_LABEL,
38 "labelEmail": DEFAULT_EMAIL_LABEL,
39 "labelComments": DEFAULT_COMMENTS_LABEL,
40 "labelClose": DEFAULT_CLOSE_LABEL,
41 "labelSubmit": DEFAULT_SUBMIT_LABEL,
42 "errorGeneric": GENERIC_ERROR,
43 "errorFormEntry": FORM_ERROR,
44 "successMessage": SENT_MESSAGE,
45 }
46
47
48 class UserReportForm(forms.ModelForm):
49 name = forms.CharField(
50 max_length=128, widget=forms.TextInput(attrs={"placeholder": _("Jane Bloggs")})
51 )
52 email = forms.EmailField(
53 max_length=75,
54 widget=forms.TextInput(attrs={"placeholder": _("[email protected]"), "type": "email"}),
55 )
56 comments = forms.CharField(
57 widget=forms.Textarea(attrs={"placeholder": _("I clicked on 'X' and then hit 'Confirm'")})
58 )
59
60 class Meta:
61 model = UserReport
62 fields = ("name", "email", "comments")
63
64
65 class ErrorPageEmbedView(View):
66 def _get_project_key(self, request):
67 try:
68 dsn = request.GET["dsn"]
69 except KeyError:
70 return
71
72 try:
73 key = ProjectKey.from_dsn(dsn)
74 except ProjectKey.DoesNotExist:
75 return
76
77 return key
78
79 def _get_origin(self, request):
80 return origin_from_request(request)
81
82 def _smart_response(self, request, context=None, status=200):
83 json_context = json.dumps(context or {})
84 accept = request.META.get("HTTP_ACCEPT") or ""
85 if "text/javascript" in accept:
86 content_type = "text/javascript"
87 content = ""
88 else:
89 content_type = "application/json"
90 content = json_context
91 response = HttpResponse(content, status=status, content_type=content_type)
92 response["Access-Control-Allow-Origin"] = request.META.get("HTTP_ORIGIN", "")
93 response["Access-Control-Allow-Methods"] = "GET, POST, OPTIONS"
94 response["Access-Control-Max-Age"] = "1000"
95 response["Access-Control-Allow-Headers"] = "Content-Type, Authorization, X-Requested-With"
96 response["Vary"] = "Accept"
97 if content == "" and context:
98 response["X-Sentry-Context"] = json_context
99 return response
100
101 @csrf_exempt
102 def dispatch(self, request):
103 try:
104 event_id = request.GET["eventId"]
105 except KeyError:
106 return self._smart_response(
107 request, {"eventId": "Missing or invalid parameter."}, status=400
108 )
109
110 normalized_event_id = normalize_event_id(event_id)
111 if normalized_event_id:
112 event_id = normalized_event_id
113 elif event_id:
114 return self._smart_response(
115 request, {"eventId": "Missing or invalid parameter."}, status=400
116 )
117
118 key = self._get_project_key(request)
119 if not key:
120 return self._smart_response(
121 request, {"dsn": "Missing or invalid parameter."}, status=404
122 )
123
124 origin = self._get_origin(request)
125 if not is_valid_origin(origin, key.project):
126 return self._smart_response(request, status=403)
127
128 if request.method == "OPTIONS":
129 return self._smart_response(request)
130
131 # customization options
132 options = DEFAULT_OPTIONS.copy()
133 for name in options.keys():
134 if name in request.GET:
135 options[name] = str(request.GET[name])
136
137 # TODO(dcramer): since we cant use a csrf cookie we should at the very
138 # least sign the request / add some kind of nonce
139 initial = {"name": request.GET.get("name"), "email": request.GET.get("email")}
140
141 form = UserReportForm(request.POST if request.method == "POST" else None, initial=initial)
142 if form.is_valid():
143 # TODO(dcramer): move this to post to the internal API
144 report = form.save(commit=False)
145 report.project_id = key.project_id
146 report.event_id = event_id
147
148 event = eventstore.get_event_by_id(report.project_id, report.event_id)
149
150 if event is not None:
151 report.environment_id = event.get_environment().id
152 report.group_id = event.group_id
153
154 try:
155 with transaction.atomic():
156 report.save()
157 except IntegrityError:
158 # There was a duplicate, so just overwrite the existing
159 # row with the new one. The only way this ever happens is
160 # if someone is messing around with the API, or doing
161 # something wrong with the SDK, but this behavior is
162 # more reasonable than just hard erroring and is more
163 # expected.
164 UserReport.objects.filter(
165 project_id=report.project_id, event_id=report.event_id
166 ).update(
167 name=report.name,
168 email=report.email,
169 comments=report.comments,
170 date_added=timezone.now(),
171 )
172
173 else:
174 if report.group_id:
175 report.notify()
176
177 user_feedback_received.send(
178 project=Project.objects.get(id=report.project_id),
179 sender=self,
180 )
181
182 return self._smart_response(request)
183 elif request.method == "POST":
184 return self._smart_response(request, {"errors": dict(form.errors)}, status=400)
185
186 show_branding = (
187 ProjectOption.objects.get_value(
188 project=key.project, key="feedback:branding", default="1"
189 )
190 == "1"
191 )
192
193 template = render_to_string(
194 "sentry/error-page-embed.html",
195 context={
196 "form": form,
197 "show_branding": show_branding,
198 "title": options["title"],
199 "subtitle": options["subtitle"],
200 "subtitle2": options["subtitle2"],
201 "name_label": options["labelName"],
202 "email_label": options["labelEmail"],
203 "comments_label": options["labelComments"],
204 "submit_label": options["labelSubmit"],
205 "close_label": options["labelClose"],
206 },
207 )
208
209 context = {
210 "endpoint": mark_safe("*/" + json.dumps(absolute_uri(request.get_full_path())) + ";/*"),
211 "template": mark_safe("*/" + json.dumps(template) + ";/*"),
212 "strings": json.dumps_htmlsafe(
213 {
214 "generic_error": str(options["errorGeneric"]),
215 "form_error": str(options["errorFormEntry"]),
216 "sent_message": str(options["successMessage"]),
217 }
218 ),
219 }
220
221 return render_to_response(
222 "sentry/error-page-embed.js", context, request, content_type="text/javascript"
223 )
224
```
Path: `src/sentry/web/frontend/debug/debug_error_embed.py`
Content:
```
1 from django.conf import settings
2 from django.views.generic import View
3 from urllib.parse import urlencode
4
5 from sentry.models import ProjectKey
6 from sentry.web.helpers import render_to_response
7
8
9 class DebugErrorPageEmbedView(View):
10 def _get_project_key(self):
11 return ProjectKey.objects.filter(project=settings.SENTRY_PROJECT)[0]
12
13 def get(self, request):
14 context = {
15 "query_params": urlencode(
16 {
17 "dsn": self._get_project_key().dsn_public,
18 "event_id": "342a3d7f690a49f8bd7c4cf0e61a9ded",
19 "options": dict(**request.GET),
20 }
21 )
22 }
23
24 return render_to_response("sentry/debug/error-page-embed.html", context, request)
25
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/sentry/web/frontend/debug/debug_error_embed.py b/src/sentry/web/frontend/debug/debug_error_embed.py
--- a/src/sentry/web/frontend/debug/debug_error_embed.py
+++ b/src/sentry/web/frontend/debug/debug_error_embed.py
@@ -15,8 +15,8 @@
"query_params": urlencode(
{
"dsn": self._get_project_key().dsn_public,
- "event_id": "342a3d7f690a49f8bd7c4cf0e61a9ded",
- "options": dict(**request.GET),
+ "eventId": "342a3d7f690a49f8bd7c4cf0e61a9ded",
+ **request.GET,
}
)
}
diff --git a/src/sentry/web/frontend/error_page_embed.py b/src/sentry/web/frontend/error_page_embed.py
--- a/src/sentry/web/frontend/error_page_embed.py
+++ b/src/sentry/web/frontend/error_page_embed.py
@@ -209,12 +209,16 @@
context = {
"endpoint": mark_safe("*/" + json.dumps(absolute_uri(request.get_full_path())) + ";/*"),
"template": mark_safe("*/" + json.dumps(template) + ";/*"),
- "strings": json.dumps_htmlsafe(
- {
- "generic_error": str(options["errorGeneric"]),
- "form_error": str(options["errorFormEntry"]),
- "sent_message": str(options["successMessage"]),
- }
+ "strings": mark_safe(
+ "*/"
+ + json.dumps_htmlsafe(
+ {
+ "generic_error": str(options["errorGeneric"]),
+ "form_error": str(options["errorFormEntry"]),
+ "sent_message": str(options["successMessage"]),
+ }
+ )
+ + ";/*"
),
}
| {"golden_diff": "diff --git a/src/sentry/web/frontend/debug/debug_error_embed.py b/src/sentry/web/frontend/debug/debug_error_embed.py\n--- a/src/sentry/web/frontend/debug/debug_error_embed.py\n+++ b/src/sentry/web/frontend/debug/debug_error_embed.py\n@@ -15,8 +15,8 @@\n \"query_params\": urlencode(\n {\n \"dsn\": self._get_project_key().dsn_public,\n- \"event_id\": \"342a3d7f690a49f8bd7c4cf0e61a9ded\",\n- \"options\": dict(**request.GET),\n+ \"eventId\": \"342a3d7f690a49f8bd7c4cf0e61a9ded\",\n+ **request.GET,\n }\n )\n }\ndiff --git a/src/sentry/web/frontend/error_page_embed.py b/src/sentry/web/frontend/error_page_embed.py\n--- a/src/sentry/web/frontend/error_page_embed.py\n+++ b/src/sentry/web/frontend/error_page_embed.py\n@@ -209,12 +209,16 @@\n context = {\n \"endpoint\": mark_safe(\"*/\" + json.dumps(absolute_uri(request.get_full_path())) + \";/*\"),\n \"template\": mark_safe(\"*/\" + json.dumps(template) + \";/*\"),\n- \"strings\": json.dumps_htmlsafe(\n- {\n- \"generic_error\": str(options[\"errorGeneric\"]),\n- \"form_error\": str(options[\"errorFormEntry\"]),\n- \"sent_message\": str(options[\"successMessage\"]),\n- }\n+ \"strings\": mark_safe(\n+ \"*/\"\n+ + json.dumps_htmlsafe(\n+ {\n+ \"generic_error\": str(options[\"errorGeneric\"]),\n+ \"form_error\": str(options[\"errorFormEntry\"]),\n+ \"sent_message\": str(options[\"successMessage\"]),\n+ }\n+ )\n+ + \";/*\"\n ),\n }\n", "issue": "Accessibility Issues with User Feedback Widget\n<!-- Requirements: please go through this checklist before opening a new issue -->\r\n\r\n- [x] Review the documentation: https://docs.sentry.io/\r\n- [x] Search for existing issues: https://github.com/getsentry/sentry-javascript/issues\r\n- [x] Use the latest release: https://github.com/getsentry/sentry-javascript/releases\r\n- [x] Provide a link to the affected event from your Sentry account <- Not applicable\r\n\r\n## Package + Version\r\n\r\n- [x] `@sentry/browser`\r\n- [ ] `@sentry/node`\r\n- [ ] `raven-js`\r\n- [ ] `raven-node` _(raven for node)_\r\n- [ ] other:\r\n\r\n### Version:\r\n\r\n```\r\n5.29.2\r\n```\r\n\r\n## Description\r\n\r\nThe dialog opened by Sentry.showReportDialog is not accessible. See the [WAI-ARIA Authoring Practices guidance on accessible modals](https://www.w3.org/TR/wai-aria-practices-1.1/#dialog_modal).\r\n\r\nSome specific issues which need to be fixed:\r\n\r\n- `Tab` and `Shift` + `Tab` should not move focus to elements outside the modal (they currently can)\r\n- When the modal loads, the first input of the form should receive focus (currently nothing in the modal is focused when the modal loads)\r\n- The \"Close\" button is rendered using an anchor tag without an `href` attribute. As a result it is not in the tab sequence and keyboard users are not able to use it. To fix this, a `button` element should be used instead. 
Since the element does not cause navigation, the `button` element will also have the proper semantics and will improve the experience for users of assistive technology.\r\n- The outermost div of the dialog needs `role` set to `dialog`\r\n- The outermost div of the dialog needs `aria-modal` set to `true`\r\n- The outermost div of the dialog needs `aria-labelledby` set to the `id` of the modal's h2\r\n\n", "before_files": [{"content": "from django import forms\nfrom django.db import IntegrityError, transaction\nfrom django.http import HttpResponse\nfrom django.views.generic import View\nfrom django.utils import timezone\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom sentry import eventstore\nfrom sentry.models import Project, ProjectKey, ProjectOption, UserReport\nfrom sentry.web.helpers import render_to_response, render_to_string\nfrom sentry.signals import user_feedback_received\nfrom sentry.utils import json\nfrom sentry.utils.http import absolute_uri, is_valid_origin, origin_from_request\nfrom sentry.utils.validators import normalize_event_id\n\nGENERIC_ERROR = _(\"An unknown error occurred while submitting your report. Please try again.\")\nFORM_ERROR = _(\"Some fields were invalid. Please correct the errors and try again.\")\nSENT_MESSAGE = _(\"Your feedback has been sent. Thank you!\")\n\nDEFAULT_TITLE = _(\"It looks like we're having issues.\")\nDEFAULT_SUBTITLE = _(\"Our team has been notified.\")\nDEFAULT_SUBTITLE2 = _(\"If you'd like to help, tell us what happened below.\")\n\nDEFAULT_NAME_LABEL = _(\"Name\")\nDEFAULT_EMAIL_LABEL = _(\"Email\")\nDEFAULT_COMMENTS_LABEL = _(\"What happened?\")\n\nDEFAULT_CLOSE_LABEL = _(\"Close\")\nDEFAULT_SUBMIT_LABEL = _(\"Submit Crash Report\")\n\nDEFAULT_OPTIONS = {\n \"title\": DEFAULT_TITLE,\n \"subtitle\": DEFAULT_SUBTITLE,\n \"subtitle2\": DEFAULT_SUBTITLE2,\n \"labelName\": DEFAULT_NAME_LABEL,\n \"labelEmail\": DEFAULT_EMAIL_LABEL,\n \"labelComments\": DEFAULT_COMMENTS_LABEL,\n \"labelClose\": DEFAULT_CLOSE_LABEL,\n \"labelSubmit\": DEFAULT_SUBMIT_LABEL,\n \"errorGeneric\": GENERIC_ERROR,\n \"errorFormEntry\": FORM_ERROR,\n \"successMessage\": SENT_MESSAGE,\n}\n\n\nclass UserReportForm(forms.ModelForm):\n name = forms.CharField(\n max_length=128, widget=forms.TextInput(attrs={\"placeholder\": _(\"Jane Bloggs\")})\n )\n email = forms.EmailField(\n max_length=75,\n widget=forms.TextInput(attrs={\"placeholder\": _(\"[email protected]\"), \"type\": \"email\"}),\n )\n comments = forms.CharField(\n widget=forms.Textarea(attrs={\"placeholder\": _(\"I clicked on 'X' and then hit 'Confirm'\")})\n )\n\n class Meta:\n model = UserReport\n fields = (\"name\", \"email\", \"comments\")\n\n\nclass ErrorPageEmbedView(View):\n def _get_project_key(self, request):\n try:\n dsn = request.GET[\"dsn\"]\n except KeyError:\n return\n\n try:\n key = ProjectKey.from_dsn(dsn)\n except ProjectKey.DoesNotExist:\n return\n\n return key\n\n def _get_origin(self, request):\n return origin_from_request(request)\n\n def _smart_response(self, request, context=None, status=200):\n json_context = json.dumps(context or {})\n accept = request.META.get(\"HTTP_ACCEPT\") or \"\"\n if \"text/javascript\" in accept:\n content_type = \"text/javascript\"\n content = \"\"\n else:\n content_type = \"application/json\"\n content = json_context\n response = HttpResponse(content, status=status, content_type=content_type)\n response[\"Access-Control-Allow-Origin\"] = 
request.META.get(\"HTTP_ORIGIN\", \"\")\n response[\"Access-Control-Allow-Methods\"] = \"GET, POST, OPTIONS\"\n response[\"Access-Control-Max-Age\"] = \"1000\"\n response[\"Access-Control-Allow-Headers\"] = \"Content-Type, Authorization, X-Requested-With\"\n response[\"Vary\"] = \"Accept\"\n if content == \"\" and context:\n response[\"X-Sentry-Context\"] = json_context\n return response\n\n @csrf_exempt\n def dispatch(self, request):\n try:\n event_id = request.GET[\"eventId\"]\n except KeyError:\n return self._smart_response(\n request, {\"eventId\": \"Missing or invalid parameter.\"}, status=400\n )\n\n normalized_event_id = normalize_event_id(event_id)\n if normalized_event_id:\n event_id = normalized_event_id\n elif event_id:\n return self._smart_response(\n request, {\"eventId\": \"Missing or invalid parameter.\"}, status=400\n )\n\n key = self._get_project_key(request)\n if not key:\n return self._smart_response(\n request, {\"dsn\": \"Missing or invalid parameter.\"}, status=404\n )\n\n origin = self._get_origin(request)\n if not is_valid_origin(origin, key.project):\n return self._smart_response(request, status=403)\n\n if request.method == \"OPTIONS\":\n return self._smart_response(request)\n\n # customization options\n options = DEFAULT_OPTIONS.copy()\n for name in options.keys():\n if name in request.GET:\n options[name] = str(request.GET[name])\n\n # TODO(dcramer): since we cant use a csrf cookie we should at the very\n # least sign the request / add some kind of nonce\n initial = {\"name\": request.GET.get(\"name\"), \"email\": request.GET.get(\"email\")}\n\n form = UserReportForm(request.POST if request.method == \"POST\" else None, initial=initial)\n if form.is_valid():\n # TODO(dcramer): move this to post to the internal API\n report = form.save(commit=False)\n report.project_id = key.project_id\n report.event_id = event_id\n\n event = eventstore.get_event_by_id(report.project_id, report.event_id)\n\n if event is not None:\n report.environment_id = event.get_environment().id\n report.group_id = event.group_id\n\n try:\n with transaction.atomic():\n report.save()\n except IntegrityError:\n # There was a duplicate, so just overwrite the existing\n # row with the new one. 
The only way this ever happens is\n # if someone is messing around with the API, or doing\n # something wrong with the SDK, but this behavior is\n # more reasonable than just hard erroring and is more\n # expected.\n UserReport.objects.filter(\n project_id=report.project_id, event_id=report.event_id\n ).update(\n name=report.name,\n email=report.email,\n comments=report.comments,\n date_added=timezone.now(),\n )\n\n else:\n if report.group_id:\n report.notify()\n\n user_feedback_received.send(\n project=Project.objects.get(id=report.project_id),\n sender=self,\n )\n\n return self._smart_response(request)\n elif request.method == \"POST\":\n return self._smart_response(request, {\"errors\": dict(form.errors)}, status=400)\n\n show_branding = (\n ProjectOption.objects.get_value(\n project=key.project, key=\"feedback:branding\", default=\"1\"\n )\n == \"1\"\n )\n\n template = render_to_string(\n \"sentry/error-page-embed.html\",\n context={\n \"form\": form,\n \"show_branding\": show_branding,\n \"title\": options[\"title\"],\n \"subtitle\": options[\"subtitle\"],\n \"subtitle2\": options[\"subtitle2\"],\n \"name_label\": options[\"labelName\"],\n \"email_label\": options[\"labelEmail\"],\n \"comments_label\": options[\"labelComments\"],\n \"submit_label\": options[\"labelSubmit\"],\n \"close_label\": options[\"labelClose\"],\n },\n )\n\n context = {\n \"endpoint\": mark_safe(\"*/\" + json.dumps(absolute_uri(request.get_full_path())) + \";/*\"),\n \"template\": mark_safe(\"*/\" + json.dumps(template) + \";/*\"),\n \"strings\": json.dumps_htmlsafe(\n {\n \"generic_error\": str(options[\"errorGeneric\"]),\n \"form_error\": str(options[\"errorFormEntry\"]),\n \"sent_message\": str(options[\"successMessage\"]),\n }\n ),\n }\n\n return render_to_response(\n \"sentry/error-page-embed.js\", context, request, content_type=\"text/javascript\"\n )\n", "path": "src/sentry/web/frontend/error_page_embed.py"}, {"content": "from django.conf import settings\nfrom django.views.generic import View\nfrom urllib.parse import urlencode\n\nfrom sentry.models import ProjectKey\nfrom sentry.web.helpers import render_to_response\n\n\nclass DebugErrorPageEmbedView(View):\n def _get_project_key(self):\n return ProjectKey.objects.filter(project=settings.SENTRY_PROJECT)[0]\n\n def get(self, request):\n context = {\n \"query_params\": urlencode(\n {\n \"dsn\": self._get_project_key().dsn_public,\n \"event_id\": \"342a3d7f690a49f8bd7c4cf0e61a9ded\",\n \"options\": dict(**request.GET),\n }\n )\n }\n\n return render_to_response(\"sentry/debug/error-page-embed.html\", context, request)\n", "path": "src/sentry/web/frontend/debug/debug_error_embed.py"}], "after_files": [{"content": "from django import forms\nfrom django.db import IntegrityError, transaction\nfrom django.http import HttpResponse\nfrom django.views.generic import View\nfrom django.utils import timezone\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom sentry import eventstore\nfrom sentry.models import Project, ProjectKey, ProjectOption, UserReport\nfrom sentry.web.helpers import render_to_response, render_to_string\nfrom sentry.signals import user_feedback_received\nfrom sentry.utils import json\nfrom sentry.utils.http import absolute_uri, is_valid_origin, origin_from_request\nfrom sentry.utils.validators import normalize_event_id\n\nGENERIC_ERROR = _(\"An unknown error occurred while submitting your report. 
Please try again.\")\nFORM_ERROR = _(\"Some fields were invalid. Please correct the errors and try again.\")\nSENT_MESSAGE = _(\"Your feedback has been sent. Thank you!\")\n\nDEFAULT_TITLE = _(\"It looks like we're having issues.\")\nDEFAULT_SUBTITLE = _(\"Our team has been notified.\")\nDEFAULT_SUBTITLE2 = _(\"If you'd like to help, tell us what happened below.\")\n\nDEFAULT_NAME_LABEL = _(\"Name\")\nDEFAULT_EMAIL_LABEL = _(\"Email\")\nDEFAULT_COMMENTS_LABEL = _(\"What happened?\")\n\nDEFAULT_CLOSE_LABEL = _(\"Close\")\nDEFAULT_SUBMIT_LABEL = _(\"Submit Crash Report\")\n\nDEFAULT_OPTIONS = {\n \"title\": DEFAULT_TITLE,\n \"subtitle\": DEFAULT_SUBTITLE,\n \"subtitle2\": DEFAULT_SUBTITLE2,\n \"labelName\": DEFAULT_NAME_LABEL,\n \"labelEmail\": DEFAULT_EMAIL_LABEL,\n \"labelComments\": DEFAULT_COMMENTS_LABEL,\n \"labelClose\": DEFAULT_CLOSE_LABEL,\n \"labelSubmit\": DEFAULT_SUBMIT_LABEL,\n \"errorGeneric\": GENERIC_ERROR,\n \"errorFormEntry\": FORM_ERROR,\n \"successMessage\": SENT_MESSAGE,\n}\n\n\nclass UserReportForm(forms.ModelForm):\n name = forms.CharField(\n max_length=128, widget=forms.TextInput(attrs={\"placeholder\": _(\"Jane Bloggs\")})\n )\n email = forms.EmailField(\n max_length=75,\n widget=forms.TextInput(attrs={\"placeholder\": _(\"[email protected]\"), \"type\": \"email\"}),\n )\n comments = forms.CharField(\n widget=forms.Textarea(attrs={\"placeholder\": _(\"I clicked on 'X' and then hit 'Confirm'\")})\n )\n\n class Meta:\n model = UserReport\n fields = (\"name\", \"email\", \"comments\")\n\n\nclass ErrorPageEmbedView(View):\n def _get_project_key(self, request):\n try:\n dsn = request.GET[\"dsn\"]\n except KeyError:\n return\n\n try:\n key = ProjectKey.from_dsn(dsn)\n except ProjectKey.DoesNotExist:\n return\n\n return key\n\n def _get_origin(self, request):\n return origin_from_request(request)\n\n def _smart_response(self, request, context=None, status=200):\n json_context = json.dumps(context or {})\n accept = request.META.get(\"HTTP_ACCEPT\") or \"\"\n if \"text/javascript\" in accept:\n content_type = \"text/javascript\"\n content = \"\"\n else:\n content_type = \"application/json\"\n content = json_context\n response = HttpResponse(content, status=status, content_type=content_type)\n response[\"Access-Control-Allow-Origin\"] = request.META.get(\"HTTP_ORIGIN\", \"\")\n response[\"Access-Control-Allow-Methods\"] = \"GET, POST, OPTIONS\"\n response[\"Access-Control-Max-Age\"] = \"1000\"\n response[\"Access-Control-Allow-Headers\"] = \"Content-Type, Authorization, X-Requested-With\"\n response[\"Vary\"] = \"Accept\"\n if content == \"\" and context:\n response[\"X-Sentry-Context\"] = json_context\n return response\n\n @csrf_exempt\n def dispatch(self, request):\n try:\n event_id = request.GET[\"eventId\"]\n except KeyError:\n return self._smart_response(\n request, {\"eventId\": \"Missing or invalid parameter.\"}, status=400\n )\n\n normalized_event_id = normalize_event_id(event_id)\n if normalized_event_id:\n event_id = normalized_event_id\n elif event_id:\n return self._smart_response(\n request, {\"eventId\": \"Missing or invalid parameter.\"}, status=400\n )\n\n key = self._get_project_key(request)\n if not key:\n return self._smart_response(\n request, {\"dsn\": \"Missing or invalid parameter.\"}, status=404\n )\n\n origin = self._get_origin(request)\n if not is_valid_origin(origin, key.project):\n return self._smart_response(request, status=403)\n\n if request.method == \"OPTIONS\":\n return self._smart_response(request)\n\n # customization options\n options = 
DEFAULT_OPTIONS.copy()\n for name in options.keys():\n if name in request.GET:\n options[name] = str(request.GET[name])\n\n # TODO(dcramer): since we cant use a csrf cookie we should at the very\n # least sign the request / add some kind of nonce\n initial = {\"name\": request.GET.get(\"name\"), \"email\": request.GET.get(\"email\")}\n\n form = UserReportForm(request.POST if request.method == \"POST\" else None, initial=initial)\n if form.is_valid():\n # TODO(dcramer): move this to post to the internal API\n report = form.save(commit=False)\n report.project_id = key.project_id\n report.event_id = event_id\n\n event = eventstore.get_event_by_id(report.project_id, report.event_id)\n\n if event is not None:\n report.environment_id = event.get_environment().id\n report.group_id = event.group_id\n\n try:\n with transaction.atomic():\n report.save()\n except IntegrityError:\n # There was a duplicate, so just overwrite the existing\n # row with the new one. The only way this ever happens is\n # if someone is messing around with the API, or doing\n # something wrong with the SDK, but this behavior is\n # more reasonable than just hard erroring and is more\n # expected.\n UserReport.objects.filter(\n project_id=report.project_id, event_id=report.event_id\n ).update(\n name=report.name,\n email=report.email,\n comments=report.comments,\n date_added=timezone.now(),\n )\n\n else:\n if report.group_id:\n report.notify()\n\n user_feedback_received.send(\n project=Project.objects.get(id=report.project_id),\n sender=self,\n )\n\n return self._smart_response(request)\n elif request.method == \"POST\":\n return self._smart_response(request, {\"errors\": dict(form.errors)}, status=400)\n\n show_branding = (\n ProjectOption.objects.get_value(\n project=key.project, key=\"feedback:branding\", default=\"1\"\n )\n == \"1\"\n )\n\n template = render_to_string(\n \"sentry/error-page-embed.html\",\n context={\n \"form\": form,\n \"show_branding\": show_branding,\n \"title\": options[\"title\"],\n \"subtitle\": options[\"subtitle\"],\n \"subtitle2\": options[\"subtitle2\"],\n \"name_label\": options[\"labelName\"],\n \"email_label\": options[\"labelEmail\"],\n \"comments_label\": options[\"labelComments\"],\n \"submit_label\": options[\"labelSubmit\"],\n \"close_label\": options[\"labelClose\"],\n },\n )\n\n context = {\n \"endpoint\": mark_safe(\"*/\" + json.dumps(absolute_uri(request.get_full_path())) + \";/*\"),\n \"template\": mark_safe(\"*/\" + json.dumps(template) + \";/*\"),\n \"strings\": mark_safe(\n \"*/\"\n + json.dumps_htmlsafe(\n {\n \"generic_error\": str(options[\"errorGeneric\"]),\n \"form_error\": str(options[\"errorFormEntry\"]),\n \"sent_message\": str(options[\"successMessage\"]),\n }\n )\n + \";/*\"\n ),\n }\n\n return render_to_response(\n \"sentry/error-page-embed.js\", context, request, content_type=\"text/javascript\"\n )\n", "path": "src/sentry/web/frontend/error_page_embed.py"}, {"content": "from django.conf import settings\nfrom django.views.generic import View\nfrom urllib.parse import urlencode\n\nfrom sentry.models import ProjectKey\nfrom sentry.web.helpers import render_to_response\n\n\nclass DebugErrorPageEmbedView(View):\n def _get_project_key(self):\n return ProjectKey.objects.filter(project=settings.SENTRY_PROJECT)[0]\n\n def get(self, request):\n context = {\n \"query_params\": urlencode(\n {\n \"dsn\": self._get_project_key().dsn_public,\n \"eventId\": \"342a3d7f690a49f8bd7c4cf0e61a9ded\",\n **request.GET,\n }\n )\n }\n\n return 
render_to_response(\"sentry/debug/error-page-embed.html\", context, request)\n", "path": "src/sentry/web/frontend/debug/debug_error_embed.py"}]} | 3,203 | 419 |
gh_patches_debug_41519 | rasdani/github-patches | git_diff | microsoft__Qcodes-1742 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
GroupParameter initial_value cannot be set
Here's a MWE:
```py
from qcodes.instrument.group_parameter import GroupParameter, Group
from qcodes import Instrument
class MyInstrument(Instrument):
def __init__(self, name, *args, **kwargs):
super().__init__(name, *args, **kwargs)
self.add_parameter(name="foo",
initial_value=42,
parameter_class=GroupParameter
)
self.group = Group([self.foo])
instr = MyInstrument("test")
```
### Expected behaviour
The instrument should have the GroupParameter with the given initial value.
### Actual behaviour
Raises `RuntimeError("Trying to set Group value but no group defined")`.
### Proposed fix
The `GroupParameter` should defer setting the initial value until it has been added to the group. One way of doing it would be to add something like the following to `GroupParameter.__init__`, before the `super().__init__` call:
```py
if "initial_value" in kwargs:
self._initial_value = kwargs["initial_value"]
kwargs["initial_value"] = None
```
and then adding a `GroupParameter.add_to_group` method where the value is actually set, and calling that instead of just setting `parameter.group = self` in `Group.__init__`. I'm not 100% sure if this is the right way to do it.
### System
**qcodes branch**: master
**qcodes commit**: c7eef82d9ab68afb3546fb3c736f2d5b2ff02a14
--- END ISSUE ---
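The deferral idea proposed in the issue can be illustrated with a minimal, self-contained sketch. `DeferredParam` and `ParamGroup` below are hypothetical stand-ins, not the real qcodes classes, and the actual qcodes fix may differ in detail; the point is only the ordering: store the initial value at construction time, attach the parameters to the group, and only then push the stored value through the normal group `set` path.

```py
from typing import Any, List, Optional


class DeferredParam:
    """Stand-in for a GroupParameter that holds back its initial value."""

    def __init__(self, name: str, initial_value: Optional[Any] = None) -> None:
        self.name = name
        self.group: Optional["ParamGroup"] = None
        # Store the initial value instead of applying it: applying requires
        # a group (and hence a set command) that does not exist yet.
        self._initial_value = initial_value

    def set(self, value: Any) -> None:
        if self.group is None:
            raise RuntimeError("Parameter is not attached to a group yet")
        self.group.set(self, value)


class ParamGroup:
    """Stand-in for Group: attaches parameters, then applies deferred values."""

    def __init__(self, parameters: List[DeferredParam]) -> None:
        self.parameters = parameters
        for p in parameters:
            p.group = self  # attach first, so p.set() is allowed below
        for p in parameters:
            if p._initial_value is not None:
                p.set(p._initial_value)  # apply the deferred initial value

    def set(self, parameter: DeferredParam, value: Any) -> None:
        # A real Group would format and write an instrument command here.
        print(f"group set: {parameter.name} = {value}")


if __name__ == "__main__":
    ParamGroup([DeferredParam("foo", initial_value=42)])
    # prints: group set: foo = 42
```

Applying the stored values only after every parameter has been attached keeps the initial-value write on exactly the same code path as an ordinary `set` call, which is what the reported `RuntimeError` short-circuits when the value is set during construction.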
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qcodes/instrument/group_parameter.py`
Content:
```
1 """
2 This module implements a :class:`.Group` intended to hold multiple
3 parameters that are to be gotten and set by the same command. The parameters
4 should be of type :class:`GroupParameter`
5 """
6
7
8 from collections import OrderedDict
9 from typing import List, Union, Callable, Dict, Any, Optional
10
11 from qcodes.instrument.parameter import Parameter
12 from qcodes import Instrument
13
14
15 class GroupParameter(Parameter):
16 """
17 Group parameter is a :class:`.Parameter`, whose value can be set or get
18 only with other group parameters. This happens when an instrument
19 has commands which set and get more than one parameter per call.
20
21 The ``set_raw`` method of a group parameter forwards the call to the
22 group, and the group then makes sure that the values of other parameters
23 within the group are left unchanged. The ``get_raw`` method of a group
24 parameter also forwards the call to the group, and the group makes sure
25 that the command output is parsed correctly, and the value of the
26 parameter of interest is returned.
27
28 After initialization, the group parameters need to be added to a group.
29 See :class:`.Group` for more information.
30
31 Args:
32 name: Name of the parameter.
33 instrument: Instrument that this parameter belongs to; this
34 instrument is used by the group to call its get and set commands.
35
36 **kwargs: All kwargs used by the :class:`.Parameter` class, except
37 ``set_cmd`` and ``get_cmd``.
38 """
39
40 def __init__(self,
41 name: str,
42 instrument: Optional['Instrument'] = None,
43 **kwargs
44 ) -> None:
45
46 if "set_cmd" in kwargs or "get_cmd" in kwargs:
47 raise ValueError("A GroupParameter does not use 'set_cmd' or "
48 "'get_cmd' kwarg")
49
50 self.group: Union[Group, None] = None
51 super().__init__(name, instrument=instrument, **kwargs)
52
53 self.set = self._wrap_set(self.set_raw)
54
55 self.get_raw = lambda result=None: result if result is not None \
56 else self._get_raw_value()
57
58 self.get = self._wrap_get(self.get_raw)
59
60 def _get_raw_value(self) -> Any:
61 if self.group is None:
62 raise RuntimeError("Trying to get Group value but no "
63 "group defined")
64 self.group.update()
65 return self.raw_value
66
67 def set_raw(self, value: Any) -> None:
68 if self.group is None:
69 raise RuntimeError("Trying to set Group value but no "
70 "group defined")
71 self.group.set(self, value)
72
73
74 class Group:
75 """
76 The group combines :class:`.GroupParameter` s that are to be gotten or set
77 via the same command. The command has to be a string, for example,
78 a VISA command.
79
80 The :class:`Group`'s methods are used within :class:`GroupParameter` in
81 order to properly implement setting and getting of a single parameter in
82 the situation where one command sets or gets more than one parameter.
83
84 The command used for setting values of parameters has to be a format
85 string which contains the names of the parameters the group has been
86 initialized with. For example, if a command has syntax ``CMD a_value,
87 b_value``, where ``a_value`` and ``b_value`` are values of two parameters
88 with names ``a`` and ``b``, then the command string has to be ``CMD {a},
89 {b}``, and the group has to be initialized with two ``GroupParameter`` s
90 ``a_param`` and ``b_param``, where ``a_param.name=="a"`` and
91 ``b_param.name=="b"``.
92
93 **Note** that by default, it is assumed that the command used for getting
94 values returns a comma-separated list of values of parameters, and their
95 order corresponds to the order of :class:`.GroupParameter` s in the list
96 that is passed to the :class:`Group`'s constructor. Through keyword
97 arguments of the :class:`Group`'s constructor, it is possible to change
98 the separator, and even the parser of the output of the get command.
99
100 The get and set commands are called via the instrument that the first
101 parameter belongs to. It is assumed that all the parameters within the
102 group belong to the same instrument.
103
104 Example:
105
106 ::
107
108 class InstrumentWithGroupParameters(VisaInstrument):
109 def __init__(self, name, address, **kwargs):
110 super().__init__(name, address, **kwargs)
111
112 ...
113
114 # Here is how group of group parameters is defined for
115 # a simple case of an example "SGP" command that sets and gets
116 # values of "enabled" and "gain" parameters (it is assumed that
117 # "SGP?" returns the parameter values as comma-separated list
118 # "enabled_value,gain_value")
119 self.add_parameter('enabled',
120 label='Enabled',
121 val_mapping={True: 1, False: 0},
122 parameter_class=GroupParameter)
123 self.add_parameter('gain',
124 label='Some gain value',
125 get_parser=float,
126 parameter_class=GroupParameter)
127 self.output_group = Group([self.enabled, self.gain],
128 set_cmd='SGP {enabled}, {gain}',
129 get_cmd='SGP?')
130
131 ...
132
133 Args:
134 parameters: a list of :class:`.GroupParameter` instances which have
135 to be gotten and set via the same command; the order of
136 parameters in the list should correspond to the order of the
137 values returned by the ``get_cmd``.
138 set_cmd: Format string of the command that is used for setting the
139 valueS of the parameters; for example, ``CMD {a}, {b}``.
140 get_cmd: String of the command that is used for getting the values
141 of the parameters; for example, ``CMD?``.
142 separator: A separator that is used when parsing the output of the
143 ``get_cmd`` in order to obtain the values of the parameters; it
144 is ignored in case a custom ``get_parser`` is used.
145 get_parser: A callable with a single string argument that is used to
146 parse the output of the ``get_cmd``; the callable has to return a
147 dictionary where parameter names are keys, and the values are the
148 values (as directly obtained from the output of the get command;
149 note that parsers within the parameters will take care of
150 individual parsing of their values).
151 """
152 def __init__(self,
153 parameters: List[GroupParameter],
154 set_cmd: str = None,
155 get_cmd: str = None,
156 get_parser: Union[Callable[[str],
157 Dict[str, Any]], None] = None,
158 separator: str = ','
159 ) -> None:
160 self.parameters = OrderedDict((p.name, p) for p in parameters)
161
162 for p in parameters:
163 p.group = self
164
165 if len(set([p.root_instrument for p in parameters])) > 1:
166 raise ValueError(
167 "All parameters should belong to the same instrument")
168
169 self.instrument = parameters[0].root_instrument
170
171 self.set_cmd = set_cmd
172 self.get_cmd = get_cmd
173
174 if get_parser:
175 self.get_parser = get_parser
176 else:
177 self.get_parser = self._separator_parser(separator)
178
179 def _separator_parser(self, separator: str
180 ) -> Callable[[str], Dict[str, Any]]:
181 """A default separator-based string parser"""
182 def parser(ret_str: str) -> Dict[str, Any]:
183 keys = self.parameters.keys()
184 values = ret_str.split(separator)
185 return dict(zip(keys, values))
186
187 return parser
188
189 def set(self, set_parameter: GroupParameter, value: Any):
190 """
191 Sets the value of the given parameter within a group to the given
192 value by calling the ``set_cmd``.
193
194 Args:
195 set_parameter: The parameter within the group to set.
196 value: The new value for this parameter.
197 """
198 if any((p.get_latest() is None) for p in self.parameters.values()):
199 self.update()
200 calling_dict = {name: p.raw_value
201 for name, p in self.parameters.items()}
202 calling_dict[set_parameter.name] = value
203 if self.set_cmd is None:
204 raise RuntimeError("Calling set but no `set_cmd` defined")
205 command_str = self.set_cmd.format(**calling_dict)
206 if self.instrument is None:
207 raise RuntimeError("Trying to set GroupParameter not attached "
208 "to any instrument.")
209 self.instrument.write(command_str)
210
211 def update(self):
212 """
213 Update the values of all the parameters within the group by calling
214 the ``get_cmd``.
215 """
216 ret = self.get_parser(self.instrument.ask(self.get_cmd))
217 for name, p in list(self.parameters.items()):
218 p.get(result=ret[name])
219
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/qcodes/instrument/group_parameter.py b/qcodes/instrument/group_parameter.py
--- a/qcodes/instrument/group_parameter.py
+++ b/qcodes/instrument/group_parameter.py
@@ -30,8 +30,11 @@
Args:
name: Name of the parameter.
- instrument: Instrument that this parameter belongs to; this
- instrument is used by the group to call its get and set commands.
+ instrument: Instrument that this parameter belongs to; this instrument
+ is used by the group to call its get and set commands.
+ initial_value: Initial value of the parameter. Note that either none or
+ all of the parameters in a :class:`.Group` should have an initial
+ value.
**kwargs: All kwargs used by the :class:`.Parameter` class, except
``set_cmd`` and ``get_cmd``.
@@ -40,6 +43,7 @@
def __init__(self,
name: str,
instrument: Optional['Instrument'] = None,
+ initial_value: Union[float, int, str, None] = None,
**kwargs
) -> None:
@@ -48,6 +52,7 @@
"'get_cmd' kwarg")
self.group: Union[Group, None] = None
+ self._initial_value = initial_value
super().__init__(name, instrument=instrument, **kwargs)
self.set = self._wrap_set(self.set_raw)
@@ -176,6 +181,27 @@
else:
self.get_parser = self._separator_parser(separator)
+ have_initial_values = [p._initial_value is not None
+ for p in parameters]
+ if any(have_initial_values):
+ if not all(have_initial_values):
+ params_with_initial_values = [p.name for p in parameters
+ if p._initial_value is not None]
+ params_without_initial_values = [p.name for p in parameters
+ if p._initial_value is None]
+ error_msg = (f'Either none or all of the parameters in a '
+ f'group should have an initial value. Found '
+ f'initial values for '
+ f'{params_with_initial_values} but not for '
+ f'{params_without_initial_values}.')
+ raise ValueError(error_msg)
+
+ calling_dict = {name: p._initial_value
+ for name, p in self.parameters.items()}
+
+ self._set_from_dict(calling_dict)
+
+
def _separator_parser(self, separator: str
) -> Callable[[str], Dict[str, Any]]:
"""A default separator-based string parser"""
@@ -200,6 +226,14 @@
calling_dict = {name: p.raw_value
for name, p in self.parameters.items()}
calling_dict[set_parameter.name] = value
+
+ self._set_from_dict(calling_dict)
+
+ def _set_from_dict(self, calling_dict: Dict[str, Any]):
+ """
+ Use ``set_cmd`` to parse a dict that maps parameter names to parameter
+ values, and actually perform setting the values.
+ """
if self.set_cmd is None:
raise RuntimeError("Calling set but no `set_cmd` defined")
command_str = self.set_cmd.format(**calling_dict)
| {"golden_diff": "diff --git a/qcodes/instrument/group_parameter.py b/qcodes/instrument/group_parameter.py\n--- a/qcodes/instrument/group_parameter.py\n+++ b/qcodes/instrument/group_parameter.py\n@@ -30,8 +30,11 @@\n \n Args:\n name: Name of the parameter.\n- instrument: Instrument that this parameter belongs to; this\n- instrument is used by the group to call its get and set commands.\n+ instrument: Instrument that this parameter belongs to; this instrument\n+ is used by the group to call its get and set commands.\n+ initial_value: Initial value of the parameter. Note that either none or\n+ all of the parameters in a :class:`.Group` should have an initial\n+ value.\n \n **kwargs: All kwargs used by the :class:`.Parameter` class, except\n ``set_cmd`` and ``get_cmd``.\n@@ -40,6 +43,7 @@\n def __init__(self,\n name: str,\n instrument: Optional['Instrument'] = None,\n+ initial_value: Union[float, int, str, None] = None,\n **kwargs\n ) -> None:\n \n@@ -48,6 +52,7 @@\n \"'get_cmd' kwarg\")\n \n self.group: Union[Group, None] = None\n+ self._initial_value = initial_value\n super().__init__(name, instrument=instrument, **kwargs)\n \n self.set = self._wrap_set(self.set_raw)\n@@ -176,6 +181,27 @@\n else:\n self.get_parser = self._separator_parser(separator)\n \n+ have_initial_values = [p._initial_value is not None\n+ for p in parameters]\n+ if any(have_initial_values):\n+ if not all(have_initial_values):\n+ params_with_initial_values = [p.name for p in parameters\n+ if p._initial_value is not None]\n+ params_without_initial_values = [p.name for p in parameters\n+ if p._initial_value is None]\n+ error_msg = (f'Either none or all of the parameters in a '\n+ f'group should have an initial value. Found '\n+ f'initial values for '\n+ f'{params_with_initial_values} but not for '\n+ f'{params_without_initial_values}.')\n+ raise ValueError(error_msg)\n+\n+ calling_dict = {name: p._initial_value\n+ for name, p in self.parameters.items()}\n+\n+ self._set_from_dict(calling_dict)\n+\n+\n def _separator_parser(self, separator: str\n ) -> Callable[[str], Dict[str, Any]]:\n \"\"\"A default separator-based string parser\"\"\"\n@@ -200,6 +226,14 @@\n calling_dict = {name: p.raw_value\n for name, p in self.parameters.items()}\n calling_dict[set_parameter.name] = value\n+\n+ self._set_from_dict(calling_dict)\n+\n+ def _set_from_dict(self, calling_dict: Dict[str, Any]):\n+ \"\"\"\n+ Use ``set_cmd`` to parse a dict that maps parameter names to parameter\n+ values, and actually perform setting the values.\n+ \"\"\"\n if self.set_cmd is None:\n raise RuntimeError(\"Calling set but no `set_cmd` defined\")\n command_str = self.set_cmd.format(**calling_dict)\n", "issue": "GroupParameter initial_value cannot be set\nHere's a MWE:\r\n```py\r\nfrom qcodes.instrument.group_parameter import GroupParameter, Group\r\nfrom qcodes import Instrument\r\n\r\nclass MyInstrument(Instrument):\r\n def __init__(self, name, *args, **kwargs):\r\n super().__init__(name, *args, **kwargs)\r\n\r\n self.add_parameter(name=\"foo\",\r\n initial_value=42,\r\n parameter_class=GroupParameter\r\n )\r\n\r\n self.group = Group([self.foo])\r\n\r\ninstr = MyInstrument(\"test\")\r\n```\r\n\r\n### Expected behaviour\r\nThe instrument should have the GroupParameter with the given initial value.\r\n\r\n### Actual behaviour\r\nRaises `RuntimeError(\"Trying to set Group value but no group defined\")`.\r\n\r\n### Proposed fix\r\nThe `GroupParameter` should defer setting the initial value until it has been added to the group. 
One way of doing it would be to add something like the following to `GroupParameter.__init__`, before the `super().__init__` call:\r\n```py\r\nif \"initial_value\" in kwargs:\r\n self._initial_value = kwargs[\"initial_value\"]\r\n kwargs[\"initial_value\"] = None\r\n```\r\nand then adding a `GroupParameter.add_to_group` method where the value is actually set, and calling that instead of just setting `parameter.group = self` in `Group.__init__`. I'm not 100% sure if this is the right way to do it.\r\n\r\n### System\r\n**qcodes branch**: master\r\n\r\n**qcodes commit**: c7eef82d9ab68afb3546fb3c736f2d5b2ff02a14\r\n\n", "before_files": [{"content": "\"\"\"\nThis module implements a :class:`.Group` intended to hold multiple\nparameters that are to be gotten and set by the same command. The parameters\nshould be of type :class:`GroupParameter`\n\"\"\"\n\n\nfrom collections import OrderedDict\nfrom typing import List, Union, Callable, Dict, Any, Optional\n\nfrom qcodes.instrument.parameter import Parameter\nfrom qcodes import Instrument\n\n\nclass GroupParameter(Parameter):\n \"\"\"\n Group parameter is a :class:`.Parameter`, whose value can be set or get\n only with other group parameters. This happens when an instrument\n has commands which set and get more than one parameter per call.\n\n The ``set_raw`` method of a group parameter forwards the call to the\n group, and the group then makes sure that the values of other parameters\n within the group are left unchanged. The ``get_raw`` method of a group\n parameter also forwards the call to the group, and the group makes sure\n that the command output is parsed correctly, and the value of the\n parameter of interest is returned.\n\n After initialization, the group parameters need to be added to a group.\n See :class:`.Group` for more information.\n\n Args:\n name: Name of the parameter.\n instrument: Instrument that this parameter belongs to; this\n instrument is used by the group to call its get and set commands.\n\n **kwargs: All kwargs used by the :class:`.Parameter` class, except\n ``set_cmd`` and ``get_cmd``.\n \"\"\"\n\n def __init__(self,\n name: str,\n instrument: Optional['Instrument'] = None,\n **kwargs\n ) -> None:\n\n if \"set_cmd\" in kwargs or \"get_cmd\" in kwargs:\n raise ValueError(\"A GroupParameter does not use 'set_cmd' or \"\n \"'get_cmd' kwarg\")\n\n self.group: Union[Group, None] = None\n super().__init__(name, instrument=instrument, **kwargs)\n\n self.set = self._wrap_set(self.set_raw)\n\n self.get_raw = lambda result=None: result if result is not None \\\n else self._get_raw_value()\n\n self.get = self._wrap_get(self.get_raw)\n\n def _get_raw_value(self) -> Any:\n if self.group is None:\n raise RuntimeError(\"Trying to get Group value but no \"\n \"group defined\")\n self.group.update()\n return self.raw_value\n\n def set_raw(self, value: Any) -> None:\n if self.group is None:\n raise RuntimeError(\"Trying to set Group value but no \"\n \"group defined\")\n self.group.set(self, value)\n\n\nclass Group:\n \"\"\"\n The group combines :class:`.GroupParameter` s that are to be gotten or set\n via the same command. 
The command has to be a string, for example,\n a VISA command.\n\n The :class:`Group`'s methods are used within :class:`GroupParameter` in\n order to properly implement setting and getting of a single parameter in\n the situation where one command sets or gets more than one parameter.\n\n The command used for setting values of parameters has to be a format\n string which contains the names of the parameters the group has been\n initialized with. For example, if a command has syntax ``CMD a_value,\n b_value``, where ``a_value`` and ``b_value`` are values of two parameters\n with names ``a`` and ``b``, then the command string has to be ``CMD {a},\n {b}``, and the group has to be initialized with two ``GroupParameter`` s\n ``a_param`` and ``b_param``, where ``a_param.name==\"a\"`` and\n ``b_param.name==\"b\"``.\n\n **Note** that by default, it is assumed that the command used for getting\n values returns a comma-separated list of values of parameters, and their\n order corresponds to the order of :class:`.GroupParameter` s in the list\n that is passed to the :class:`Group`'s constructor. Through keyword\n arguments of the :class:`Group`'s constructor, it is possible to change\n the separator, and even the parser of the output of the get command.\n\n The get and set commands are called via the instrument that the first\n parameter belongs to. It is assumed that all the parameters within the\n group belong to the same instrument.\n\n Example:\n\n ::\n\n class InstrumentWithGroupParameters(VisaInstrument):\n def __init__(self, name, address, **kwargs):\n super().__init__(name, address, **kwargs)\n\n ...\n\n # Here is how group of group parameters is defined for\n # a simple case of an example \"SGP\" command that sets and gets\n # values of \"enabled\" and \"gain\" parameters (it is assumed that\n # \"SGP?\" returns the parameter values as comma-separated list\n # \"enabled_value,gain_value\")\n self.add_parameter('enabled',\n label='Enabled',\n val_mapping={True: 1, False: 0},\n parameter_class=GroupParameter)\n self.add_parameter('gain',\n label='Some gain value',\n get_parser=float,\n parameter_class=GroupParameter)\n self.output_group = Group([self.enabled, self.gain],\n set_cmd='SGP {enabled}, {gain}',\n get_cmd='SGP?')\n\n ...\n\n Args:\n parameters: a list of :class:`.GroupParameter` instances which have\n to be gotten and set via the same command; the order of\n parameters in the list should correspond to the order of the\n values returned by the ``get_cmd``.\n set_cmd: Format string of the command that is used for setting the\n valueS of the parameters; for example, ``CMD {a}, {b}``.\n get_cmd: String of the command that is used for getting the values\n of the parameters; for example, ``CMD?``.\n separator: A separator that is used when parsing the output of the\n ``get_cmd`` in order to obtain the values of the parameters; it\n is ignored in case a custom ``get_parser`` is used.\n get_parser: A callable with a single string argument that is used to\n parse the output of the ``get_cmd``; the callable has to return a\n dictionary where parameter names are keys, and the values are the\n values (as directly obtained from the output of the get command;\n note that parsers within the parameters will take care of\n individual parsing of their values).\n \"\"\"\n def __init__(self,\n parameters: List[GroupParameter],\n set_cmd: str = None,\n get_cmd: str = None,\n get_parser: Union[Callable[[str],\n Dict[str, Any]], None] = None,\n separator: str = ','\n ) -> None:\n self.parameters = 
OrderedDict((p.name, p) for p in parameters)\n\n for p in parameters:\n p.group = self\n\n if len(set([p.root_instrument for p in parameters])) > 1:\n raise ValueError(\n \"All parameters should belong to the same instrument\")\n\n self.instrument = parameters[0].root_instrument\n\n self.set_cmd = set_cmd\n self.get_cmd = get_cmd\n\n if get_parser:\n self.get_parser = get_parser\n else:\n self.get_parser = self._separator_parser(separator)\n\n def _separator_parser(self, separator: str\n ) -> Callable[[str], Dict[str, Any]]:\n \"\"\"A default separator-based string parser\"\"\"\n def parser(ret_str: str) -> Dict[str, Any]:\n keys = self.parameters.keys()\n values = ret_str.split(separator)\n return dict(zip(keys, values))\n\n return parser\n\n def set(self, set_parameter: GroupParameter, value: Any):\n \"\"\"\n Sets the value of the given parameter within a group to the given\n value by calling the ``set_cmd``.\n\n Args:\n set_parameter: The parameter within the group to set.\n value: The new value for this parameter.\n \"\"\"\n if any((p.get_latest() is None) for p in self.parameters.values()):\n self.update()\n calling_dict = {name: p.raw_value\n for name, p in self.parameters.items()}\n calling_dict[set_parameter.name] = value\n if self.set_cmd is None:\n raise RuntimeError(\"Calling set but no `set_cmd` defined\")\n command_str = self.set_cmd.format(**calling_dict)\n if self.instrument is None:\n raise RuntimeError(\"Trying to set GroupParameter not attached \"\n \"to any instrument.\")\n self.instrument.write(command_str)\n\n def update(self):\n \"\"\"\n Update the values of all the parameters within the group by calling\n the ``get_cmd``.\n \"\"\"\n ret = self.get_parser(self.instrument.ask(self.get_cmd))\n for name, p in list(self.parameters.items()):\n p.get(result=ret[name])\n", "path": "qcodes/instrument/group_parameter.py"}], "after_files": [{"content": "\"\"\"\nThis module implements a :class:`.Group` intended to hold multiple\nparameters that are to be gotten and set by the same command. The parameters\nshould be of type :class:`GroupParameter`\n\"\"\"\n\n\nfrom collections import OrderedDict\nfrom typing import List, Union, Callable, Dict, Any, Optional\n\nfrom qcodes.instrument.parameter import Parameter\nfrom qcodes import Instrument\n\n\nclass GroupParameter(Parameter):\n \"\"\"\n Group parameter is a :class:`.Parameter`, whose value can be set or get\n only with other group parameters. This happens when an instrument\n has commands which set and get more than one parameter per call.\n\n The ``set_raw`` method of a group parameter forwards the call to the\n group, and the group then makes sure that the values of other parameters\n within the group are left unchanged. The ``get_raw`` method of a group\n parameter also forwards the call to the group, and the group makes sure\n that the command output is parsed correctly, and the value of the\n parameter of interest is returned.\n\n After initialization, the group parameters need to be added to a group.\n See :class:`.Group` for more information.\n\n Args:\n name: Name of the parameter.\n instrument: Instrument that this parameter belongs to; this instrument\n is used by the group to call its get and set commands.\n initial_value: Initial value of the parameter. 
Note that either none or\n all of the parameters in a :class:`.Group` should have an initial\n value.\n\n **kwargs: All kwargs used by the :class:`.Parameter` class, except\n ``set_cmd`` and ``get_cmd``.\n \"\"\"\n\n def __init__(self,\n name: str,\n instrument: Optional['Instrument'] = None,\n initial_value: Union[float, int, str, None] = None,\n **kwargs\n ) -> None:\n\n if \"set_cmd\" in kwargs or \"get_cmd\" in kwargs:\n raise ValueError(\"A GroupParameter does not use 'set_cmd' or \"\n \"'get_cmd' kwarg\")\n\n self.group: Union[Group, None] = None\n self._initial_value = initial_value\n super().__init__(name, instrument=instrument, **kwargs)\n\n self.set = self._wrap_set(self.set_raw)\n\n self.get_raw = lambda result=None: result if result is not None \\\n else self._get_raw_value()\n\n self.get = self._wrap_get(self.get_raw)\n\n def _get_raw_value(self) -> Any:\n if self.group is None:\n raise RuntimeError(\"Trying to get Group value but no \"\n \"group defined\")\n self.group.update()\n return self.raw_value\n\n def set_raw(self, value: Any) -> None:\n if self.group is None:\n raise RuntimeError(\"Trying to set Group value but no \"\n \"group defined\")\n self.group.set(self, value)\n\n\nclass Group:\n \"\"\"\n The group combines :class:`.GroupParameter` s that are to be gotten or set\n via the same command. The command has to be a string, for example,\n a VISA command.\n\n The :class:`Group`'s methods are used within :class:`GroupParameter` in\n order to properly implement setting and getting of a single parameter in\n the situation where one command sets or gets more than one parameter.\n\n The command used for setting values of parameters has to be a format\n string which contains the names of the parameters the group has been\n initialized with. For example, if a command has syntax ``CMD a_value,\n b_value``, where ``a_value`` and ``b_value`` are values of two parameters\n with names ``a`` and ``b``, then the command string has to be ``CMD {a},\n {b}``, and the group has to be initialized with two ``GroupParameter`` s\n ``a_param`` and ``b_param``, where ``a_param.name==\"a\"`` and\n ``b_param.name==\"b\"``.\n\n **Note** that by default, it is assumed that the command used for getting\n values returns a comma-separated list of values of parameters, and their\n order corresponds to the order of :class:`.GroupParameter` s in the list\n that is passed to the :class:`Group`'s constructor. Through keyword\n arguments of the :class:`Group`'s constructor, it is possible to change\n the separator, and even the parser of the output of the get command.\n\n The get and set commands are called via the instrument that the first\n parameter belongs to. 
It is assumed that all the parameters within the\n group belong to the same instrument.\n\n Example:\n\n ::\n\n class InstrumentWithGroupParameters(VisaInstrument):\n def __init__(self, name, address, **kwargs):\n super().__init__(name, address, **kwargs)\n\n ...\n\n # Here is how group of group parameters is defined for\n # a simple case of an example \"SGP\" command that sets and gets\n # values of \"enabled\" and \"gain\" parameters (it is assumed that\n # \"SGP?\" returns the parameter values as comma-separated list\n # \"enabled_value,gain_value\")\n self.add_parameter('enabled',\n label='Enabled',\n val_mapping={True: 1, False: 0},\n parameter_class=GroupParameter)\n self.add_parameter('gain',\n label='Some gain value',\n get_parser=float,\n parameter_class=GroupParameter)\n self.output_group = Group([self.enabled, self.gain],\n set_cmd='SGP {enabled}, {gain}',\n get_cmd='SGP?')\n\n ...\n\n Args:\n parameters: a list of :class:`.GroupParameter` instances which have\n to be gotten and set via the same command; the order of\n parameters in the list should correspond to the order of the\n values returned by the ``get_cmd``.\n set_cmd: Format string of the command that is used for setting the\n valueS of the parameters; for example, ``CMD {a}, {b}``.\n get_cmd: String of the command that is used for getting the values\n of the parameters; for example, ``CMD?``.\n separator: A separator that is used when parsing the output of the\n ``get_cmd`` in order to obtain the values of the parameters; it\n is ignored in case a custom ``get_parser`` is used.\n get_parser: A callable with a single string argument that is used to\n parse the output of the ``get_cmd``; the callable has to return a\n dictionary where parameter names are keys, and the values are the\n values (as directly obtained from the output of the get command;\n note that parsers within the parameters will take care of\n individual parsing of their values).\n \"\"\"\n def __init__(self,\n parameters: List[GroupParameter],\n set_cmd: str = None,\n get_cmd: str = None,\n get_parser: Union[Callable[[str],\n Dict[str, Any]], None] = None,\n separator: str = ','\n ) -> None:\n self.parameters = OrderedDict((p.name, p) for p in parameters)\n\n for p in parameters:\n p.group = self\n\n if len(set([p.root_instrument for p in parameters])) > 1:\n raise ValueError(\n \"All parameters should belong to the same instrument\")\n\n self.instrument = parameters[0].root_instrument\n\n self.set_cmd = set_cmd\n self.get_cmd = get_cmd\n\n if get_parser:\n self.get_parser = get_parser\n else:\n self.get_parser = self._separator_parser(separator)\n\n have_initial_values = [p._initial_value is not None\n for p in parameters]\n if any(have_initial_values):\n if not all(have_initial_values):\n params_with_initial_values = [p.name for p in parameters\n if p._initial_value is not None]\n params_without_initial_values = [p.name for p in parameters\n if p._initial_value is None]\n error_msg = (f'Either none or all of the parameters in a '\n f'group should have an initial value. 
Found '\n f'initial values for '\n f'{params_with_initial_values} but not for '\n f'{params_without_initial_values}.')\n raise ValueError(error_msg)\n\n calling_dict = {name: p._initial_value\n for name, p in self.parameters.items()}\n\n self._set_from_dict(calling_dict)\n\n\n def _separator_parser(self, separator: str\n ) -> Callable[[str], Dict[str, Any]]:\n \"\"\"A default separator-based string parser\"\"\"\n def parser(ret_str: str) -> Dict[str, Any]:\n keys = self.parameters.keys()\n values = ret_str.split(separator)\n return dict(zip(keys, values))\n\n return parser\n\n def set(self, set_parameter: GroupParameter, value: Any):\n \"\"\"\n Sets the value of the given parameter within a group to the given\n value by calling the ``set_cmd``.\n\n Args:\n set_parameter: The parameter within the group to set.\n value: The new value for this parameter.\n \"\"\"\n if any((p.get_latest() is None) for p in self.parameters.values()):\n self.update()\n calling_dict = {name: p.raw_value\n for name, p in self.parameters.items()}\n calling_dict[set_parameter.name] = value\n\n self._set_from_dict(calling_dict)\n\n def _set_from_dict(self, calling_dict: Dict[str, Any]):\n \"\"\"\n Use ``set_cmd`` to parse a dict that maps parameter names to parameter\n values, and actually perform setting the values.\n \"\"\"\n if self.set_cmd is None:\n raise RuntimeError(\"Calling set but no `set_cmd` defined\")\n command_str = self.set_cmd.format(**calling_dict)\n if self.instrument is None:\n raise RuntimeError(\"Trying to set GroupParameter not attached \"\n \"to any instrument.\")\n self.instrument.write(command_str)\n\n def update(self):\n \"\"\"\n Update the values of all the parameters within the group by calling\n the ``get_cmd``.\n \"\"\"\n ret = self.get_parser(self.instrument.ask(self.get_cmd))\n for name, p in list(self.parameters.items()):\n p.get(result=ret[name])\n", "path": "qcodes/instrument/group_parameter.py"}]} | 3,097 | 734 |
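Editor's note on the qcodes record that ends above: the accepted patch adds an `initial_value` argument to `GroupParameter` and has `Group.__init__` push those values through `set_cmd` once the group exists. Below is a minimal usage sketch, assuming a qcodes build that already contains that patch; the `DemoInstrument` class, its `write_raw` hook, and the `SGP` command string are invented here purely for illustration.

```python
# Illustrative only: DemoInstrument and its write_raw hook are made up for this
# sketch; they are not part of the qcodes patch shown in the record above.
from qcodes import Instrument
from qcodes.instrument.group_parameter import Group, GroupParameter


class DemoInstrument(Instrument):
    """Dummy instrument that records the last command instead of talking to hardware."""

    def __init__(self, name, **kwargs):
        super().__init__(name, **kwargs)
        self.last_write = None
        self.add_parameter("enabled", parameter_class=GroupParameter,
                           initial_value=1)
        self.add_parameter("gain", parameter_class=GroupParameter,
                           initial_value=5)
        # With the patch, constructing the Group applies the initial values
        # through set_cmd instead of raising "no group defined".
        self.output_group = Group([self.enabled, self.gain],
                                  set_cmd="SGP {enabled}, {gain}",
                                  get_cmd="SGP?")

    def write_raw(self, cmd):
        # Only writes are exercised in this sketch; reads are not captured.
        self.last_write = cmd


instr = DemoInstrument("demo")
print(instr.last_write)  # expected: "SGP 1, 5"
```

Per the patched `Group.__init__`, giving an `initial_value` to only some of the grouped parameters would instead raise a `ValueError`.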
gh_patches_debug_18740 | rasdani/github-patches | git_diff | learningequality__kolibri-11846 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Impossible to create a learner account on a second (imported) facility?
## Observed behavior
I managed to replicate this on 2 separate Android devices. My Windows 7 server device has 2 facilities, one created during the server setup (**Win7new**), and the second one imported from another VM (**imported-from-Win11**). This second one has several classes, and I have been using it exclusively during this past week.
1. At first I tried to create a new learner account from the Huawei Android 8 tablet on the second facility, and got an error (_Whoops!_). I retried and the field validation was saying ``Username already exists``, which did not make sense, since I searched users on that second facility on the server VM and the username I was trying to create was not there. I moved on, imported another user and went on testing something else.
2. Some time later, while setting up another LoD (on the Motorola phone), I tried again to create (not import) a new user on the second facility, and got the same ``Username already exists`` message. After the initial confusion I had a hunch and went to check the first facility (the one I never really used for class interactions during this testing), and lo and behold, the user I was trying to create did exist there, probably from the setup on the Huawei tablet described above. And this is not my fat finger mis-touching the wrong radio button during the facility selection; it actually seems that the option to create a new user is somehow _bound_ to the first facility on the device and does not _obey_ the user's selection of the second facility.
LoD | Win7 facility users | imported-from-Win11 facility users
-- | -- | --
 |  | 
## Errors and logs
[Windows 7 home folder](https://drive.google.com/file/d/1QaHEZON_yL3hnLRsW2PEURY76MVEL9JT/view?usp=drive_link) (without content)
## Expected behavior
User needs to be able to select the facility they need to create their new learner account.
## User-facing consequences
Impossible to create a new learner account on a desired facility.
## Steps to reproduce
You will need a server with 2 facilities; try setting up an LoD by creating a new account on the second one.
## Context
* Kolibri version: 0.16b13
* Operating system: Windows 7
cc @pcenov to try to replicate
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/plugins/setup_wizard/api.py`
Content:
```
1 import requests
2 from django.urls import reverse
3 from rest_framework import decorators
4 from rest_framework.exceptions import AuthenticationFailed
5 from rest_framework.exceptions import NotFound
6 from rest_framework.exceptions import PermissionDenied
7 from rest_framework.exceptions import ValidationError
8 from rest_framework.permissions import BasePermission
9 from rest_framework.response import Response
10 from rest_framework.viewsets import ViewSet
11
12 from kolibri.core.auth.constants import user_kinds
13 from kolibri.core.auth.models import Facility
14 from kolibri.core.auth.models import FacilityUser
15 from kolibri.core.auth.utils.users import get_remote_users_info
16 from kolibri.core.device.models import DevicePermissions
17
18
19 # Basic class that makes these endpoints unusable if device is provisioned
20 class HasPermissionDuringSetup(BasePermission):
21 def has_permission(self, request, view):
22 from kolibri.core.device.utils import device_provisioned
23
24 return not device_provisioned()
25
26
27 class HasPermissionDuringLODSetup(BasePermission):
28 def has_permission(self, request, view):
29 from kolibri.core.device.utils import get_device_setting
30
31 return get_device_setting("subset_of_users_device")
32
33
34 class SetupWizardResource(ViewSet):
35 """
36 Generic endpoints for use during various setup wizard onboarding flows
37 """
38
39 permission_classes = (HasPermissionDuringSetup,)
40
41 @decorators.action(methods=["post"], detail=False)
42 def createuseronremote(self, request):
43 facility_id = request.data.get("facility_id", None)
44 username = request.data.get("username", None)
45 password = request.data.get("password", None)
46 full_name = request.data.get("full_name", "")
47 baseurl = request.data.get("baseurl", None)
48
49 api_url = reverse("kolibri:core:publicsignup-list")
50
51 url = "{}{}".format(baseurl, api_url)
52
53 payload = {
54 "facility_id": facility_id,
55 "username": username,
56 "password": password,
57 "full_name": full_name,
58 }
59
60 r = requests.post(url, data=payload)
61 return Response({"status": r.status_code, "data": r.content})
62
63
64 class FacilityImportViewSet(ViewSet):
65 """
66 A group of endpoints that are used by the SetupWizard to import a facility
67 and create a superuser
68 """
69
70 permission_classes = (HasPermissionDuringSetup,)
71
72 @decorators.action(methods=["get"], detail=False)
73 def facilityadmins(self, request):
74 # The filter is very loose, since we are assuming that the only
75 # users are from the new facility
76 queryset = FacilityUser.objects.filter(roles__kind__contains="admin")
77 response_data = [
78 {"full_name": user.full_name, "username": user.username, "id": user.id}
79 for user in queryset
80 ]
81 return Response(response_data)
82
83 @decorators.action(methods=["post"], detail=False)
84 def grantsuperuserpermissions(self, request):
85 """
86 Given a user ID and credentials, create a superuser DevicePermissions record
87 """
88 user_id = request.data.get("user_id", "")
89 password = request.data.get("password", "")
90
91 # Get the Facility User object
92 try:
93 facilityuser = FacilityUser.objects.get(id=user_id)
94 except (Exception, FacilityUser.DoesNotExist):
95 raise NotFound()
96
97 # Test for password and admin role
98 if (
99 not facilityuser.check_password(password)
100 or user_kinds.ADMIN not in facilityuser.session_data["kind"]
101 ):
102 raise PermissionDenied()
103
104 # If it succeeds, create a DevicePermissions model for the user
105 DevicePermissions.objects.update_or_create(
106 user=facilityuser,
107 defaults={"is_superuser": True, "can_manage_content": True},
108 )
109
110 # Finally: return a simple 200 so UI can continue on
111 return Response({"user_id": user_id})
112
113 @decorators.action(methods=["post"], detail=False)
114 def createsuperuser(self, request):
115 """
116 Given a username, full name and password, create a superuser attached
117 to the facility that was imported (or create a facility with given facility_name)
118 """
119 facility_name = request.data.get("facility_name", None)
120
121 # Get the imported facility (assuming its the only one at this point)
122 if Facility.objects.count() == 0:
123 the_facility = Facility.objects.create(name=facility_name)
124 else:
125 the_facility = Facility.objects.get()
126 if facility_name:
127 the_facility.name = facility_name
128 the_facility.save()
129
130 try:
131 superuser = FacilityUser.objects.create_superuser(
132 request.data.get("username"),
133 request.data.get("password"),
134 facility=the_facility,
135 full_name=request.data.get("full_name"),
136 )
137 return Response({"username": superuser.username})
138
139 except ValidationError:
140 raise ValidationError(detail="duplicate", code="duplicate_username")
141
142 @decorators.action(methods=["post"], detail=False)
143 def listfacilitylearners(self, request):
144 """
145 If the request is done by an admin user it will return a list of the users of the
146 facility
147
148 :param baseurl: First part of the url of the server that's going to be requested
149 :param facility_id: Id of the facility to authenticate and get the list of users
150 :param username: Username of the user that's going to authenticate
151 :param password: Password of the user that's going to authenticate
152 :return: List of the learners of the facility.
153 """
154 facility_id = request.data.get("facility_id")
155 baseurl = request.data.get("baseurl")
156 password = request.data.get("password")
157 username = request.data.get("username")
158 try:
159 facility_info = get_remote_users_info(
160 baseurl, facility_id, username, password
161 )
162 except AuthenticationFailed:
163 raise PermissionDenied()
164 user_info = facility_info["user"]
165 roles = user_info["roles"]
166 admin_roles = (user_kinds.ADMIN, user_kinds.SUPERUSER)
167 if not any(role in roles for role in admin_roles):
168 raise PermissionDenied()
169 students = [u for u in facility_info["users"] if not u["roles"]]
170 return Response({"students": students, "admin": facility_info["user"]})
171
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kolibri/plugins/setup_wizard/api.py b/kolibri/plugins/setup_wizard/api.py
--- a/kolibri/plugins/setup_wizard/api.py
+++ b/kolibri/plugins/setup_wizard/api.py
@@ -9,6 +9,7 @@
from rest_framework.response import Response
from rest_framework.viewsets import ViewSet
+from kolibri.core.auth.backends import FACILITY_CREDENTIAL_KEY
from kolibri.core.auth.constants import user_kinds
from kolibri.core.auth.models import Facility
from kolibri.core.auth.models import FacilityUser
@@ -51,7 +52,9 @@
url = "{}{}".format(baseurl, api_url)
payload = {
- "facility_id": facility_id,
+ # N.B. facility is keyed by facility not facility_id on the signup
+ # viewset serializer.
+ FACILITY_CREDENTIAL_KEY: facility_id,
"username": username,
"password": password,
"full_name": full_name,
| {"golden_diff": "diff --git a/kolibri/plugins/setup_wizard/api.py b/kolibri/plugins/setup_wizard/api.py\n--- a/kolibri/plugins/setup_wizard/api.py\n+++ b/kolibri/plugins/setup_wizard/api.py\n@@ -9,6 +9,7 @@\n from rest_framework.response import Response\n from rest_framework.viewsets import ViewSet\n \n+from kolibri.core.auth.backends import FACILITY_CREDENTIAL_KEY\n from kolibri.core.auth.constants import user_kinds\n from kolibri.core.auth.models import Facility\n from kolibri.core.auth.models import FacilityUser\n@@ -51,7 +52,9 @@\n url = \"{}{}\".format(baseurl, api_url)\n \n payload = {\n- \"facility_id\": facility_id,\n+ # N.B. facility is keyed by facility not facility_id on the signup\n+ # viewset serializer.\n+ FACILITY_CREDENTIAL_KEY: facility_id,\n \"username\": username,\n \"password\": password,\n \"full_name\": full_name,\n", "issue": "Impossible to create a learner account on a second (imported) facility?\n## Observed behavior\r\nI managed to replicate this on 2 separate Android devices. My Windows 7 server device has 2 facilities, one created during the server setup (**Win7new**), and the second one imported from another VM (**imported-from-Win11**). This second one has several classes, and I have been using it exclusively during this past week. \r\n\r\n1. At first I tried to create a new learner account from the Huawei Android 8 tablet on the second facility, and got an error (_Whoops!_). I retried and the field validation was saying ``Username already exists``, which did not make sense, since I searched users on that second facility on the server VM and the username I was trying to create was not there. I moved on, imported another user and went on testing something else.\r\n2. Some times after I was setting up another LoD (on the Motorola phone), tried again to create (not import) a new user on the second facility, and got the same message that the ``Username already exists``. After the initial confusion I had a hunch and went to check the first facility (the one I never really used for class interactions during this testing), and lo and behold, the user I was trying to create did exist there, probably from the above setup on Huawei tablet. And this is not my fat finger miss-touching the wrong radio button during the facility selection, it actually seems that the option to create a new user is somehow _bound_ to the first facility on the device, and would not _obey_ the user's selection of the second facility.\r\n\r\nLoD | Win7 facility users | imported-from-Win11 facility users\r\n-- | -- | --\r\n |  | \r\n\r\n## Errors and logs\r\n[Windows 7 home folder](https://drive.google.com/file/d/1QaHEZON_yL3hnLRsW2PEURY76MVEL9JT/view?usp=drive_link) (without content)\r\n\r\n## Expected behavior\r\nUser needs to be able to select the facility they need to create their new learner account.\r\n\r\n## User-facing consequences\r\nImpossible to create a new learner account on a desired facility.\r\n\r\n## Steps to reproduce\r\nYou will need a server with 2 facilities, try setting up an LoD by creating a new account on the second one. 
\r\n\r\n## Context\r\n\r\n * Kolibri version: 0.16b13\r\n * Operating system: Windows 7\r\n\r\ncc @pcenov to try to replicate\n", "before_files": [{"content": "import requests\nfrom django.urls import reverse\nfrom rest_framework import decorators\nfrom rest_framework.exceptions import AuthenticationFailed\nfrom rest_framework.exceptions import NotFound\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.permissions import BasePermission\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ViewSet\n\nfrom kolibri.core.auth.constants import user_kinds\nfrom kolibri.core.auth.models import Facility\nfrom kolibri.core.auth.models import FacilityUser\nfrom kolibri.core.auth.utils.users import get_remote_users_info\nfrom kolibri.core.device.models import DevicePermissions\n\n\n# Basic class that makes these endpoints unusable if device is provisioned\nclass HasPermissionDuringSetup(BasePermission):\n def has_permission(self, request, view):\n from kolibri.core.device.utils import device_provisioned\n\n return not device_provisioned()\n\n\nclass HasPermissionDuringLODSetup(BasePermission):\n def has_permission(self, request, view):\n from kolibri.core.device.utils import get_device_setting\n\n return get_device_setting(\"subset_of_users_device\")\n\n\nclass SetupWizardResource(ViewSet):\n \"\"\"\n Generic endpoints for use during various setup wizard onboarding flows\n \"\"\"\n\n permission_classes = (HasPermissionDuringSetup,)\n\n @decorators.action(methods=[\"post\"], detail=False)\n def createuseronremote(self, request):\n facility_id = request.data.get(\"facility_id\", None)\n username = request.data.get(\"username\", None)\n password = request.data.get(\"password\", None)\n full_name = request.data.get(\"full_name\", \"\")\n baseurl = request.data.get(\"baseurl\", None)\n\n api_url = reverse(\"kolibri:core:publicsignup-list\")\n\n url = \"{}{}\".format(baseurl, api_url)\n\n payload = {\n \"facility_id\": facility_id,\n \"username\": username,\n \"password\": password,\n \"full_name\": full_name,\n }\n\n r = requests.post(url, data=payload)\n return Response({\"status\": r.status_code, \"data\": r.content})\n\n\nclass FacilityImportViewSet(ViewSet):\n \"\"\"\n A group of endpoints that are used by the SetupWizard to import a facility\n and create a superuser\n \"\"\"\n\n permission_classes = (HasPermissionDuringSetup,)\n\n @decorators.action(methods=[\"get\"], detail=False)\n def facilityadmins(self, request):\n # The filter is very loose, since we are assuming that the only\n # users are from the new facility\n queryset = FacilityUser.objects.filter(roles__kind__contains=\"admin\")\n response_data = [\n {\"full_name\": user.full_name, \"username\": user.username, \"id\": user.id}\n for user in queryset\n ]\n return Response(response_data)\n\n @decorators.action(methods=[\"post\"], detail=False)\n def grantsuperuserpermissions(self, request):\n \"\"\"\n Given a user ID and credentials, create a superuser DevicePermissions record\n \"\"\"\n user_id = request.data.get(\"user_id\", \"\")\n password = request.data.get(\"password\", \"\")\n\n # Get the Facility User object\n try:\n facilityuser = FacilityUser.objects.get(id=user_id)\n except (Exception, FacilityUser.DoesNotExist):\n raise NotFound()\n\n # Test for password and admin role\n if (\n not facilityuser.check_password(password)\n or user_kinds.ADMIN not in facilityuser.session_data[\"kind\"]\n ):\n raise PermissionDenied()\n\n # 
If it succeeds, create a DevicePermissions model for the user\n DevicePermissions.objects.update_or_create(\n user=facilityuser,\n defaults={\"is_superuser\": True, \"can_manage_content\": True},\n )\n\n # Finally: return a simple 200 so UI can continue on\n return Response({\"user_id\": user_id})\n\n @decorators.action(methods=[\"post\"], detail=False)\n def createsuperuser(self, request):\n \"\"\"\n Given a username, full name and password, create a superuser attached\n to the facility that was imported (or create a facility with given facility_name)\n \"\"\"\n facility_name = request.data.get(\"facility_name\", None)\n\n # Get the imported facility (assuming its the only one at this point)\n if Facility.objects.count() == 0:\n the_facility = Facility.objects.create(name=facility_name)\n else:\n the_facility = Facility.objects.get()\n if facility_name:\n the_facility.name = facility_name\n the_facility.save()\n\n try:\n superuser = FacilityUser.objects.create_superuser(\n request.data.get(\"username\"),\n request.data.get(\"password\"),\n facility=the_facility,\n full_name=request.data.get(\"full_name\"),\n )\n return Response({\"username\": superuser.username})\n\n except ValidationError:\n raise ValidationError(detail=\"duplicate\", code=\"duplicate_username\")\n\n @decorators.action(methods=[\"post\"], detail=False)\n def listfacilitylearners(self, request):\n \"\"\"\n If the request is done by an admin user it will return a list of the users of the\n facility\n\n :param baseurl: First part of the url of the server that's going to be requested\n :param facility_id: Id of the facility to authenticate and get the list of users\n :param username: Username of the user that's going to authenticate\n :param password: Password of the user that's going to authenticate\n :return: List of the learners of the facility.\n \"\"\"\n facility_id = request.data.get(\"facility_id\")\n baseurl = request.data.get(\"baseurl\")\n password = request.data.get(\"password\")\n username = request.data.get(\"username\")\n try:\n facility_info = get_remote_users_info(\n baseurl, facility_id, username, password\n )\n except AuthenticationFailed:\n raise PermissionDenied()\n user_info = facility_info[\"user\"]\n roles = user_info[\"roles\"]\n admin_roles = (user_kinds.ADMIN, user_kinds.SUPERUSER)\n if not any(role in roles for role in admin_roles):\n raise PermissionDenied()\n students = [u for u in facility_info[\"users\"] if not u[\"roles\"]]\n return Response({\"students\": students, \"admin\": facility_info[\"user\"]})\n", "path": "kolibri/plugins/setup_wizard/api.py"}], "after_files": [{"content": "import requests\nfrom django.urls import reverse\nfrom rest_framework import decorators\nfrom rest_framework.exceptions import AuthenticationFailed\nfrom rest_framework.exceptions import NotFound\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.permissions import BasePermission\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ViewSet\n\nfrom kolibri.core.auth.backends import FACILITY_CREDENTIAL_KEY\nfrom kolibri.core.auth.constants import user_kinds\nfrom kolibri.core.auth.models import Facility\nfrom kolibri.core.auth.models import FacilityUser\nfrom kolibri.core.auth.utils.users import get_remote_users_info\nfrom kolibri.core.device.models import DevicePermissions\n\n\n# Basic class that makes these endpoints unusable if device is provisioned\nclass HasPermissionDuringSetup(BasePermission):\n def 
has_permission(self, request, view):\n from kolibri.core.device.utils import device_provisioned\n\n return not device_provisioned()\n\n\nclass HasPermissionDuringLODSetup(BasePermission):\n def has_permission(self, request, view):\n from kolibri.core.device.utils import get_device_setting\n\n return get_device_setting(\"subset_of_users_device\")\n\n\nclass SetupWizardResource(ViewSet):\n \"\"\"\n Generic endpoints for use during various setup wizard onboarding flows\n \"\"\"\n\n permission_classes = (HasPermissionDuringSetup,)\n\n @decorators.action(methods=[\"post\"], detail=False)\n def createuseronremote(self, request):\n facility_id = request.data.get(\"facility_id\", None)\n username = request.data.get(\"username\", None)\n password = request.data.get(\"password\", None)\n full_name = request.data.get(\"full_name\", \"\")\n baseurl = request.data.get(\"baseurl\", None)\n\n api_url = reverse(\"kolibri:core:publicsignup-list\")\n\n url = \"{}{}\".format(baseurl, api_url)\n\n payload = {\n # N.B. facility is keyed by facility not facility_id on the signup\n # viewset serializer.\n FACILITY_CREDENTIAL_KEY: facility_id,\n \"username\": username,\n \"password\": password,\n \"full_name\": full_name,\n }\n\n r = requests.post(url, data=payload)\n return Response({\"status\": r.status_code, \"data\": r.content})\n\n\nclass FacilityImportViewSet(ViewSet):\n \"\"\"\n A group of endpoints that are used by the SetupWizard to import a facility\n and create a superuser\n \"\"\"\n\n permission_classes = (HasPermissionDuringSetup,)\n\n @decorators.action(methods=[\"get\"], detail=False)\n def facilityadmins(self, request):\n # The filter is very loose, since we are assuming that the only\n # users are from the new facility\n queryset = FacilityUser.objects.filter(roles__kind__contains=\"admin\")\n response_data = [\n {\"full_name\": user.full_name, \"username\": user.username, \"id\": user.id}\n for user in queryset\n ]\n return Response(response_data)\n\n @decorators.action(methods=[\"post\"], detail=False)\n def grantsuperuserpermissions(self, request):\n \"\"\"\n Given a user ID and credentials, create a superuser DevicePermissions record\n \"\"\"\n user_id = request.data.get(\"user_id\", \"\")\n password = request.data.get(\"password\", \"\")\n\n # Get the Facility User object\n try:\n facilityuser = FacilityUser.objects.get(id=user_id)\n except (Exception, FacilityUser.DoesNotExist):\n raise NotFound()\n\n # Test for password and admin role\n if (\n not facilityuser.check_password(password)\n or user_kinds.ADMIN not in facilityuser.session_data[\"kind\"]\n ):\n raise PermissionDenied()\n\n # If it succeeds, create a DevicePermissions model for the user\n DevicePermissions.objects.update_or_create(\n user=facilityuser,\n defaults={\"is_superuser\": True, \"can_manage_content\": True},\n )\n\n # Finally: return a simple 200 so UI can continue on\n return Response({\"user_id\": user_id})\n\n @decorators.action(methods=[\"post\"], detail=False)\n def createsuperuser(self, request):\n \"\"\"\n Given a username, full name and password, create a superuser attached\n to the facility that was imported (or create a facility with given facility_name)\n \"\"\"\n facility_name = request.data.get(\"facility_name\", None)\n\n # Get the imported facility (assuming its the only one at this point)\n if Facility.objects.count() == 0:\n the_facility = Facility.objects.create(name=facility_name)\n else:\n the_facility = Facility.objects.get()\n if facility_name:\n the_facility.name = facility_name\n 
the_facility.save()\n\n try:\n superuser = FacilityUser.objects.create_superuser(\n request.data.get(\"username\"),\n request.data.get(\"password\"),\n facility=the_facility,\n full_name=request.data.get(\"full_name\"),\n )\n return Response({\"username\": superuser.username})\n\n except ValidationError:\n raise ValidationError(detail=\"duplicate\", code=\"duplicate_username\")\n\n @decorators.action(methods=[\"post\"], detail=False)\n def listfacilitylearners(self, request):\n \"\"\"\n If the request is done by an admin user it will return a list of the users of the\n facility\n\n :param baseurl: First part of the url of the server that's going to be requested\n :param facility_id: Id of the facility to authenticate and get the list of users\n :param username: Username of the user that's going to authenticate\n :param password: Password of the user that's going to authenticate\n :return: List of the learners of the facility.\n \"\"\"\n facility_id = request.data.get(\"facility_id\")\n baseurl = request.data.get(\"baseurl\")\n password = request.data.get(\"password\")\n username = request.data.get(\"username\")\n try:\n facility_info = get_remote_users_info(\n baseurl, facility_id, username, password\n )\n except AuthenticationFailed:\n raise PermissionDenied()\n user_info = facility_info[\"user\"]\n roles = user_info[\"roles\"]\n admin_roles = (user_kinds.ADMIN, user_kinds.SUPERUSER)\n if not any(role in roles for role in admin_roles):\n raise PermissionDenied()\n students = [u for u in facility_info[\"users\"] if not u[\"roles\"]]\n return Response({\"students\": students, \"admin\": facility_info[\"user\"]})\n", "path": "kolibri/plugins/setup_wizard/api.py"}]} | 2,742 | 212 |
gh_patches_debug_19384 | rasdani/github-patches | git_diff | qutebrowser__qutebrowser-3328 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
:edit-command accepts invalid commands
When using `:edit-command` and changing the command to `foo` (without an initial `:`):
```
10:33:50 DEBUG procs guiprocess:on_finished:98 Process finished with code 0, status 0.
10:33:50 DEBUG procs editor:on_proc_closed:73 Editor closed
10:33:50 DEBUG procs editor:on_proc_closed:90 Read back: foo
10:33:50 ERROR misc crashsignal:exception_hook:216 Uncaught exception
Traceback (most recent call last):
File "/home/florian/proj/qutebrowser/git/qutebrowser/mainwindow/statusbar/command.py", line 179, in callback
self.set_cmd_text(text)
File "/home/florian/proj/qutebrowser/git/qutebrowser/mainwindow/statusbar/command.py", line 86, in set_cmd_text
self.setText(text)
File "/home/florian/proj/qutebrowser/git/qutebrowser/mainwindow/statusbar/command.py", line 212, in setText
"'{}'!".format(text))
AssertionError: setText got called with invalid text 'foo
'!
```
When changing it to an empty file and pressing enter:
```
10:34:38 DEBUG commands command:run:484 command called: command-accept
[...]
10:34:38 ERROR misc crashsignal:exception_hook:216 Uncaught exception
Traceback (most recent call last):
File "/home/florian/proj/qutebrowser/git/qutebrowser/app.py", line 935, in eventFilter
return handler(event)
File "/home/florian/proj/qutebrowser/git/qutebrowser/app.py", line 895, in _handle_key_event
return man.eventFilter(event)
File "/home/florian/proj/qutebrowser/git/qutebrowser/keyinput/modeman.py", line 326, in eventFilter
return self._eventFilter_keypress(event)
File "/home/florian/proj/qutebrowser/git/qutebrowser/keyinput/modeman.py", line 162, in _eventFilter_keypress
handled = parser.handle(event)
File "/home/florian/proj/qutebrowser/git/qutebrowser/keyinput/basekeyparser.py", line 266, in handle
handled = self._handle_special_key(e)
File "/home/florian/proj/qutebrowser/git/qutebrowser/keyinput/basekeyparser.py", line 139, in _handle_special_key
self.execute(cmdstr, self.Type.special, count)
File "/home/florian/proj/qutebrowser/git/qutebrowser/keyinput/keyparser.py", line 44, in execute
self._commandrunner.run(cmdstr, count)
File "/home/florian/proj/qutebrowser/git/qutebrowser/commands/runners.py", line 301, in run
result.cmd.run(self._win_id, args, count=count)
File "/home/florian/proj/qutebrowser/git/qutebrowser/commands/command.py", line 500, in run
self.handler(*posargs, **kwargs)
File "/home/florian/proj/qutebrowser/git/qutebrowser/mainwindow/statusbar/command.py", line 167, in command_accept
self.got_cmd[str].emit(prefixes[text[0]] + text[1:])
IndexError: string index out of range
```
Report: https://crashes.qutebrowser.org/view/97044b65
cc @rcorre
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qutebrowser/mainwindow/statusbar/command.py`
Content:
```
1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
2
3 # Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>
4 #
5 # This file is part of qutebrowser.
6 #
7 # qutebrowser is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # qutebrowser is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
19
20 """The commandline in the statusbar."""
21
22 from PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt, QSize
23 from PyQt5.QtWidgets import QSizePolicy
24
25 from qutebrowser.keyinput import modeman, modeparsers
26 from qutebrowser.commands import cmdexc, cmdutils
27 from qutebrowser.misc import cmdhistory, editor
28 from qutebrowser.misc import miscwidgets as misc
29 from qutebrowser.utils import usertypes, log, objreg
30
31
32 class Command(misc.MinimalLineEditMixin, misc.CommandLineEdit):
33
34 """The commandline part of the statusbar.
35
36 Attributes:
37 _win_id: The window ID this widget is associated with.
38
39 Signals:
40 got_cmd: Emitted when a command is triggered by the user.
41 arg: The command string and also potentially the count.
42 clear_completion_selection: Emitted before the completion widget is
43 hidden.
44 hide_completion: Emitted when the completion widget should be hidden.
45 update_completion: Emitted when the completion should be shown/updated.
46 show_cmd: Emitted when command input should be shown.
47 hide_cmd: Emitted when command input can be hidden.
48 """
49
50 got_cmd = pyqtSignal([str], [str, int])
51 clear_completion_selection = pyqtSignal()
52 hide_completion = pyqtSignal()
53 update_completion = pyqtSignal()
54 show_cmd = pyqtSignal()
55 hide_cmd = pyqtSignal()
56
57 def __init__(self, *, win_id, private, parent=None):
58 misc.CommandLineEdit.__init__(self, parent=parent)
59 misc.MinimalLineEditMixin.__init__(self)
60 self._win_id = win_id
61 if not private:
62 command_history = objreg.get('command-history')
63 self.history.history = command_history.data
64 self.history.changed.connect(command_history.changed)
65 self.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Ignored)
66 self.cursorPositionChanged.connect(self.update_completion)
67 self.textChanged.connect(self.update_completion)
68 self.textChanged.connect(self.updateGeometry)
69
70 def prefix(self):
71 """Get the currently entered command prefix."""
72 text = self.text()
73 if not text:
74 return ''
75 elif text[0] in modeparsers.STARTCHARS:
76 return text[0]
77 else:
78 return ''
79
80 def set_cmd_text(self, text):
81 """Preset the statusbar to some text.
82
83 Args:
84 text: The text to set as string.
85 """
86 self.setText(text)
87 log.modes.debug("Setting command text, focusing {!r}".format(self))
88 modeman.enter(self._win_id, usertypes.KeyMode.command, 'cmd focus')
89 self.setFocus()
90 self.show_cmd.emit()
91
92 @cmdutils.register(instance='status-command', name='set-cmd-text',
93 scope='window', maxsplit=0)
94 @cmdutils.argument('count', count=True)
95 def set_cmd_text_command(self, text, count=None, space=False, append=False,
96 run_on_count=False):
97 """Preset the statusbar to some text.
98
99 //
100
101 Wrapper for set_cmd_text to check the arguments and allow multiple
102 strings which will get joined.
103
104 Args:
105 text: The commandline to set.
106 count: The count if given.
107 space: If given, a space is added to the end.
108 append: If given, the text is appended to the current text.
109 run_on_count: If given with a count, the command is run with the
110 given count rather than setting the command text.
111 """
112 if space:
113 text += ' '
114 if append:
115 if not self.text():
116 raise cmdexc.CommandError("No current text!")
117 text = self.text() + text
118
119 if not text or text[0] not in modeparsers.STARTCHARS:
120 raise cmdexc.CommandError(
121 "Invalid command text '{}'.".format(text))
122 if run_on_count and count is not None:
123 self.got_cmd[str, int].emit(text, count)
124 else:
125 self.set_cmd_text(text)
126
127 @cmdutils.register(instance='status-command',
128 modes=[usertypes.KeyMode.command], scope='window')
129 def command_history_prev(self):
130 """Go back in the commandline history."""
131 try:
132 if not self.history.is_browsing():
133 item = self.history.start(self.text().strip())
134 else:
135 item = self.history.previtem()
136 except (cmdhistory.HistoryEmptyError,
137 cmdhistory.HistoryEndReachedError):
138 return
139 if item:
140 self.set_cmd_text(item)
141
142 @cmdutils.register(instance='status-command',
143 modes=[usertypes.KeyMode.command], scope='window')
144 def command_history_next(self):
145 """Go forward in the commandline history."""
146 if not self.history.is_browsing():
147 return
148 try:
149 item = self.history.nextitem()
150 except cmdhistory.HistoryEndReachedError:
151 return
152 if item:
153 self.set_cmd_text(item)
154
155 @cmdutils.register(instance='status-command',
156 modes=[usertypes.KeyMode.command], scope='window')
157 def command_accept(self):
158 """Execute the command currently in the commandline."""
159 prefixes = {
160 ':': '',
161 '/': 'search -- ',
162 '?': 'search -r -- ',
163 }
164 text = self.text()
165 self.history.append(text)
166 modeman.leave(self._win_id, usertypes.KeyMode.command, 'cmd accept')
167 self.got_cmd[str].emit(prefixes[text[0]] + text[1:])
168
169 @cmdutils.register(instance='status-command', scope='window')
170 def edit_command(self, run=False):
171 """Open an editor to modify the current command.
172
173 Args:
174 run: Run the command if the editor exits successfully.
175 """
176 ed = editor.ExternalEditor(parent=self)
177
178 def callback(text):
179 self.set_cmd_text(text)
180 if run:
181 self.command_accept()
182
183 ed.editing_finished.connect(callback)
184 ed.edit(self.text())
185
186 @pyqtSlot(usertypes.KeyMode)
187 def on_mode_left(self, mode):
188 """Clear up when command mode was left.
189
190 - Clear the statusbar text if it's explicitly unfocused.
191 - Clear completion selection
192 - Hide completion
193
194 Args:
195 mode: The mode which was left.
196 """
197 if mode == usertypes.KeyMode.command:
198 self.setText('')
199 self.history.stop()
200 self.hide_cmd.emit()
201 self.clear_completion_selection.emit()
202 self.hide_completion.emit()
203
204 def setText(self, text):
205 """Extend setText to set prefix and make sure the prompt is ok."""
206 if not text:
207 pass
208 elif text[0] in modeparsers.STARTCHARS:
209 super().set_prompt(text[0])
210 else:
211 raise AssertionError("setText got called with invalid text "
212 "'{}'!".format(text))
213 super().setText(text)
214
215 def keyPressEvent(self, e):
216 """Override keyPressEvent to ignore Return key presses.
217
218 If this widget is focused, we are in passthrough key mode, and
219 Enter/Shift+Enter/etc. will cause QLineEdit to think it's finished
220 without command_accept to be called.
221 """
222 if e.key() == Qt.Key_Return:
223 e.ignore()
224 return
225 else:
226 super().keyPressEvent(e)
227
228 def sizeHint(self):
229 """Dynamically calculate the needed size."""
230 height = super().sizeHint().height()
231 text = self.text()
232 if not text:
233 text = 'x'
234 width = self.fontMetrics().width(text)
235 return QSize(width, height)
236
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/qutebrowser/mainwindow/statusbar/command.py b/qutebrowser/mainwindow/statusbar/command.py
--- a/qutebrowser/mainwindow/statusbar/command.py
+++ b/qutebrowser/mainwindow/statusbar/command.py
@@ -26,7 +26,7 @@
from qutebrowser.commands import cmdexc, cmdutils
from qutebrowser.misc import cmdhistory, editor
from qutebrowser.misc import miscwidgets as misc
-from qutebrowser.utils import usertypes, log, objreg
+from qutebrowser.utils import usertypes, log, objreg, message
class Command(misc.MinimalLineEditMixin, misc.CommandLineEdit):
@@ -176,6 +176,10 @@
ed = editor.ExternalEditor(parent=self)
def callback(text):
+ if not text or text[0] not in modeparsers.STARTCHARS:
+ message.error('command must start with one of {}'
+ .format(modeparsers.STARTCHARS))
+ return
self.set_cmd_text(text)
if run:
self.command_accept()
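Editor's note: the guard added to `callback` simply refuses editor output that does not begin with a command prefix. A self-contained sketch of the same check follows; the `":/?"` prefix set is an assumption mirroring `modeparsers.STARTCHARS` and the `prefixes` dict in `command_accept`.

```python
# Stand-in for qutebrowser's modeparsers.STARTCHARS (assumed to be ':', '/', '?').
STARTCHARS = ":/?"


def is_valid_command_text(text):
    """Mirror of the guard the patch adds before set_cmd_text()/command_accept()."""
    return bool(text) and text[0] in STARTCHARS


assert is_valid_command_text(":open qutebrowser.org")
assert not is_valid_command_text("foo\n")  # first traceback: missing ':' prefix
assert not is_valid_command_text("")       # second traceback: empty editor file
```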
| {"golden_diff": "diff --git a/qutebrowser/mainwindow/statusbar/command.py b/qutebrowser/mainwindow/statusbar/command.py\n--- a/qutebrowser/mainwindow/statusbar/command.py\n+++ b/qutebrowser/mainwindow/statusbar/command.py\n@@ -26,7 +26,7 @@\n from qutebrowser.commands import cmdexc, cmdutils\n from qutebrowser.misc import cmdhistory, editor\n from qutebrowser.misc import miscwidgets as misc\n-from qutebrowser.utils import usertypes, log, objreg\n+from qutebrowser.utils import usertypes, log, objreg, message\n \n \n class Command(misc.MinimalLineEditMixin, misc.CommandLineEdit):\n@@ -176,6 +176,10 @@\n ed = editor.ExternalEditor(parent=self)\n \n def callback(text):\n+ if not text or text[0] not in modeparsers.STARTCHARS:\n+ message.error('command must start with one of {}'\n+ .format(modeparsers.STARTCHARS))\n+ return\n self.set_cmd_text(text)\n if run:\n self.command_accept()\n", "issue": ":edit-command accepts invalid commands\nWhen using `:edit-command` and changing the command to `foo` (without an initial `:`):\r\n\r\n```\r\n10:33:50 DEBUG procs guiprocess:on_finished:98 Process finished with code 0, status 0.\r\n10:33:50 DEBUG procs editor:on_proc_closed:73 Editor closed\r\n10:33:50 DEBUG procs editor:on_proc_closed:90 Read back: foo\r\n\r\n10:33:50 ERROR misc crashsignal:exception_hook:216 Uncaught exception\r\nTraceback (most recent call last):\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/mainwindow/statusbar/command.py\", line 179, in callback\r\n self.set_cmd_text(text)\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/mainwindow/statusbar/command.py\", line 86, in set_cmd_text\r\n self.setText(text)\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/mainwindow/statusbar/command.py\", line 212, in setText\r\n \"'{}'!\".format(text))\r\nAssertionError: setText got called with invalid text 'foo\r\n'!\r\n```\r\n\r\nWhen changing it to an empty file and pressing enter:\r\n\r\n```\r\n10:34:38 DEBUG commands command:run:484 command called: command-accept\r\n[...]\r\n10:34:38 ERROR misc crashsignal:exception_hook:216 Uncaught exception\r\nTraceback (most recent call last):\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/app.py\", line 935, in eventFilter\r\n return handler(event)\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/app.py\", line 895, in _handle_key_event\r\n return man.eventFilter(event)\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/keyinput/modeman.py\", line 326, in eventFilter\r\n return self._eventFilter_keypress(event)\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/keyinput/modeman.py\", line 162, in _eventFilter_keypress\r\n handled = parser.handle(event)\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/keyinput/basekeyparser.py\", line 266, in handle\r\n handled = self._handle_special_key(e)\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/keyinput/basekeyparser.py\", line 139, in _handle_special_key\r\n self.execute(cmdstr, self.Type.special, count)\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/keyinput/keyparser.py\", line 44, in execute\r\n self._commandrunner.run(cmdstr, count)\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/commands/runners.py\", line 301, in run\r\n result.cmd.run(self._win_id, args, count=count)\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/commands/command.py\", line 500, in run\r\n self.handler(*posargs, **kwargs)\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/mainwindow/statusbar/command.py\", line 167, in 
command_accept\r\n self.got_cmd[str].emit(prefixes[text[0]] + text[1:])\r\nIndexError: string index out of range\r\n```\r\n\r\nReport: https://crashes.qutebrowser.org/view/97044b65\r\n\r\ncc @rcorre\r\n\n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"The commandline in the statusbar.\"\"\"\n\nfrom PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt, QSize\nfrom PyQt5.QtWidgets import QSizePolicy\n\nfrom qutebrowser.keyinput import modeman, modeparsers\nfrom qutebrowser.commands import cmdexc, cmdutils\nfrom qutebrowser.misc import cmdhistory, editor\nfrom qutebrowser.misc import miscwidgets as misc\nfrom qutebrowser.utils import usertypes, log, objreg\n\n\nclass Command(misc.MinimalLineEditMixin, misc.CommandLineEdit):\n\n \"\"\"The commandline part of the statusbar.\n\n Attributes:\n _win_id: The window ID this widget is associated with.\n\n Signals:\n got_cmd: Emitted when a command is triggered by the user.\n arg: The command string and also potentially the count.\n clear_completion_selection: Emitted before the completion widget is\n hidden.\n hide_completion: Emitted when the completion widget should be hidden.\n update_completion: Emitted when the completion should be shown/updated.\n show_cmd: Emitted when command input should be shown.\n hide_cmd: Emitted when command input can be hidden.\n \"\"\"\n\n got_cmd = pyqtSignal([str], [str, int])\n clear_completion_selection = pyqtSignal()\n hide_completion = pyqtSignal()\n update_completion = pyqtSignal()\n show_cmd = pyqtSignal()\n hide_cmd = pyqtSignal()\n\n def __init__(self, *, win_id, private, parent=None):\n misc.CommandLineEdit.__init__(self, parent=parent)\n misc.MinimalLineEditMixin.__init__(self)\n self._win_id = win_id\n if not private:\n command_history = objreg.get('command-history')\n self.history.history = command_history.data\n self.history.changed.connect(command_history.changed)\n self.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Ignored)\n self.cursorPositionChanged.connect(self.update_completion)\n self.textChanged.connect(self.update_completion)\n self.textChanged.connect(self.updateGeometry)\n\n def prefix(self):\n \"\"\"Get the currently entered command prefix.\"\"\"\n text = self.text()\n if not text:\n return ''\n elif text[0] in modeparsers.STARTCHARS:\n return text[0]\n else:\n return ''\n\n def set_cmd_text(self, text):\n \"\"\"Preset the statusbar to some text.\n\n Args:\n text: The text to set as string.\n \"\"\"\n self.setText(text)\n log.modes.debug(\"Setting command text, focusing {!r}\".format(self))\n modeman.enter(self._win_id, usertypes.KeyMode.command, 'cmd focus')\n self.setFocus()\n self.show_cmd.emit()\n\n @cmdutils.register(instance='status-command', name='set-cmd-text',\n scope='window', maxsplit=0)\n 
@cmdutils.argument('count', count=True)\n def set_cmd_text_command(self, text, count=None, space=False, append=False,\n run_on_count=False):\n \"\"\"Preset the statusbar to some text.\n\n //\n\n Wrapper for set_cmd_text to check the arguments and allow multiple\n strings which will get joined.\n\n Args:\n text: The commandline to set.\n count: The count if given.\n space: If given, a space is added to the end.\n append: If given, the text is appended to the current text.\n run_on_count: If given with a count, the command is run with the\n given count rather than setting the command text.\n \"\"\"\n if space:\n text += ' '\n if append:\n if not self.text():\n raise cmdexc.CommandError(\"No current text!\")\n text = self.text() + text\n\n if not text or text[0] not in modeparsers.STARTCHARS:\n raise cmdexc.CommandError(\n \"Invalid command text '{}'.\".format(text))\n if run_on_count and count is not None:\n self.got_cmd[str, int].emit(text, count)\n else:\n self.set_cmd_text(text)\n\n @cmdutils.register(instance='status-command',\n modes=[usertypes.KeyMode.command], scope='window')\n def command_history_prev(self):\n \"\"\"Go back in the commandline history.\"\"\"\n try:\n if not self.history.is_browsing():\n item = self.history.start(self.text().strip())\n else:\n item = self.history.previtem()\n except (cmdhistory.HistoryEmptyError,\n cmdhistory.HistoryEndReachedError):\n return\n if item:\n self.set_cmd_text(item)\n\n @cmdutils.register(instance='status-command',\n modes=[usertypes.KeyMode.command], scope='window')\n def command_history_next(self):\n \"\"\"Go forward in the commandline history.\"\"\"\n if not self.history.is_browsing():\n return\n try:\n item = self.history.nextitem()\n except cmdhistory.HistoryEndReachedError:\n return\n if item:\n self.set_cmd_text(item)\n\n @cmdutils.register(instance='status-command',\n modes=[usertypes.KeyMode.command], scope='window')\n def command_accept(self):\n \"\"\"Execute the command currently in the commandline.\"\"\"\n prefixes = {\n ':': '',\n '/': 'search -- ',\n '?': 'search -r -- ',\n }\n text = self.text()\n self.history.append(text)\n modeman.leave(self._win_id, usertypes.KeyMode.command, 'cmd accept')\n self.got_cmd[str].emit(prefixes[text[0]] + text[1:])\n\n @cmdutils.register(instance='status-command', scope='window')\n def edit_command(self, run=False):\n \"\"\"Open an editor to modify the current command.\n\n Args:\n run: Run the command if the editor exits successfully.\n \"\"\"\n ed = editor.ExternalEditor(parent=self)\n\n def callback(text):\n self.set_cmd_text(text)\n if run:\n self.command_accept()\n\n ed.editing_finished.connect(callback)\n ed.edit(self.text())\n\n @pyqtSlot(usertypes.KeyMode)\n def on_mode_left(self, mode):\n \"\"\"Clear up when command mode was left.\n\n - Clear the statusbar text if it's explicitly unfocused.\n - Clear completion selection\n - Hide completion\n\n Args:\n mode: The mode which was left.\n \"\"\"\n if mode == usertypes.KeyMode.command:\n self.setText('')\n self.history.stop()\n self.hide_cmd.emit()\n self.clear_completion_selection.emit()\n self.hide_completion.emit()\n\n def setText(self, text):\n \"\"\"Extend setText to set prefix and make sure the prompt is ok.\"\"\"\n if not text:\n pass\n elif text[0] in modeparsers.STARTCHARS:\n super().set_prompt(text[0])\n else:\n raise AssertionError(\"setText got called with invalid text \"\n \"'{}'!\".format(text))\n super().setText(text)\n\n def keyPressEvent(self, e):\n \"\"\"Override keyPressEvent to ignore Return key presses.\n\n If this 
widget is focused, we are in passthrough key mode, and\n Enter/Shift+Enter/etc. will cause QLineEdit to think it's finished\n without command_accept to be called.\n \"\"\"\n if e.key() == Qt.Key_Return:\n e.ignore()\n return\n else:\n super().keyPressEvent(e)\n\n def sizeHint(self):\n \"\"\"Dynamically calculate the needed size.\"\"\"\n height = super().sizeHint().height()\n text = self.text()\n if not text:\n text = 'x'\n width = self.fontMetrics().width(text)\n return QSize(width, height)\n", "path": "qutebrowser/mainwindow/statusbar/command.py"}], "after_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"The commandline in the statusbar.\"\"\"\n\nfrom PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt, QSize\nfrom PyQt5.QtWidgets import QSizePolicy\n\nfrom qutebrowser.keyinput import modeman, modeparsers\nfrom qutebrowser.commands import cmdexc, cmdutils\nfrom qutebrowser.misc import cmdhistory, editor\nfrom qutebrowser.misc import miscwidgets as misc\nfrom qutebrowser.utils import usertypes, log, objreg, message\n\n\nclass Command(misc.MinimalLineEditMixin, misc.CommandLineEdit):\n\n \"\"\"The commandline part of the statusbar.\n\n Attributes:\n _win_id: The window ID this widget is associated with.\n\n Signals:\n got_cmd: Emitted when a command is triggered by the user.\n arg: The command string and also potentially the count.\n clear_completion_selection: Emitted before the completion widget is\n hidden.\n hide_completion: Emitted when the completion widget should be hidden.\n update_completion: Emitted when the completion should be shown/updated.\n show_cmd: Emitted when command input should be shown.\n hide_cmd: Emitted when command input can be hidden.\n \"\"\"\n\n got_cmd = pyqtSignal([str], [str, int])\n clear_completion_selection = pyqtSignal()\n hide_completion = pyqtSignal()\n update_completion = pyqtSignal()\n show_cmd = pyqtSignal()\n hide_cmd = pyqtSignal()\n\n def __init__(self, *, win_id, private, parent=None):\n misc.CommandLineEdit.__init__(self, parent=parent)\n misc.MinimalLineEditMixin.__init__(self)\n self._win_id = win_id\n if not private:\n command_history = objreg.get('command-history')\n self.history.history = command_history.data\n self.history.changed.connect(command_history.changed)\n self.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Ignored)\n self.cursorPositionChanged.connect(self.update_completion)\n self.textChanged.connect(self.update_completion)\n self.textChanged.connect(self.updateGeometry)\n\n def prefix(self):\n \"\"\"Get the currently entered command prefix.\"\"\"\n text = self.text()\n if not text:\n return ''\n elif text[0] in modeparsers.STARTCHARS:\n return text[0]\n else:\n return ''\n\n def set_cmd_text(self, text):\n \"\"\"Preset the statusbar to some 
text.\n\n Args:\n text: The text to set as string.\n \"\"\"\n self.setText(text)\n log.modes.debug(\"Setting command text, focusing {!r}\".format(self))\n modeman.enter(self._win_id, usertypes.KeyMode.command, 'cmd focus')\n self.setFocus()\n self.show_cmd.emit()\n\n @cmdutils.register(instance='status-command', name='set-cmd-text',\n scope='window', maxsplit=0)\n @cmdutils.argument('count', count=True)\n def set_cmd_text_command(self, text, count=None, space=False, append=False,\n run_on_count=False):\n \"\"\"Preset the statusbar to some text.\n\n //\n\n Wrapper for set_cmd_text to check the arguments and allow multiple\n strings which will get joined.\n\n Args:\n text: The commandline to set.\n count: The count if given.\n space: If given, a space is added to the end.\n append: If given, the text is appended to the current text.\n run_on_count: If given with a count, the command is run with the\n given count rather than setting the command text.\n \"\"\"\n if space:\n text += ' '\n if append:\n if not self.text():\n raise cmdexc.CommandError(\"No current text!\")\n text = self.text() + text\n\n if not text or text[0] not in modeparsers.STARTCHARS:\n raise cmdexc.CommandError(\n \"Invalid command text '{}'.\".format(text))\n if run_on_count and count is not None:\n self.got_cmd[str, int].emit(text, count)\n else:\n self.set_cmd_text(text)\n\n @cmdutils.register(instance='status-command',\n modes=[usertypes.KeyMode.command], scope='window')\n def command_history_prev(self):\n \"\"\"Go back in the commandline history.\"\"\"\n try:\n if not self.history.is_browsing():\n item = self.history.start(self.text().strip())\n else:\n item = self.history.previtem()\n except (cmdhistory.HistoryEmptyError,\n cmdhistory.HistoryEndReachedError):\n return\n if item:\n self.set_cmd_text(item)\n\n @cmdutils.register(instance='status-command',\n modes=[usertypes.KeyMode.command], scope='window')\n def command_history_next(self):\n \"\"\"Go forward in the commandline history.\"\"\"\n if not self.history.is_browsing():\n return\n try:\n item = self.history.nextitem()\n except cmdhistory.HistoryEndReachedError:\n return\n if item:\n self.set_cmd_text(item)\n\n @cmdutils.register(instance='status-command',\n modes=[usertypes.KeyMode.command], scope='window')\n def command_accept(self):\n \"\"\"Execute the command currently in the commandline.\"\"\"\n prefixes = {\n ':': '',\n '/': 'search -- ',\n '?': 'search -r -- ',\n }\n text = self.text()\n self.history.append(text)\n modeman.leave(self._win_id, usertypes.KeyMode.command, 'cmd accept')\n self.got_cmd[str].emit(prefixes[text[0]] + text[1:])\n\n @cmdutils.register(instance='status-command', scope='window')\n def edit_command(self, run=False):\n \"\"\"Open an editor to modify the current command.\n\n Args:\n run: Run the command if the editor exits successfully.\n \"\"\"\n ed = editor.ExternalEditor(parent=self)\n\n def callback(text):\n if not text or text[0] not in modeparsers.STARTCHARS:\n message.error('command must start with one of {}'\n .format(modeparsers.STARTCHARS))\n return\n self.set_cmd_text(text)\n if run:\n self.command_accept()\n\n ed.editing_finished.connect(callback)\n ed.edit(self.text())\n\n @pyqtSlot(usertypes.KeyMode)\n def on_mode_left(self, mode):\n \"\"\"Clear up when command mode was left.\n\n - Clear the statusbar text if it's explicitly unfocused.\n - Clear completion selection\n - Hide completion\n\n Args:\n mode: The mode which was left.\n \"\"\"\n if mode == usertypes.KeyMode.command:\n self.setText('')\n self.history.stop()\n 
self.hide_cmd.emit()\n self.clear_completion_selection.emit()\n self.hide_completion.emit()\n\n def setText(self, text):\n \"\"\"Extend setText to set prefix and make sure the prompt is ok.\"\"\"\n if not text:\n pass\n elif text[0] in modeparsers.STARTCHARS:\n super().set_prompt(text[0])\n else:\n raise AssertionError(\"setText got called with invalid text \"\n \"'{}'!\".format(text))\n super().setText(text)\n\n def keyPressEvent(self, e):\n \"\"\"Override keyPressEvent to ignore Return key presses.\n\n If this widget is focused, we are in passthrough key mode, and\n Enter/Shift+Enter/etc. will cause QLineEdit to think it's finished\n without command_accept to be called.\n \"\"\"\n if e.key() == Qt.Key_Return:\n e.ignore()\n return\n else:\n super().keyPressEvent(e)\n\n def sizeHint(self):\n \"\"\"Dynamically calculate the needed size.\"\"\"\n height = super().sizeHint().height()\n text = self.text()\n if not text:\n text = 'x'\n width = self.fontMetrics().width(text)\n return QSize(width, height)\n", "path": "qutebrowser/mainwindow/statusbar/command.py"}]} | 3,490 | 229 |
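
The qutebrowser record above crashes in `command_accept` because text handed back by the external editor can be empty, so `prefixes[text[0]]` indexes past the end of the string; the patched callback in `edit_command` rejects such text before re-entering the commandline. Below is a minimal standalone sketch of that guard, not the real module: `STARTCHARS` and `error()` are stand-ins for `modeparsers.STARTCHARS` and `message.error`.

```python
# Minimal standalone sketch of the guard the patch above adds.
# STARTCHARS and error() are stand-ins for qutebrowser's
# modeparsers.STARTCHARS and message.error.

STARTCHARS = ":/?"


def error(msg):
    # Stand-in for message.error(); just prints here.
    print("error:", msg)


def accept_edited_text(text, set_cmd_text, command_accept, run=False):
    """Validate text coming back from the external editor.

    Without the check, command_accept() would index text[0] and raise
    IndexError on an empty string, as in the crash report above.
    """
    if not text or text[0] not in STARTCHARS:
        error("command must start with one of {}".format(STARTCHARS))
        return
    set_cmd_text(text)
    if run:
        command_accept()


if __name__ == "__main__":
    captured = []
    accept_edited_text("", captured.append, lambda: None)  # rejected, no crash
    accept_edited_text(":open example.org", captured.append,
                       lambda: print("accepted:", captured[-1]), run=True)
```
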
gh_patches_debug_13060 | rasdani/github-patches | git_diff | ytdl-org__youtube-dl-3089 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError in ard module
With the command given below, I get the error message below. I'm using version 2014.06.09.
`youtube-dl http://www.ardmediathek.de/tv/Klassiker-der-Weltliteratur/Max-Frisch/BR-alpha/Video\?documentId\=19067308\&bcastId\=14913194`
```
[ARD] 19067308: Downloading webpage
[ARD] 19067308: Downloading JSON metadata
Traceback (most recent call last):
File "/usr/bin/youtube-dl", line 9, in <module>
load_entry_point('youtube-dl==2014.06.09', 'console_scripts', 'youtube-dl')()
File "/usr/lib/python3.4/site-packages/youtube_dl/__init__.py", line 853, in main
_real_main(argv)
File "/usr/lib/python3.4/site-packages/youtube_dl/__init__.py", line 843, in _real_main
retcode = ydl.download(all_urls)
File "/usr/lib/python3.4/site-packages/youtube_dl/YoutubeDL.py", line 1050, in download
self.extract_info(url)
File "/usr/lib/python3.4/site-packages/youtube_dl/YoutubeDL.py", line 516, in extract_info
ie_result = ie.extract(url)
File "/usr/lib/python3.4/site-packages/youtube_dl/extractor/common.py", line 168, in extract
return self._real_extract(url)
File "/usr/lib/python3.4/site-packages/youtube_dl/extractor/ard.py", line 66, in _real_extract
determine_ext(format['url']), format['quality'])
File "/usr/lib/python3.4/site-packages/youtube_dl/utils.py", line 845, in determine_ext
guess = url.partition(u'?')[0].rpartition(u'.')[2]
AttributeError: 'list' object has no attribute 'partition'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `youtube_dl/extractor/ard.py`
Content:
```
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import re
5
6 from .common import InfoExtractor
7 from ..utils import (
8 determine_ext,
9 ExtractorError,
10 )
11
12
13 class ARDIE(InfoExtractor):
14 _VALID_URL = r'^https?://(?:(?:www\.)?ardmediathek\.de|mediathek\.daserste\.de)/(?:.*/)(?P<video_id>[^/\?]+)(?:\?.*)?'
15
16 _TEST = {
17 'url': 'http://www.ardmediathek.de/das-erste/guenther-jauch/edward-snowden-im-interview-held-oder-verraeter?documentId=19288786',
18 'file': '19288786.mp4',
19 'md5': '515bf47ce209fb3f5a61b7aad364634c',
20 'info_dict': {
21 'title': 'Edward Snowden im Interview - Held oder Verräter?',
22 'description': 'Edward Snowden hat alles aufs Spiel gesetzt, um die weltweite \xdcberwachung durch die Geheimdienste zu enttarnen. Nun stellt sich der ehemalige NSA-Mitarbeiter erstmals weltweit in einem TV-Interview den Fragen eines NDR-Journalisten. Die Sendung vom Sonntagabend.',
23 'thumbnail': 'http://www.ardmediathek.de/ard/servlet/contentblob/19/28/87/90/19288790/bild/2250037',
24 },
25 'skip': 'Blocked outside of Germany',
26 }
27
28 def _real_extract(self, url):
29 # determine video id from url
30 m = re.match(self._VALID_URL, url)
31
32 numid = re.search(r'documentId=([0-9]+)', url)
33 if numid:
34 video_id = numid.group(1)
35 else:
36 video_id = m.group('video_id')
37
38 webpage = self._download_webpage(url, video_id)
39
40 title = self._html_search_regex(
41 [r'<h1(?:\s+class="boxTopHeadline")?>(.*?)</h1>',
42 r'<meta name="dcterms.title" content="(.*?)"/>',
43 r'<h4 class="headline">(.*?)</h4>'],
44 webpage, 'title')
45 description = self._html_search_meta(
46 'dcterms.abstract', webpage, 'description')
47 thumbnail = self._og_search_thumbnail(webpage)
48
49
50 media_info = self._download_json(
51 'http://www.ardmediathek.de/play/media/%s' % video_id, video_id)
52 # The second element of the _mediaArray contains the standard http urls
53 streams = media_info['_mediaArray'][1]['_mediaStreamArray']
54 if not streams:
55 if '"fsk"' in webpage:
56 raise ExtractorError('This video is only available after 20:00')
57
58 formats = []
59 for s in streams:
60 format = {
61 'quality': s['_quality'],
62 'url': s['_stream'],
63 }
64
65 format['format_id'] = '%s-%s' % (
66 determine_ext(format['url']), format['quality'])
67
68 formats.append(format)
69
70 self._sort_formats(formats)
71
72 return {
73 'id': video_id,
74 'title': title,
75 'description': description,
76 'formats': formats,
77 'thumbnail': thumbnail,
78 }
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/youtube_dl/extractor/ard.py b/youtube_dl/extractor/ard.py
--- a/youtube_dl/extractor/ard.py
+++ b/youtube_dl/extractor/ard.py
@@ -56,7 +56,18 @@
raise ExtractorError('This video is only available after 20:00')
formats = []
+
for s in streams:
+ if type(s['_stream']) == list:
+ for index, url in enumerate(s['_stream'][::-1]):
+ quality = s['_quality'] + index
+ formats.append({
+ 'quality': quality,
+ 'url': url,
+ 'format_id': '%s-%s' % (determine_ext(url), quality)
+ })
+ continue
+
format = {
'quality': s['_quality'],
'url': s['_stream'],
| {"golden_diff": "diff --git a/youtube_dl/extractor/ard.py b/youtube_dl/extractor/ard.py\n--- a/youtube_dl/extractor/ard.py\n+++ b/youtube_dl/extractor/ard.py\n@@ -56,7 +56,18 @@\n raise ExtractorError('This video is only available after 20:00')\n \n formats = []\n+\n for s in streams:\n+ if type(s['_stream']) == list:\n+ for index, url in enumerate(s['_stream'][::-1]):\n+ quality = s['_quality'] + index\n+ formats.append({\n+ 'quality': quality,\n+ 'url': url,\n+ 'format_id': '%s-%s' % (determine_ext(url), quality)\n+ })\n+ continue\n+\n format = {\n 'quality': s['_quality'],\n 'url': s['_stream'],\n", "issue": "AttributeError in ard module\nWith the command given below, I get the error message below. I'm using version 2014.06.09.\n\n`youtube-dl http://www.ardmediathek.de/tv/Klassiker-der-Weltliteratur/Max-Frisch/BR-alpha/Video\\?documentId\\=19067308\\&bcastId\\=14913194`\n\n```\n[ARD] 19067308: Downloading webpage\n[ARD] 19067308: Downloading JSON metadata\nTraceback (most recent call last):\n File \"/usr/bin/youtube-dl\", line 9, in <module>\n load_entry_point('youtube-dl==2014.06.09', 'console_scripts', 'youtube-dl')()\n File \"/usr/lib/python3.4/site-packages/youtube_dl/__init__.py\", line 853, in main\n _real_main(argv)\n File \"/usr/lib/python3.4/site-packages/youtube_dl/__init__.py\", line 843, in _real_main\n retcode = ydl.download(all_urls)\n File \"/usr/lib/python3.4/site-packages/youtube_dl/YoutubeDL.py\", line 1050, in download\n self.extract_info(url)\n File \"/usr/lib/python3.4/site-packages/youtube_dl/YoutubeDL.py\", line 516, in extract_info\n ie_result = ie.extract(url)\n File \"/usr/lib/python3.4/site-packages/youtube_dl/extractor/common.py\", line 168, in extract\n return self._real_extract(url)\n File \"/usr/lib/python3.4/site-packages/youtube_dl/extractor/ard.py\", line 66, in _real_extract\n determine_ext(format['url']), format['quality'])\n File \"/usr/lib/python3.4/site-packages/youtube_dl/utils.py\", line 845, in determine_ext\n guess = url.partition(u'?')[0].rpartition(u'.')[2]\nAttributeError: 'list' object has no attribute 'partition'\n```\n\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n determine_ext,\n ExtractorError,\n)\n\n\nclass ARDIE(InfoExtractor):\n _VALID_URL = r'^https?://(?:(?:www\\.)?ardmediathek\\.de|mediathek\\.daserste\\.de)/(?:.*/)(?P<video_id>[^/\\?]+)(?:\\?.*)?'\n\n _TEST = {\n 'url': 'http://www.ardmediathek.de/das-erste/guenther-jauch/edward-snowden-im-interview-held-oder-verraeter?documentId=19288786',\n 'file': '19288786.mp4',\n 'md5': '515bf47ce209fb3f5a61b7aad364634c',\n 'info_dict': {\n 'title': 'Edward Snowden im Interview - Held oder Verr\u00e4ter?',\n 'description': 'Edward Snowden hat alles aufs Spiel gesetzt, um die weltweite \\xdcberwachung durch die Geheimdienste zu enttarnen. Nun stellt sich der ehemalige NSA-Mitarbeiter erstmals weltweit in einem TV-Interview den Fragen eines NDR-Journalisten. 
Die Sendung vom Sonntagabend.',\n 'thumbnail': 'http://www.ardmediathek.de/ard/servlet/contentblob/19/28/87/90/19288790/bild/2250037',\n },\n 'skip': 'Blocked outside of Germany',\n }\n\n def _real_extract(self, url):\n # determine video id from url\n m = re.match(self._VALID_URL, url)\n\n numid = re.search(r'documentId=([0-9]+)', url)\n if numid:\n video_id = numid.group(1)\n else:\n video_id = m.group('video_id')\n\n webpage = self._download_webpage(url, video_id)\n\n title = self._html_search_regex(\n [r'<h1(?:\\s+class=\"boxTopHeadline\")?>(.*?)</h1>',\n r'<meta name=\"dcterms.title\" content=\"(.*?)\"/>',\n r'<h4 class=\"headline\">(.*?)</h4>'],\n webpage, 'title')\n description = self._html_search_meta(\n 'dcterms.abstract', webpage, 'description')\n thumbnail = self._og_search_thumbnail(webpage)\n\n\n media_info = self._download_json(\n 'http://www.ardmediathek.de/play/media/%s' % video_id, video_id)\n # The second element of the _mediaArray contains the standard http urls\n streams = media_info['_mediaArray'][1]['_mediaStreamArray']\n if not streams:\n if '\"fsk\"' in webpage:\n raise ExtractorError('This video is only available after 20:00')\n\n formats = []\n for s in streams:\n format = {\n 'quality': s['_quality'],\n 'url': s['_stream'],\n }\n\n format['format_id'] = '%s-%s' % (\n determine_ext(format['url']), format['quality'])\n\n formats.append(format)\n\n self._sort_formats(formats)\n\n return {\n 'id': video_id,\n 'title': title,\n 'description': description,\n 'formats': formats,\n 'thumbnail': thumbnail,\n }\n", "path": "youtube_dl/extractor/ard.py"}], "after_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n determine_ext,\n ExtractorError,\n)\n\n\nclass ARDIE(InfoExtractor):\n _VALID_URL = r'^https?://(?:(?:www\\.)?ardmediathek\\.de|mediathek\\.daserste\\.de)/(?:.*/)(?P<video_id>[^/\\?]+)(?:\\?.*)?'\n\n _TEST = {\n 'url': 'http://www.ardmediathek.de/das-erste/guenther-jauch/edward-snowden-im-interview-held-oder-verraeter?documentId=19288786',\n 'file': '19288786.mp4',\n 'md5': '515bf47ce209fb3f5a61b7aad364634c',\n 'info_dict': {\n 'title': 'Edward Snowden im Interview - Held oder Verr\u00e4ter?',\n 'description': 'Edward Snowden hat alles aufs Spiel gesetzt, um die weltweite \\xdcberwachung durch die Geheimdienste zu enttarnen. Nun stellt sich der ehemalige NSA-Mitarbeiter erstmals weltweit in einem TV-Interview den Fragen eines NDR-Journalisten. 
Die Sendung vom Sonntagabend.',\n 'thumbnail': 'http://www.ardmediathek.de/ard/servlet/contentblob/19/28/87/90/19288790/bild/2250037',\n },\n 'skip': 'Blocked outside of Germany',\n }\n\n def _real_extract(self, url):\n # determine video id from url\n m = re.match(self._VALID_URL, url)\n\n numid = re.search(r'documentId=([0-9]+)', url)\n if numid:\n video_id = numid.group(1)\n else:\n video_id = m.group('video_id')\n\n webpage = self._download_webpage(url, video_id)\n\n title = self._html_search_regex(\n [r'<h1(?:\\s+class=\"boxTopHeadline\")?>(.*?)</h1>',\n r'<meta name=\"dcterms.title\" content=\"(.*?)\"/>',\n r'<h4 class=\"headline\">(.*?)</h4>'],\n webpage, 'title')\n description = self._html_search_meta(\n 'dcterms.abstract', webpage, 'description')\n thumbnail = self._og_search_thumbnail(webpage)\n\n\n media_info = self._download_json(\n 'http://www.ardmediathek.de/play/media/%s' % video_id, video_id)\n # The second element of the _mediaArray contains the standard http urls\n streams = media_info['_mediaArray'][1]['_mediaStreamArray']\n if not streams:\n if '\"fsk\"' in webpage:\n raise ExtractorError('This video is only available after 20:00')\n\n formats = []\n\n for s in streams:\n if type(s['_stream']) == list:\n for index, url in enumerate(s['_stream'][::-1]):\n quality = s['_quality'] + index\n formats.append({\n 'quality': quality,\n 'url': url,\n 'format_id': '%s-%s' % (determine_ext(url), quality)\n })\n continue\n\n format = {\n 'quality': s['_quality'],\n 'url': s['_stream'],\n }\n\n format['format_id'] = '%s-%s' % (\n determine_ext(format['url']), format['quality'])\n\n formats.append(format)\n\n self._sort_formats(formats)\n\n return {\n 'id': video_id,\n 'title': title,\n 'description': description,\n 'formats': formats,\n 'thumbnail': thumbnail,\n }\n", "path": "youtube_dl/extractor/ard.py"}]} | 1,675 | 196 |
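
The ard patch in the record above copes with `_stream` arriving as a list of URLs rather than a single string, which is what made `determine_ext()` fail on a list. A self-contained sketch of that branching follows; `determine_ext` is reduced to its core and the sample `streams` data is invented for illustration, not real ARD API output.

```python
# Self-contained sketch of the list-vs-string branching from the diff above.
# The sample `streams` data is invented; the patch itself uses an equivalent
# type(s['_stream']) == list check.

def determine_ext(url):
    """Roughly what youtube_dl.utils.determine_ext does for a plain URL."""
    return url.partition('?')[0].rpartition('.')[2]


def build_formats(streams):
    formats = []
    for s in streams:
        if isinstance(s['_stream'], list):
            # Reversed so the last (assumed best) URL gets the highest quality.
            for index, url in enumerate(s['_stream'][::-1]):
                quality = s['_quality'] + index
                formats.append({
                    'quality': quality,
                    'url': url,
                    'format_id': '%s-%s' % (determine_ext(url), quality),
                })
            continue
        formats.append({
            'quality': s['_quality'],
            'url': s['_stream'],
            'format_id': '%s-%s' % (determine_ext(s['_stream']), s['_quality']),
        })
    return formats


if __name__ == "__main__":
    streams = [
        {'_quality': 1, '_stream': 'http://example.com/video_lo.mp4'},
        {'_quality': 2, '_stream': ['http://example.com/a.mp4',
                                    'http://example.com/b.mp4']},
    ]
    for f in build_formats(streams):
        print(f['format_id'], f['url'])
```

Walking the list in reverse assumes the last entry is the best stream, so it receives the highest quality value, matching the ordering assumption the golden diff makes.
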
gh_patches_debug_22327 | rasdani/github-patches | git_diff | kivy__kivy-5727 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Probesysfs provider requires getconf
<!--
The issue tracker is a tool to address bugs.
Please use the #kivy IRC channel on freenode or Stack Overflow for
support questions, more information at https://git.io/vM1yQ.
Before opening a new issue, make sure you do the following:
* check that your issue isn't already filed: https://git.io/vM1iE
* prepare a short, runnable example that reproduces the issue
* reproduce the problem with the latest development version of Kivy
* double-check that the issue is indeed a bug and not a support request
-->
### Versions
* Python: 3.6.4
* OS: Linux
* Kivy: 1.10.0
* Kivy installation method: setuptools
### Description
Kivy's probesysfs provider requires getconf, provided by glibc, to get the platform's LONG_BIT value.
This dependency precludes the use of other C libraries, such as musl, as well as platforms that choose not to install getconf.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kivy/input/providers/probesysfs.py`
Content:
```
1 '''
2 Auto Create Input Provider Config Entry for Available MT Hardware (linux only).
3 ===============================================================================
4
5 Thanks to Marc Tardif for the probing code, taken from scan-for-mt-device.
6
7 The device discovery is done by this provider. However, the reading of
8 input can be performed by other providers like: hidinput, mtdev and
9 linuxwacom. mtdev is used prior to other providers. For more
10 information about mtdev, check :py:class:`~kivy.input.providers.mtdev`.
11
12 Here is an example of auto creation::
13
14 [input]
15 # using mtdev
16 device_%(name)s = probesysfs,provider=mtdev
17 # using hidinput
18 device_%(name)s = probesysfs,provider=hidinput
19 # using mtdev with a match on name
20 device_%(name)s = probesysfs,provider=mtdev,match=acer
21
22 # using hidinput with custom parameters to hidinput (all on one line)
23 %(name)s = probesysfs,
24 provider=hidinput,param=min_pressure=1,param=max_pressure=99
25
26 # you can also match your wacom touchscreen
27 touch = probesysfs,match=E3 Finger,provider=linuxwacom,
28 select_all=1,param=mode=touch
29 # and your wacom pen
30 pen = probesysfs,match=E3 Pen,provider=linuxwacom,
31 select_all=1,param=mode=pen
32
33 By default, ProbeSysfs module will enumerate hardware from the /sys/class/input
34 device, and configure hardware with ABS_MT_POSITION_X capability. But for
35 example, the wacom screen doesn't support this capability. You can prevent this
36 behavior by putting select_all=1 in your config line. Add use_mouse=1 to also
37 include touchscreen hardware that offers core pointer functionality.
38 '''
39
40 __all__ = ('ProbeSysfsHardwareProbe', )
41
42 import os
43 from os.path import sep
44
45 if 'KIVY_DOC' in os.environ:
46
47 ProbeSysfsHardwareProbe = None
48
49 else:
50 from re import match, IGNORECASE
51 from glob import glob
52 from subprocess import Popen, PIPE
53 from kivy.logger import Logger
54 from kivy.input.provider import MotionEventProvider
55 from kivy.input.providers.mouse import MouseMotionEventProvider
56 from kivy.input.factory import MotionEventFactory
57 from kivy.config import _is_rpi
58
59 EventLoop = None
60
61 # See linux/input.h
62 ABS_MT_POSITION_X = 0x35
63
64 _cache_input = None
65 _cache_xinput = None
66
67 class Input(object):
68
69 def __init__(self, path):
70 query_xinput()
71 self.path = path
72
73 @property
74 def device(self):
75 base = os.path.basename(self.path)
76 return os.path.join("/dev", "input", base)
77
78 @property
79 def name(self):
80 path = os.path.join(self.path, "device", "name")
81 return read_line(path)
82
83 def get_capabilities(self):
84 path = os.path.join(self.path, "device", "capabilities", "abs")
85 line = "0"
86 try:
87 line = read_line(path)
88 except OSError:
89 return []
90
91 capabilities = []
92 long_bit = getconf("LONG_BIT")
93 for i, word in enumerate(line.split(" ")):
94 word = int(word, 16)
95 subcapabilities = [bool(word & 1 << i)
96 for i in range(long_bit)]
97 capabilities[:0] = subcapabilities
98
99 return capabilities
100
101 def has_capability(self, capability):
102 capabilities = self.get_capabilities()
103 return len(capabilities) > capability and capabilities[capability]
104
105 @property
106 def is_mouse(self):
107 return self.device in _cache_xinput
108
109 def getout(*args):
110 try:
111 return Popen(args, stdout=PIPE).communicate()[0]
112 except OSError:
113 return ''
114
115 def getconf(var):
116 output = getout("getconf", var)
117 return int(output)
118
119 def query_xinput():
120 global _cache_xinput
121 if _cache_xinput is None:
122 _cache_xinput = []
123 devids = getout('xinput', '--list', '--id-only')
124 for did in devids.splitlines():
125 devprops = getout('xinput', '--list-props', did)
126 evpath = None
127 for prop in devprops.splitlines():
128 prop = prop.strip()
129 if (prop.startswith(b'Device Enabled') and
130 prop.endswith(b'0')):
131 evpath = None
132 break
133 if prop.startswith(b'Device Node'):
134 try:
135 evpath = prop.split('"')[1]
136 except Exception:
137 evpath = None
138 if evpath:
139 _cache_xinput.append(evpath)
140
141 def get_inputs(path):
142 global _cache_input
143 if _cache_input is None:
144 event_glob = os.path.join(path, "event*")
145 _cache_input = [Input(x) for x in glob(event_glob)]
146 return _cache_input
147
148 def read_line(path):
149 f = open(path)
150 try:
151 return f.readline().strip()
152 finally:
153 f.close()
154
155 class ProbeSysfsHardwareProbe(MotionEventProvider):
156
157 def __new__(self, device, args):
158 # hack to not return an instance of this provider.
159 # :)
160 instance = super(ProbeSysfsHardwareProbe, self).__new__(self)
161 instance.__init__(device, args)
162
163 def __init__(self, device, args):
164 super(ProbeSysfsHardwareProbe, self).__init__(device, args)
165 self.provider = 'mtdev'
166 self.match = None
167 self.input_path = '/sys/class/input'
168 self.select_all = True if _is_rpi else False
169 self.use_mouse = False
170 self.use_regex = False
171 self.args = []
172
173 args = args.split(',')
174 for arg in args:
175 if arg == '':
176 continue
177 arg = arg.split('=', 1)
178 # ensure it's a key = value
179 if len(arg) != 2:
180 Logger.error('ProbeSysfs: invalid parameters %s, not'
181 ' key=value format' % arg)
182 continue
183
184 key, value = arg
185 if key == 'match':
186 self.match = value
187 elif key == 'provider':
188 self.provider = value
189 elif key == 'use_regex':
190 self.use_regex = bool(int(value))
191 elif key == 'select_all':
192 self.select_all = bool(int(value))
193 elif key == 'use_mouse':
194 self.use_mouse = bool(int(value))
195 elif key == 'param':
196 self.args.append(value)
197 else:
198 Logger.error('ProbeSysfs: unknown %s option' % key)
199 continue
200
201 self.probe()
202
203 def should_use_mouse(self):
204 return (self.use_mouse or
205 not any(p for p in EventLoop.input_providers
206 if isinstance(p, MouseMotionEventProvider)))
207
208 def probe(self):
209 global EventLoop
210 from kivy.base import EventLoop
211
212 inputs = get_inputs(self.input_path)
213 Logger.debug('ProbeSysfs: using probesysfs!')
214
215 use_mouse = self.should_use_mouse()
216
217 if not self.select_all:
218 inputs = [x for x in inputs if
219 x.has_capability(ABS_MT_POSITION_X) and
220 (use_mouse or not x.is_mouse)]
221 for device in inputs:
222 Logger.debug('ProbeSysfs: found device: %s at %s' % (
223 device.name, device.device))
224
225 # must ignore ?
226 if self.match:
227 if self.use_regex:
228 if not match(self.match, device.name, IGNORECASE):
229 Logger.debug('ProbeSysfs: device not match the'
230 ' rule in config, ignoring.')
231 continue
232 else:
233 if self.match not in device.name:
234 continue
235
236 Logger.info('ProbeSysfs: device match: %s' % device.device)
237
238 d = device.device
239 devicename = self.device % dict(name=d.split(sep)[-1])
240
241 provider = MotionEventFactory.get(self.provider)
242 if provider is None:
243 Logger.info('ProbeSysfs: unable to found provider %s' %
244 self.provider)
245 Logger.info('ProbeSysfs: fallback on hidinput')
246 provider = MotionEventFactory.get('hidinput')
247 if provider is None:
248 Logger.critical('ProbeSysfs: no input provider found'
249 ' to handle this device !')
250 continue
251
252 instance = provider(devicename, '%s,%s' % (
253 device.device, ','.join(self.args)))
254 if instance:
255 EventLoop.add_input_provider(instance)
256
257 MotionEventFactory.register('probesysfs', ProbeSysfsHardwareProbe)
258
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kivy/input/providers/probesysfs.py b/kivy/input/providers/probesysfs.py
--- a/kivy/input/providers/probesysfs.py
+++ b/kivy/input/providers/probesysfs.py
@@ -47,6 +47,7 @@
ProbeSysfsHardwareProbe = None
else:
+ import ctypes
from re import match, IGNORECASE
from glob import glob
from subprocess import Popen, PIPE
@@ -89,7 +90,7 @@
return []
capabilities = []
- long_bit = getconf("LONG_BIT")
+ long_bit = ctypes.sizeof(ctypes.c_long) * 8
for i, word in enumerate(line.split(" ")):
word = int(word, 16)
subcapabilities = [bool(word & 1 << i)
@@ -112,10 +113,6 @@
except OSError:
return ''
- def getconf(var):
- output = getout("getconf", var)
- return int(output)
-
def query_xinput():
global _cache_xinput
if _cache_xinput is None:
| {"golden_diff": "diff --git a/kivy/input/providers/probesysfs.py b/kivy/input/providers/probesysfs.py\n--- a/kivy/input/providers/probesysfs.py\n+++ b/kivy/input/providers/probesysfs.py\n@@ -47,6 +47,7 @@\n ProbeSysfsHardwareProbe = None\n \n else:\n+ import ctypes\n from re import match, IGNORECASE\n from glob import glob\n from subprocess import Popen, PIPE\n@@ -89,7 +90,7 @@\n return []\n \n capabilities = []\n- long_bit = getconf(\"LONG_BIT\")\n+ long_bit = ctypes.sizeof(ctypes.c_long) * 8\n for i, word in enumerate(line.split(\" \")):\n word = int(word, 16)\n subcapabilities = [bool(word & 1 << i)\n@@ -112,10 +113,6 @@\n except OSError:\n return ''\n \n- def getconf(var):\n- output = getout(\"getconf\", var)\n- return int(output)\n-\n def query_xinput():\n global _cache_xinput\n if _cache_xinput is None:\n", "issue": "Probesysfs provider requires getconf\n<!--\r\nThe issue tracker is a tool to address bugs.\r\nPlease use the #kivy IRC channel on freenode or Stack Overflow for\r\nsupport questions, more information at https://git.io/vM1yQ.\r\n\r\nBefore opening a new issue, make sure you do the following:\r\n * check that your issue isn't already filed: https://git.io/vM1iE\r\n * prepare a short, runnable example that reproduces the issue\r\n * reproduce the problem with the latest development version of Kivy\r\n * double-check that the issue is indeed a bug and not a support request\r\n-->\r\n\r\n### Versions\r\n\r\n* Python: 3.6.4\r\n* OS: Linux\r\n* Kivy: 1.10.0\r\n* Kivy installation method: setuptools\r\n\r\n### Description\r\n\r\nKivy's probesysfs provider requires getconf, provided by glibc, to get the platform's LONG_BIT value.\r\n\r\nThis dependency precludes the use of other C libraries, such as musl, as well as platforms that choose not to install getconf.\r\n\n", "before_files": [{"content": "'''\nAuto Create Input Provider Config Entry for Available MT Hardware (linux only).\n===============================================================================\n\nThanks to Marc Tardif for the probing code, taken from scan-for-mt-device.\n\nThe device discovery is done by this provider. However, the reading of\ninput can be performed by other providers like: hidinput, mtdev and\nlinuxwacom. mtdev is used prior to other providers. For more\ninformation about mtdev, check :py:class:`~kivy.input.providers.mtdev`.\n\nHere is an example of auto creation::\n\n [input]\n # using mtdev\n device_%(name)s = probesysfs,provider=mtdev\n # using hidinput\n device_%(name)s = probesysfs,provider=hidinput\n # using mtdev with a match on name\n device_%(name)s = probesysfs,provider=mtdev,match=acer\n\n # using hidinput with custom parameters to hidinput (all on one line)\n %(name)s = probesysfs,\n provider=hidinput,param=min_pressure=1,param=max_pressure=99\n\n # you can also match your wacom touchscreen\n touch = probesysfs,match=E3 Finger,provider=linuxwacom,\n select_all=1,param=mode=touch\n # and your wacom pen\n pen = probesysfs,match=E3 Pen,provider=linuxwacom,\n select_all=1,param=mode=pen\n\nBy default, ProbeSysfs module will enumerate hardware from the /sys/class/input\ndevice, and configure hardware with ABS_MT_POSITION_X capability. But for\nexample, the wacom screen doesn't support this capability. You can prevent this\nbehavior by putting select_all=1 in your config line. 
Add use_mouse=1 to also\ninclude touchscreen hardware that offers core pointer functionality.\n'''\n\n__all__ = ('ProbeSysfsHardwareProbe', )\n\nimport os\nfrom os.path import sep\n\nif 'KIVY_DOC' in os.environ:\n\n ProbeSysfsHardwareProbe = None\n\nelse:\n from re import match, IGNORECASE\n from glob import glob\n from subprocess import Popen, PIPE\n from kivy.logger import Logger\n from kivy.input.provider import MotionEventProvider\n from kivy.input.providers.mouse import MouseMotionEventProvider\n from kivy.input.factory import MotionEventFactory\n from kivy.config import _is_rpi\n\n EventLoop = None\n\n # See linux/input.h\n ABS_MT_POSITION_X = 0x35\n\n _cache_input = None\n _cache_xinput = None\n\n class Input(object):\n\n def __init__(self, path):\n query_xinput()\n self.path = path\n\n @property\n def device(self):\n base = os.path.basename(self.path)\n return os.path.join(\"/dev\", \"input\", base)\n\n @property\n def name(self):\n path = os.path.join(self.path, \"device\", \"name\")\n return read_line(path)\n\n def get_capabilities(self):\n path = os.path.join(self.path, \"device\", \"capabilities\", \"abs\")\n line = \"0\"\n try:\n line = read_line(path)\n except OSError:\n return []\n\n capabilities = []\n long_bit = getconf(\"LONG_BIT\")\n for i, word in enumerate(line.split(\" \")):\n word = int(word, 16)\n subcapabilities = [bool(word & 1 << i)\n for i in range(long_bit)]\n capabilities[:0] = subcapabilities\n\n return capabilities\n\n def has_capability(self, capability):\n capabilities = self.get_capabilities()\n return len(capabilities) > capability and capabilities[capability]\n\n @property\n def is_mouse(self):\n return self.device in _cache_xinput\n\n def getout(*args):\n try:\n return Popen(args, stdout=PIPE).communicate()[0]\n except OSError:\n return ''\n\n def getconf(var):\n output = getout(\"getconf\", var)\n return int(output)\n\n def query_xinput():\n global _cache_xinput\n if _cache_xinput is None:\n _cache_xinput = []\n devids = getout('xinput', '--list', '--id-only')\n for did in devids.splitlines():\n devprops = getout('xinput', '--list-props', did)\n evpath = None\n for prop in devprops.splitlines():\n prop = prop.strip()\n if (prop.startswith(b'Device Enabled') and\n prop.endswith(b'0')):\n evpath = None\n break\n if prop.startswith(b'Device Node'):\n try:\n evpath = prop.split('\"')[1]\n except Exception:\n evpath = None\n if evpath:\n _cache_xinput.append(evpath)\n\n def get_inputs(path):\n global _cache_input\n if _cache_input is None:\n event_glob = os.path.join(path, \"event*\")\n _cache_input = [Input(x) for x in glob(event_glob)]\n return _cache_input\n\n def read_line(path):\n f = open(path)\n try:\n return f.readline().strip()\n finally:\n f.close()\n\n class ProbeSysfsHardwareProbe(MotionEventProvider):\n\n def __new__(self, device, args):\n # hack to not return an instance of this provider.\n # :)\n instance = super(ProbeSysfsHardwareProbe, self).__new__(self)\n instance.__init__(device, args)\n\n def __init__(self, device, args):\n super(ProbeSysfsHardwareProbe, self).__init__(device, args)\n self.provider = 'mtdev'\n self.match = None\n self.input_path = '/sys/class/input'\n self.select_all = True if _is_rpi else False\n self.use_mouse = False\n self.use_regex = False\n self.args = []\n\n args = args.split(',')\n for arg in args:\n if arg == '':\n continue\n arg = arg.split('=', 1)\n # ensure it's a key = value\n if len(arg) != 2:\n Logger.error('ProbeSysfs: invalid parameters %s, not'\n ' key=value format' % arg)\n continue\n\n key, value = 
arg\n if key == 'match':\n self.match = value\n elif key == 'provider':\n self.provider = value\n elif key == 'use_regex':\n self.use_regex = bool(int(value))\n elif key == 'select_all':\n self.select_all = bool(int(value))\n elif key == 'use_mouse':\n self.use_mouse = bool(int(value))\n elif key == 'param':\n self.args.append(value)\n else:\n Logger.error('ProbeSysfs: unknown %s option' % key)\n continue\n\n self.probe()\n\n def should_use_mouse(self):\n return (self.use_mouse or\n not any(p for p in EventLoop.input_providers\n if isinstance(p, MouseMotionEventProvider)))\n\n def probe(self):\n global EventLoop\n from kivy.base import EventLoop\n\n inputs = get_inputs(self.input_path)\n Logger.debug('ProbeSysfs: using probesysfs!')\n\n use_mouse = self.should_use_mouse()\n\n if not self.select_all:\n inputs = [x for x in inputs if\n x.has_capability(ABS_MT_POSITION_X) and\n (use_mouse or not x.is_mouse)]\n for device in inputs:\n Logger.debug('ProbeSysfs: found device: %s at %s' % (\n device.name, device.device))\n\n # must ignore ?\n if self.match:\n if self.use_regex:\n if not match(self.match, device.name, IGNORECASE):\n Logger.debug('ProbeSysfs: device not match the'\n ' rule in config, ignoring.')\n continue\n else:\n if self.match not in device.name:\n continue\n\n Logger.info('ProbeSysfs: device match: %s' % device.device)\n\n d = device.device\n devicename = self.device % dict(name=d.split(sep)[-1])\n\n provider = MotionEventFactory.get(self.provider)\n if provider is None:\n Logger.info('ProbeSysfs: unable to found provider %s' %\n self.provider)\n Logger.info('ProbeSysfs: fallback on hidinput')\n provider = MotionEventFactory.get('hidinput')\n if provider is None:\n Logger.critical('ProbeSysfs: no input provider found'\n ' to handle this device !')\n continue\n\n instance = provider(devicename, '%s,%s' % (\n device.device, ','.join(self.args)))\n if instance:\n EventLoop.add_input_provider(instance)\n\n MotionEventFactory.register('probesysfs', ProbeSysfsHardwareProbe)\n", "path": "kivy/input/providers/probesysfs.py"}], "after_files": [{"content": "'''\nAuto Create Input Provider Config Entry for Available MT Hardware (linux only).\n===============================================================================\n\nThanks to Marc Tardif for the probing code, taken from scan-for-mt-device.\n\nThe device discovery is done by this provider. However, the reading of\ninput can be performed by other providers like: hidinput, mtdev and\nlinuxwacom. mtdev is used prior to other providers. For more\ninformation about mtdev, check :py:class:`~kivy.input.providers.mtdev`.\n\nHere is an example of auto creation::\n\n [input]\n # using mtdev\n device_%(name)s = probesysfs,provider=mtdev\n # using hidinput\n device_%(name)s = probesysfs,provider=hidinput\n # using mtdev with a match on name\n device_%(name)s = probesysfs,provider=mtdev,match=acer\n\n # using hidinput with custom parameters to hidinput (all on one line)\n %(name)s = probesysfs,\n provider=hidinput,param=min_pressure=1,param=max_pressure=99\n\n # you can also match your wacom touchscreen\n touch = probesysfs,match=E3 Finger,provider=linuxwacom,\n select_all=1,param=mode=touch\n # and your wacom pen\n pen = probesysfs,match=E3 Pen,provider=linuxwacom,\n select_all=1,param=mode=pen\n\nBy default, ProbeSysfs module will enumerate hardware from the /sys/class/input\ndevice, and configure hardware with ABS_MT_POSITION_X capability. But for\nexample, the wacom screen doesn't support this capability. 
You can prevent this\nbehavior by putting select_all=1 in your config line. Add use_mouse=1 to also\ninclude touchscreen hardware that offers core pointer functionality.\n'''\n\n__all__ = ('ProbeSysfsHardwareProbe', )\n\nimport os\nfrom os.path import sep\n\nif 'KIVY_DOC' in os.environ:\n\n ProbeSysfsHardwareProbe = None\n\nelse:\n import ctypes\n from re import match, IGNORECASE\n from glob import glob\n from subprocess import Popen, PIPE\n from kivy.logger import Logger\n from kivy.input.provider import MotionEventProvider\n from kivy.input.providers.mouse import MouseMotionEventProvider\n from kivy.input.factory import MotionEventFactory\n from kivy.config import _is_rpi\n\n EventLoop = None\n\n # See linux/input.h\n ABS_MT_POSITION_X = 0x35\n\n _cache_input = None\n _cache_xinput = None\n\n class Input(object):\n\n def __init__(self, path):\n query_xinput()\n self.path = path\n\n @property\n def device(self):\n base = os.path.basename(self.path)\n return os.path.join(\"/dev\", \"input\", base)\n\n @property\n def name(self):\n path = os.path.join(self.path, \"device\", \"name\")\n return read_line(path)\n\n def get_capabilities(self):\n path = os.path.join(self.path, \"device\", \"capabilities\", \"abs\")\n line = \"0\"\n try:\n line = read_line(path)\n except OSError:\n return []\n\n capabilities = []\n long_bit = ctypes.sizeof(ctypes.c_long) * 8\n for i, word in enumerate(line.split(\" \")):\n word = int(word, 16)\n subcapabilities = [bool(word & 1 << i)\n for i in range(long_bit)]\n capabilities[:0] = subcapabilities\n\n return capabilities\n\n def has_capability(self, capability):\n capabilities = self.get_capabilities()\n return len(capabilities) > capability and capabilities[capability]\n\n @property\n def is_mouse(self):\n return self.device in _cache_xinput\n\n def getout(*args):\n try:\n return Popen(args, stdout=PIPE).communicate()[0]\n except OSError:\n return ''\n\n def query_xinput():\n global _cache_xinput\n if _cache_xinput is None:\n _cache_xinput = []\n devids = getout('xinput', '--list', '--id-only')\n for did in devids.splitlines():\n devprops = getout('xinput', '--list-props', did)\n evpath = None\n for prop in devprops.splitlines():\n prop = prop.strip()\n if (prop.startswith(b'Device Enabled') and\n prop.endswith(b'0')):\n evpath = None\n break\n if prop.startswith(b'Device Node'):\n try:\n evpath = prop.split('\"')[1]\n except Exception:\n evpath = None\n if evpath:\n _cache_xinput.append(evpath)\n\n def get_inputs(path):\n global _cache_input\n if _cache_input is None:\n event_glob = os.path.join(path, \"event*\")\n _cache_input = [Input(x) for x in glob(event_glob)]\n return _cache_input\n\n def read_line(path):\n f = open(path)\n try:\n return f.readline().strip()\n finally:\n f.close()\n\n class ProbeSysfsHardwareProbe(MotionEventProvider):\n\n def __new__(self, device, args):\n # hack to not return an instance of this provider.\n # :)\n instance = super(ProbeSysfsHardwareProbe, self).__new__(self)\n instance.__init__(device, args)\n\n def __init__(self, device, args):\n super(ProbeSysfsHardwareProbe, self).__init__(device, args)\n self.provider = 'mtdev'\n self.match = None\n self.input_path = '/sys/class/input'\n self.select_all = True if _is_rpi else False\n self.use_mouse = False\n self.use_regex = False\n self.args = []\n\n args = args.split(',')\n for arg in args:\n if arg == '':\n continue\n arg = arg.split('=', 1)\n # ensure it's a key = value\n if len(arg) != 2:\n Logger.error('ProbeSysfs: invalid parameters %s, not'\n ' key=value format' % arg)\n 
continue\n\n key, value = arg\n if key == 'match':\n self.match = value\n elif key == 'provider':\n self.provider = value\n elif key == 'use_regex':\n self.use_regex = bool(int(value))\n elif key == 'select_all':\n self.select_all = bool(int(value))\n elif key == 'use_mouse':\n self.use_mouse = bool(int(value))\n elif key == 'param':\n self.args.append(value)\n else:\n Logger.error('ProbeSysfs: unknown %s option' % key)\n continue\n\n self.probe()\n\n def should_use_mouse(self):\n return (self.use_mouse or\n not any(p for p in EventLoop.input_providers\n if isinstance(p, MouseMotionEventProvider)))\n\n def probe(self):\n global EventLoop\n from kivy.base import EventLoop\n\n inputs = get_inputs(self.input_path)\n Logger.debug('ProbeSysfs: using probesysfs!')\n\n use_mouse = self.should_use_mouse()\n\n if not self.select_all:\n inputs = [x for x in inputs if\n x.has_capability(ABS_MT_POSITION_X) and\n (use_mouse or not x.is_mouse)]\n for device in inputs:\n Logger.debug('ProbeSysfs: found device: %s at %s' % (\n device.name, device.device))\n\n # must ignore ?\n if self.match:\n if self.use_regex:\n if not match(self.match, device.name, IGNORECASE):\n Logger.debug('ProbeSysfs: device not match the'\n ' rule in config, ignoring.')\n continue\n else:\n if self.match not in device.name:\n continue\n\n Logger.info('ProbeSysfs: device match: %s' % device.device)\n\n d = device.device\n devicename = self.device % dict(name=d.split(sep)[-1])\n\n provider = MotionEventFactory.get(self.provider)\n if provider is None:\n Logger.info('ProbeSysfs: unable to found provider %s' %\n self.provider)\n Logger.info('ProbeSysfs: fallback on hidinput')\n provider = MotionEventFactory.get('hidinput')\n if provider is None:\n Logger.critical('ProbeSysfs: no input provider found'\n ' to handle this device !')\n continue\n\n instance = provider(devicename, '%s,%s' % (\n device.device, ','.join(self.args)))\n if instance:\n EventLoop.add_input_provider(instance)\n\n MotionEventFactory.register('probesysfs', ProbeSysfsHardwareProbe)\n", "path": "kivy/input/providers/probesysfs.py"}]} | 3,068 | 254 |
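
The kivy patch in the record above replaces the `getconf LONG_BIT` subprocess with `ctypes.sizeof(ctypes.c_long) * 8`, so probing works on musl and any other libc without a getconf binary. The sketch below applies that change to the capability-bitmask decoding; the hex line is an invented stand-in for `/sys/class/input/event*/device/capabilities/abs` and the demo assumes a 64-bit platform.

```python
# Standalone sketch of the LONG_BIT change from the diff above.
# The sample `line` is an invented stand-in for the contents of
# /sys/class/input/event*/device/capabilities/abs.

import ctypes

ABS_MT_POSITION_X = 0x35  # see linux/input.h


def parse_abs_capabilities(line):
    """Expand space-separated hex words into a list of capability bits."""
    capabilities = []
    long_bit = ctypes.sizeof(ctypes.c_long) * 8  # replaces `getconf LONG_BIT`
    for word in line.split(" "):
        word = int(word, 16)
        subcapabilities = [bool(word & 1 << i) for i in range(long_bit)]
        # Words are printed most-significant first, so prepend each expansion.
        capabilities[:0] = subcapabilities
    return capabilities


def has_capability(line, capability):
    caps = parse_abs_capabilities(line)
    return len(caps) > capability and caps[capability]


if __name__ == "__main__":
    # Two 64-bit words; bit 0x35 (ABS_MT_POSITION_X) is set in the low word.
    line = "0 20000000000000"
    print(has_capability(line, ABS_MT_POSITION_X))
```

Both approaches report the userspace long width, so the swap changes no behaviour on glibc systems while removing the external dependency.
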
gh_patches_debug_42034 | rasdani/github-patches | git_diff | fossasia__open-event-server-4975 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Access code should only be linked to hidden tickets
**Describe the bug**
<!-- A clear and concise description of what the bug is. -->
Right now we are allowing access code to be linked to any ticket.
**To Reproduce**
Steps to reproduce the behavior:
1. Create an access code linking it to a public ticket
**Expected behavior**
<!-- A clear and concise description of what you expected to happen. -->
We should only allow creating access codes for hidden tickets.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/schema/access_codes.py`
Content:
```
1 from marshmallow import validates_schema
2 from marshmallow_jsonapi import fields
3 from marshmallow_jsonapi.flask import Relationship
4
5 from app.api.helpers.exceptions import UnprocessableEntity
6 from app.api.helpers.utilities import dasherize
7 from app.api.schema.base import SoftDeletionSchema
8 from app.models.access_code import AccessCode
9 from utils.common import use_defaults
10
11
12 @use_defaults()
13 class AccessCodeSchema(SoftDeletionSchema):
14 """
15 Api schema for Access Code Model
16 """
17
18 class Meta:
19 """
20 Meta class for Access Code Api Schema
21 """
22 type_ = 'access-code'
23 self_view = 'v1.access_code_detail'
24 self_view_kwargs = {'id': '<id>'}
25 inflect = dasherize
26
27 @validates_schema(pass_original=True)
28 def validate_date(self, data, original_data):
29 if 'id' in original_data['data']:
30 access_code = AccessCode.query.filter_by(id=original_data['data']['id']).one()
31
32 if 'valid_from' not in data:
33 data['valid_from'] = access_code.valid_from
34
35 if 'valid_till' not in data:
36 data['valid_till'] = access_code.valid_till
37
38 if data['valid_from'] > data['valid_till']:
39 raise UnprocessableEntity({'pointer': '/data/attributes/valid-till'},
40 "valid_till should be after valid_from")
41
42 @validates_schema(pass_original=True)
43 def validate_order_quantity(self, data, original_data):
44 if 'id' in original_data['data']:
45 access_code = AccessCode.query.filter_by(id=original_data['data']['id']).one()
46
47 if 'min_quantity' not in data:
48 data['min_quantity'] = access_code.min_quantity
49
50 if 'max_quantity' not in data:
51 data['max_quantity'] = access_code.max_quantity
52
53 if 'tickets_number' not in data:
54 data['tickets_number'] = access_code.tickets_number
55
56 min_quantity = data.get('min_quantity', None)
57 max_quantity = data.get('max_quantity', None)
58 if min_quantity is not None and max_quantity is not None:
59 if min_quantity > max_quantity:
60 raise UnprocessableEntity(
61 {'pointer': '/data/attributes/min-quantity'},
62 "min-quantity should be less than max-quantity"
63 )
64
65 if 'tickets_number' in data and 'max_quantity' in data:
66 if data['tickets_number'] < data['max_quantity']:
67 raise UnprocessableEntity({'pointer': '/data/attributes/tickets-number'},
68 "tickets-number should be greater than max-quantity")
69
70 id = fields.Integer(dump_ony=True)
71 code = fields.Str(required=True)
72 access_url = fields.Url(allow_none=True)
73 is_active = fields.Boolean(default=False)
74
75 # For event level access this holds the max. uses
76 tickets_number = fields.Integer(validate=lambda n: n >= 0, allow_none=True)
77
78 min_quantity = fields.Integer(validate=lambda n: n >= 0, allow_none=True)
79 max_quantity = fields.Integer(validate=lambda n: n >= 0, allow_none=True)
80 valid_from = fields.DateTime(required=True)
81 valid_till = fields.DateTime(required=True)
82 event = Relationship(attribute='event',
83 self_view='v1.access_code_event',
84 self_view_kwargs={'id': '<id>'},
85 related_view='v1.event_detail',
86 related_view_kwargs={'access_code_id': '<id>'},
87 schema='EventSchemaPublic',
88 type_='event')
89 marketer = Relationship(attribute='user',
90 self_view='v1.access_code_user',
91 self_view_kwargs={'id': '<id>'},
92 related_view='v1.user_detail',
93 related_view_kwargs={'access_code_id': '<id>'},
94 schema='UserSchemaPublic',
95 type_='user')
96 tickets = Relationship(attribute='tickets',
97 self_view='v1.access_code_tickets',
98 self_view_kwargs={'id': '<id>'},
99 related_view='v1.ticket_list',
100 related_view_kwargs={'access_code_id': '<id>'},
101 schema='TicketSchemaPublic',
102 many=True,
103 type_='ticket')
104
```
Path: `app/api/access_codes.py`
Content:
```
1 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
2 from flask_rest_jsonapi.exceptions import ObjectNotFound
3
4 from app.api.bootstrap import api
5 from app.api.helpers.db import safe_query
6 from app.api.helpers.exceptions import ForbiddenException, UnprocessableEntity
7 from app.api.helpers.permission_manager import has_access
8 from app.api.helpers.permissions import jwt_required
9 from app.api.helpers.query import event_query
10 from app.api.helpers.utilities import require_relationship
11 from app.api.schema.access_codes import AccessCodeSchema
12 from app.models import db
13 from app.models.access_code import AccessCode
14 from app.models.ticket import Ticket
15 from app.models.user import User
16
17
18 class AccessCodeListPost(ResourceList):
19 """
20 Create AccessCodes
21 """
22 def before_post(self, args, kwargs, data):
23 """
24 before post method to check for required relationships and permissions
25 :param args:
26 :param kwargs:
27 :param data:
28 :return:
29 """
30 require_relationship(['event', 'user'], data)
31 if not has_access('is_coorganizer', event_id=data['event']):
32 raise ForbiddenException({'source': ''}, "Minimum Organizer access required")
33
34 schema = AccessCodeSchema
35 methods = ['POST', ]
36 data_layer = {'session': db.session,
37 'model': AccessCode
38 }
39
40
41 class AccessCodeList(ResourceList):
42 """
43 List AccessCodes
44 """
45 def query(self, view_kwargs):
46 """
47 Method to get access codes list based on different view_kwargs
48 :param view_kwargs:
49 :return:
50 """
51 query_ = self.session.query(AccessCode)
52 query_ = event_query(self, query_, view_kwargs, permission='is_coorganizer')
53 if view_kwargs.get('user_id'):
54 user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')
55 if not has_access('is_user_itself', user_id=user.id):
56 raise ForbiddenException({'source': ''}, 'Access Forbidden')
57 query_ = query_.join(User).filter(User.id == user.id)
58 if view_kwargs.get('ticket_id'):
59 ticket = safe_query(self, Ticket, 'id', view_kwargs['ticket_id'], 'ticket_id')
60 if not has_access('is_coorganizer', event_id=ticket.event_id):
61 raise ForbiddenException({'source': ''}, 'Access Forbidden')
62 # access_code - ticket :: many-to-many relationship
63 query_ = AccessCode.query.filter(AccessCode.tickets.any(id=ticket.id))
64 query_
65 return query_
66
67 view_kwargs = True
68 methods = ['GET', ]
69 schema = AccessCodeSchema
70 data_layer = {'session': db.session,
71 'model': AccessCode,
72 'methods': {
73 'query': query,
74 }}
75
76
77 class AccessCodeDetail(ResourceDetail):
78 """
79 AccessCode detail by id or code
80 """
81 def before_get(self, args, kwargs):
82 """
83 before get method of access code details.
84 Check for permissions on the basis of kwargs.
85 :param args:
86 :param kwargs:
87 :return:
88 """
89 # Any registered user can fetch access code details using the code.
90 if kwargs.get('code'):
91 access = db.session.query(AccessCode).filter_by(code=kwargs.get('code')).first()
92 if access:
93 kwargs['id'] = access.id
94 else:
95 raise ObjectNotFound({'parameter': '{code}'}, "Access Code: not found")
96 return
97
98 # Co-organizer or the admin can fetch access code details using the id.
99 if kwargs.get('id'):
100 access = db.session.query(AccessCode).filter_by(id=kwargs.get('id')).one()
101 if not access:
102 raise ObjectNotFound({'parameter': '{id}'}, "Access Code: not found")
103
104 if not has_access('is_coorganizer', event_id=access.event_id):
105 raise UnprocessableEntity({'source': ''},
106 "Please verify your permission")
107
108 decorators = (jwt_required, api.has_permission('is_coorganizer', fetch='event_id',
109 fetch_as="event_id", model=AccessCode, methods="PATCH"),
110 api.has_permission('is_coorganizer_but_not_admin', fetch='event_id',
111 fetch_as="event_id", model=AccessCode, methods="DELETE"),)
112 schema = AccessCodeSchema
113 data_layer = {'session': db.session,
114 'model': AccessCode
115 }
116
117
118 class AccessCodeRelationshipRequired(ResourceRelationship):
119 """
120 AccessCode Relationship Required
121 """
122 decorators = (jwt_required,)
123 methods = ['GET', 'PATCH']
124 schema = AccessCodeSchema
125 data_layer = {'session': db.session,
126 'model': AccessCode}
127
128
129 class AccessCodeRelationshipOptional(ResourceRelationship):
130 """
131 AccessCode Relationship Optional
132 """
133 decorators = (jwt_required,)
134 schema = AccessCodeSchema
135 data_layer = {'session': db.session,
136 'model': AccessCode}
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/api/access_codes.py b/app/api/access_codes.py
--- a/app/api/access_codes.py
+++ b/app/api/access_codes.py
@@ -1,9 +1,11 @@
from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
from flask_rest_jsonapi.exceptions import ObjectNotFound
+from sqlalchemy.orm.exc import NoResultFound
from app.api.bootstrap import api
from app.api.helpers.db import safe_query
-from app.api.helpers.exceptions import ForbiddenException, UnprocessableEntity
+from app.api.helpers.exceptions import ForbiddenException, ConflictException
+from app.api.helpers.exceptions import UnprocessableEntity
from app.api.helpers.permission_manager import has_access
from app.api.helpers.permissions import jwt_required
from app.api.helpers.query import event_query
@@ -31,11 +33,33 @@
if not has_access('is_coorganizer', event_id=data['event']):
raise ForbiddenException({'source': ''}, "Minimum Organizer access required")
+ def before_create_object(self, data, view_kwargs):
+ """
+ before create object method for AccessCodeListPost Class
+ :param data:
+ :param view_kwargs:
+ :return:
+ """
+ if data.get('tickets', None):
+ for ticket in data['tickets']:
+ # Ensuring that the ticket exists and is hidden.
+ try:
+ ticket_object = self.session.query(Ticket).filter_by(id=int(ticket),
+ deleted_at=None).one()
+ if not ticket_object.is_hidden:
+ raise ConflictException({'pointer': '/data/relationships/tickets'},
+ "Ticket with id {} is public.".format(ticket) +
+ " Access code cannot be applied to public tickets")
+ except NoResultFound:
+ raise ConflictException({'pointer': '/data/relationships/tickets'},
+ "Ticket with id {} does not exists".format(str(ticket)))
+
schema = AccessCodeSchema
methods = ['POST', ]
data_layer = {'session': db.session,
- 'model': AccessCode
- }
+ 'model': AccessCode,
+ 'methods': {'before_create_object': before_create_object
+ }}
class AccessCodeList(ResourceList):
diff --git a/app/api/schema/access_codes.py b/app/api/schema/access_codes.py
--- a/app/api/schema/access_codes.py
+++ b/app/api/schema/access_codes.py
@@ -55,17 +55,16 @@
min_quantity = data.get('min_quantity', None)
max_quantity = data.get('max_quantity', None)
- if min_quantity is not None and max_quantity is not None:
- if min_quantity > max_quantity:
- raise UnprocessableEntity(
+ tickets_number = data.get('tickets_number', None)
+ if min_quantity and max_quantity and (min_quantity > max_quantity):
+ raise UnprocessableEntity(
{'pointer': '/data/attributes/min-quantity'},
"min-quantity should be less than max-quantity"
- )
+ )
- if 'tickets_number' in data and 'max_quantity' in data:
- if data['tickets_number'] < data['max_quantity']:
- raise UnprocessableEntity({'pointer': '/data/attributes/tickets-number'},
- "tickets-number should be greater than max-quantity")
+ if tickets_number and max_quantity and (tickets_number < max_quantity):
+ raise UnprocessableEntity({'pointer': '/data/attributes/tickets-number'},
+ "tickets-number should be greater than max-quantity")
id = fields.Integer(dump_ony=True)
code = fields.Str(required=True)
| {"golden_diff": "diff --git a/app/api/access_codes.py b/app/api/access_codes.py\n--- a/app/api/access_codes.py\n+++ b/app/api/access_codes.py\n@@ -1,9 +1,11 @@\n from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\n from flask_rest_jsonapi.exceptions import ObjectNotFound\n+from sqlalchemy.orm.exc import NoResultFound\n \n from app.api.bootstrap import api\n from app.api.helpers.db import safe_query\n-from app.api.helpers.exceptions import ForbiddenException, UnprocessableEntity\n+from app.api.helpers.exceptions import ForbiddenException, ConflictException\n+from app.api.helpers.exceptions import UnprocessableEntity\n from app.api.helpers.permission_manager import has_access\n from app.api.helpers.permissions import jwt_required\n from app.api.helpers.query import event_query\n@@ -31,11 +33,33 @@\n if not has_access('is_coorganizer', event_id=data['event']):\n raise ForbiddenException({'source': ''}, \"Minimum Organizer access required\")\n \n+ def before_create_object(self, data, view_kwargs):\n+ \"\"\"\n+ before create object method for AccessCodeListPost Class\n+ :param data:\n+ :param view_kwargs:\n+ :return:\n+ \"\"\"\n+ if data.get('tickets', None):\n+ for ticket in data['tickets']:\n+ # Ensuring that the ticket exists and is hidden.\n+ try:\n+ ticket_object = self.session.query(Ticket).filter_by(id=int(ticket),\n+ deleted_at=None).one()\n+ if not ticket_object.is_hidden:\n+ raise ConflictException({'pointer': '/data/relationships/tickets'},\n+ \"Ticket with id {} is public.\".format(ticket) +\n+ \" Access code cannot be applied to public tickets\")\n+ except NoResultFound:\n+ raise ConflictException({'pointer': '/data/relationships/tickets'},\n+ \"Ticket with id {} does not exists\".format(str(ticket)))\n+\n schema = AccessCodeSchema\n methods = ['POST', ]\n data_layer = {'session': db.session,\n- 'model': AccessCode\n- }\n+ 'model': AccessCode,\n+ 'methods': {'before_create_object': before_create_object\n+ }}\n \n \n class AccessCodeList(ResourceList):\ndiff --git a/app/api/schema/access_codes.py b/app/api/schema/access_codes.py\n--- a/app/api/schema/access_codes.py\n+++ b/app/api/schema/access_codes.py\n@@ -55,17 +55,16 @@\n \n min_quantity = data.get('min_quantity', None)\n max_quantity = data.get('max_quantity', None)\n- if min_quantity is not None and max_quantity is not None:\n- if min_quantity > max_quantity:\n- raise UnprocessableEntity(\n+ tickets_number = data.get('tickets_number', None)\n+ if min_quantity and max_quantity and (min_quantity > max_quantity):\n+ raise UnprocessableEntity(\n {'pointer': '/data/attributes/min-quantity'},\n \"min-quantity should be less than max-quantity\"\n- )\n+ )\n \n- if 'tickets_number' in data and 'max_quantity' in data:\n- if data['tickets_number'] < data['max_quantity']:\n- raise UnprocessableEntity({'pointer': '/data/attributes/tickets-number'},\n- \"tickets-number should be greater than max-quantity\")\n+ if tickets_number and max_quantity and (tickets_number < max_quantity):\n+ raise UnprocessableEntity({'pointer': '/data/attributes/tickets-number'},\n+ \"tickets-number should be greater than max-quantity\")\n \n id = fields.Integer(dump_ony=True)\n code = fields.Str(required=True)\n", "issue": "Access code should only be linked to hidden tickets\n**Describe the bug**\r\n<!-- A clear and concise description of what the bug is. -->\r\nRight now we are allowing access code to be linked to any ticket.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. 
Create an access code linking it to a public ticket\r\n\r\n**Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nWe should only allow creating access codes for hidden tickets.\n", "before_files": [{"content": "from marshmallow import validates_schema\nfrom marshmallow_jsonapi import fields\nfrom marshmallow_jsonapi.flask import Relationship\n\nfrom app.api.helpers.exceptions import UnprocessableEntity\nfrom app.api.helpers.utilities import dasherize\nfrom app.api.schema.base import SoftDeletionSchema\nfrom app.models.access_code import AccessCode\nfrom utils.common import use_defaults\n\n\n@use_defaults()\nclass AccessCodeSchema(SoftDeletionSchema):\n \"\"\"\n Api schema for Access Code Model\n \"\"\"\n\n class Meta:\n \"\"\"\n Meta class for Access Code Api Schema\n \"\"\"\n type_ = 'access-code'\n self_view = 'v1.access_code_detail'\n self_view_kwargs = {'id': '<id>'}\n inflect = dasherize\n\n @validates_schema(pass_original=True)\n def validate_date(self, data, original_data):\n if 'id' in original_data['data']:\n access_code = AccessCode.query.filter_by(id=original_data['data']['id']).one()\n\n if 'valid_from' not in data:\n data['valid_from'] = access_code.valid_from\n\n if 'valid_till' not in data:\n data['valid_till'] = access_code.valid_till\n\n if data['valid_from'] > data['valid_till']:\n raise UnprocessableEntity({'pointer': '/data/attributes/valid-till'},\n \"valid_till should be after valid_from\")\n\n @validates_schema(pass_original=True)\n def validate_order_quantity(self, data, original_data):\n if 'id' in original_data['data']:\n access_code = AccessCode.query.filter_by(id=original_data['data']['id']).one()\n\n if 'min_quantity' not in data:\n data['min_quantity'] = access_code.min_quantity\n\n if 'max_quantity' not in data:\n data['max_quantity'] = access_code.max_quantity\n\n if 'tickets_number' not in data:\n data['tickets_number'] = access_code.tickets_number\n\n min_quantity = data.get('min_quantity', None)\n max_quantity = data.get('max_quantity', None)\n if min_quantity is not None and max_quantity is not None:\n if min_quantity > max_quantity:\n raise UnprocessableEntity(\n {'pointer': '/data/attributes/min-quantity'},\n \"min-quantity should be less than max-quantity\"\n )\n\n if 'tickets_number' in data and 'max_quantity' in data:\n if data['tickets_number'] < data['max_quantity']:\n raise UnprocessableEntity({'pointer': '/data/attributes/tickets-number'},\n \"tickets-number should be greater than max-quantity\")\n\n id = fields.Integer(dump_ony=True)\n code = fields.Str(required=True)\n access_url = fields.Url(allow_none=True)\n is_active = fields.Boolean(default=False)\n\n # For event level access this holds the max. 
uses\n tickets_number = fields.Integer(validate=lambda n: n >= 0, allow_none=True)\n\n min_quantity = fields.Integer(validate=lambda n: n >= 0, allow_none=True)\n max_quantity = fields.Integer(validate=lambda n: n >= 0, allow_none=True)\n valid_from = fields.DateTime(required=True)\n valid_till = fields.DateTime(required=True)\n event = Relationship(attribute='event',\n self_view='v1.access_code_event',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.event_detail',\n related_view_kwargs={'access_code_id': '<id>'},\n schema='EventSchemaPublic',\n type_='event')\n marketer = Relationship(attribute='user',\n self_view='v1.access_code_user',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.user_detail',\n related_view_kwargs={'access_code_id': '<id>'},\n schema='UserSchemaPublic',\n type_='user')\n tickets = Relationship(attribute='tickets',\n self_view='v1.access_code_tickets',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.ticket_list',\n related_view_kwargs={'access_code_id': '<id>'},\n schema='TicketSchemaPublic',\n many=True,\n type_='ticket')\n", "path": "app/api/schema/access_codes.py"}, {"content": "from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom flask_rest_jsonapi.exceptions import ObjectNotFound\n\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.exceptions import ForbiddenException, UnprocessableEntity\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.permissions import jwt_required\nfrom app.api.helpers.query import event_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.schema.access_codes import AccessCodeSchema\nfrom app.models import db\nfrom app.models.access_code import AccessCode\nfrom app.models.ticket import Ticket\nfrom app.models.user import User\n\n\nclass AccessCodeListPost(ResourceList):\n \"\"\"\n Create AccessCodes\n \"\"\"\n def before_post(self, args, kwargs, data):\n \"\"\"\n before post method to check for required relationships and permissions\n :param args:\n :param kwargs:\n :param data:\n :return:\n \"\"\"\n require_relationship(['event', 'user'], data)\n if not has_access('is_coorganizer', event_id=data['event']):\n raise ForbiddenException({'source': ''}, \"Minimum Organizer access required\")\n\n schema = AccessCodeSchema\n methods = ['POST', ]\n data_layer = {'session': db.session,\n 'model': AccessCode\n }\n\n\nclass AccessCodeList(ResourceList):\n \"\"\"\n List AccessCodes\n \"\"\"\n def query(self, view_kwargs):\n \"\"\"\n Method to get access codes list based on different view_kwargs\n :param view_kwargs:\n :return:\n \"\"\"\n query_ = self.session.query(AccessCode)\n query_ = event_query(self, query_, view_kwargs, permission='is_coorganizer')\n if view_kwargs.get('user_id'):\n user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')\n if not has_access('is_user_itself', user_id=user.id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n query_ = query_.join(User).filter(User.id == user.id)\n if view_kwargs.get('ticket_id'):\n ticket = safe_query(self, Ticket, 'id', view_kwargs['ticket_id'], 'ticket_id')\n if not has_access('is_coorganizer', event_id=ticket.event_id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n # access_code - ticket :: many-to-many relationship\n query_ = AccessCode.query.filter(AccessCode.tickets.any(id=ticket.id))\n query_\n return query_\n\n view_kwargs = True\n methods = ['GET', ]\n schema = AccessCodeSchema\n 
data_layer = {'session': db.session,\n 'model': AccessCode,\n 'methods': {\n 'query': query,\n }}\n\n\nclass AccessCodeDetail(ResourceDetail):\n \"\"\"\n AccessCode detail by id or code\n \"\"\"\n def before_get(self, args, kwargs):\n \"\"\"\n before get method of access code details.\n Check for permissions on the basis of kwargs.\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n # Any registered user can fetch access code details using the code.\n if kwargs.get('code'):\n access = db.session.query(AccessCode).filter_by(code=kwargs.get('code')).first()\n if access:\n kwargs['id'] = access.id\n else:\n raise ObjectNotFound({'parameter': '{code}'}, \"Access Code: not found\")\n return\n\n # Co-organizer or the admin can fetch access code details using the id.\n if kwargs.get('id'):\n access = db.session.query(AccessCode).filter_by(id=kwargs.get('id')).one()\n if not access:\n raise ObjectNotFound({'parameter': '{id}'}, \"Access Code: not found\")\n\n if not has_access('is_coorganizer', event_id=access.event_id):\n raise UnprocessableEntity({'source': ''},\n \"Please verify your permission\")\n\n decorators = (jwt_required, api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=AccessCode, methods=\"PATCH\"),\n api.has_permission('is_coorganizer_but_not_admin', fetch='event_id',\n fetch_as=\"event_id\", model=AccessCode, methods=\"DELETE\"),)\n schema = AccessCodeSchema\n data_layer = {'session': db.session,\n 'model': AccessCode\n }\n\n\nclass AccessCodeRelationshipRequired(ResourceRelationship):\n \"\"\"\n AccessCode Relationship Required\n \"\"\"\n decorators = (jwt_required,)\n methods = ['GET', 'PATCH']\n schema = AccessCodeSchema\n data_layer = {'session': db.session,\n 'model': AccessCode}\n\n\nclass AccessCodeRelationshipOptional(ResourceRelationship):\n \"\"\"\n AccessCode Relationship Optional\n \"\"\"\n decorators = (jwt_required,)\n schema = AccessCodeSchema\n data_layer = {'session': db.session,\n 'model': AccessCode}\n", "path": "app/api/access_codes.py"}], "after_files": [{"content": "from marshmallow import validates_schema\nfrom marshmallow_jsonapi import fields\nfrom marshmallow_jsonapi.flask import Relationship\n\nfrom app.api.helpers.exceptions import UnprocessableEntity\nfrom app.api.helpers.utilities import dasherize\nfrom app.api.schema.base import SoftDeletionSchema\nfrom app.models.access_code import AccessCode\nfrom utils.common import use_defaults\n\n\n@use_defaults()\nclass AccessCodeSchema(SoftDeletionSchema):\n \"\"\"\n Api schema for Access Code Model\n \"\"\"\n\n class Meta:\n \"\"\"\n Meta class for Access Code Api Schema\n \"\"\"\n type_ = 'access-code'\n self_view = 'v1.access_code_detail'\n self_view_kwargs = {'id': '<id>'}\n inflect = dasherize\n\n @validates_schema(pass_original=True)\n def validate_date(self, data, original_data):\n if 'id' in original_data['data']:\n access_code = AccessCode.query.filter_by(id=original_data['data']['id']).one()\n\n if 'valid_from' not in data:\n data['valid_from'] = access_code.valid_from\n\n if 'valid_till' not in data:\n data['valid_till'] = access_code.valid_till\n\n if data['valid_from'] > data['valid_till']:\n raise UnprocessableEntity({'pointer': '/data/attributes/valid-till'},\n \"valid_till should be after valid_from\")\n\n @validates_schema(pass_original=True)\n def validate_order_quantity(self, data, original_data):\n if 'id' in original_data['data']:\n access_code = AccessCode.query.filter_by(id=original_data['data']['id']).one()\n\n if 'min_quantity' not in data:\n 
data['min_quantity'] = access_code.min_quantity\n\n if 'max_quantity' not in data:\n data['max_quantity'] = access_code.max_quantity\n\n if 'tickets_number' not in data:\n data['tickets_number'] = access_code.tickets_number\n\n min_quantity = data.get('min_quantity', None)\n max_quantity = data.get('max_quantity', None)\n tickets_number = data.get('tickets_number', None)\n if min_quantity and max_quantity and (min_quantity > max_quantity):\n raise UnprocessableEntity(\n {'pointer': '/data/attributes/min-quantity'},\n \"min-quantity should be less than max-quantity\"\n )\n\n if tickets_number and max_quantity and (tickets_number < max_quantity):\n raise UnprocessableEntity({'pointer': '/data/attributes/tickets-number'},\n \"tickets-number should be greater than max-quantity\")\n\n id = fields.Integer(dump_ony=True)\n code = fields.Str(required=True)\n access_url = fields.Url(allow_none=True)\n is_active = fields.Boolean(default=False)\n\n # For event level access this holds the max. uses\n tickets_number = fields.Integer(validate=lambda n: n >= 0, allow_none=True)\n\n min_quantity = fields.Integer(validate=lambda n: n >= 0, allow_none=True)\n max_quantity = fields.Integer(validate=lambda n: n >= 0, allow_none=True)\n valid_from = fields.DateTime(required=True)\n valid_till = fields.DateTime(required=True)\n event = Relationship(attribute='event',\n self_view='v1.access_code_event',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.event_detail',\n related_view_kwargs={'access_code_id': '<id>'},\n schema='EventSchemaPublic',\n type_='event')\n marketer = Relationship(attribute='user',\n self_view='v1.access_code_user',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.user_detail',\n related_view_kwargs={'access_code_id': '<id>'},\n schema='UserSchemaPublic',\n type_='user')\n tickets = Relationship(attribute='tickets',\n self_view='v1.access_code_tickets',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.ticket_list',\n related_view_kwargs={'access_code_id': '<id>'},\n schema='TicketSchemaPublic',\n many=True,\n type_='ticket')\n", "path": "app/api/schema/access_codes.py"}, {"content": "from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom flask_rest_jsonapi.exceptions import ObjectNotFound\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.exceptions import ForbiddenException, ConflictException\nfrom app.api.helpers.exceptions import UnprocessableEntity\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.permissions import jwt_required\nfrom app.api.helpers.query import event_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.schema.access_codes import AccessCodeSchema\nfrom app.models import db\nfrom app.models.access_code import AccessCode\nfrom app.models.ticket import Ticket\nfrom app.models.user import User\n\n\nclass AccessCodeListPost(ResourceList):\n \"\"\"\n Create AccessCodes\n \"\"\"\n def before_post(self, args, kwargs, data):\n \"\"\"\n before post method to check for required relationships and permissions\n :param args:\n :param kwargs:\n :param data:\n :return:\n \"\"\"\n require_relationship(['event', 'user'], data)\n if not has_access('is_coorganizer', event_id=data['event']):\n raise ForbiddenException({'source': ''}, \"Minimum Organizer access required\")\n\n def before_create_object(self, data, view_kwargs):\n \"\"\"\n before create object method for AccessCodeListPost 
Class\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if data.get('tickets', None):\n for ticket in data['tickets']:\n # Ensuring that the ticket exists and is hidden.\n try:\n ticket_object = self.session.query(Ticket).filter_by(id=int(ticket),\n deleted_at=None).one()\n if not ticket_object.is_hidden:\n raise ConflictException({'pointer': '/data/relationships/tickets'},\n \"Ticket with id {} is public.\".format(ticket) +\n \" Access code cannot be applied to public tickets\")\n except NoResultFound:\n raise ConflictException({'pointer': '/data/relationships/tickets'},\n \"Ticket with id {} does not exists\".format(str(ticket)))\n\n schema = AccessCodeSchema\n methods = ['POST', ]\n data_layer = {'session': db.session,\n 'model': AccessCode,\n 'methods': {'before_create_object': before_create_object\n }}\n\n\nclass AccessCodeList(ResourceList):\n \"\"\"\n List AccessCodes\n \"\"\"\n def query(self, view_kwargs):\n \"\"\"\n Method to get access codes list based on different view_kwargs\n :param view_kwargs:\n :return:\n \"\"\"\n query_ = self.session.query(AccessCode)\n query_ = event_query(self, query_, view_kwargs, permission='is_coorganizer')\n if view_kwargs.get('user_id'):\n user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')\n if not has_access('is_user_itself', user_id=user.id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n query_ = query_.join(User).filter(User.id == user.id)\n if view_kwargs.get('ticket_id'):\n ticket = safe_query(self, Ticket, 'id', view_kwargs['ticket_id'], 'ticket_id')\n if not has_access('is_coorganizer', event_id=ticket.event_id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n # access_code - ticket :: many-to-many relationship\n query_ = AccessCode.query.filter(AccessCode.tickets.any(id=ticket.id))\n query_\n return query_\n\n view_kwargs = True\n methods = ['GET', ]\n schema = AccessCodeSchema\n data_layer = {'session': db.session,\n 'model': AccessCode,\n 'methods': {\n 'query': query,\n }}\n\n\nclass AccessCodeDetail(ResourceDetail):\n \"\"\"\n AccessCode detail by id or code\n \"\"\"\n def before_get(self, args, kwargs):\n \"\"\"\n before get method of access code details.\n Check for permissions on the basis of kwargs.\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n # Any registered user can fetch access code details using the code.\n if kwargs.get('code'):\n access = db.session.query(AccessCode).filter_by(code=kwargs.get('code')).first()\n if access:\n kwargs['id'] = access.id\n else:\n raise ObjectNotFound({'parameter': '{code}'}, \"Access Code: not found\")\n return\n\n # Co-organizer or the admin can fetch access code details using the id.\n if kwargs.get('id'):\n access = db.session.query(AccessCode).filter_by(id=kwargs.get('id')).one()\n if not access:\n raise ObjectNotFound({'parameter': '{id}'}, \"Access Code: not found\")\n\n if not has_access('is_coorganizer', event_id=access.event_id):\n raise UnprocessableEntity({'source': ''},\n \"Please verify your permission\")\n\n decorators = (jwt_required, api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=AccessCode, methods=\"PATCH\"),\n api.has_permission('is_coorganizer_but_not_admin', fetch='event_id',\n fetch_as=\"event_id\", model=AccessCode, methods=\"DELETE\"),)\n schema = AccessCodeSchema\n data_layer = {'session': db.session,\n 'model': AccessCode\n }\n\n\nclass AccessCodeRelationshipRequired(ResourceRelationship):\n \"\"\"\n AccessCode Relationship Required\n \"\"\"\n decorators = 
(jwt_required,)\n methods = ['GET', 'PATCH']\n schema = AccessCodeSchema\n data_layer = {'session': db.session,\n 'model': AccessCode}\n\n\nclass AccessCodeRelationshipOptional(ResourceRelationship):\n \"\"\"\n AccessCode Relationship Optional\n \"\"\"\n decorators = (jwt_required,)\n schema = AccessCodeSchema\n data_layer = {'session': db.session,\n 'model': AccessCode}\n", "path": "app/api/access_codes.py"}]} | 2,817 | 774 |
gh_patches_debug_38422 | rasdani/github-patches | git_diff | encode__starlette-105 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Credentialed CORS standard requests should not respond with wildcard origins
See https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS#Credentialed_requests_and_wildcards
If a standard request is made that includes any cookie headers, then CORSMiddleware *ought* to strictly respond with the requested origin, rather than a wildcard.
This is actually potentially a bit fiddly since we maybe also need to make sure to *set or add* Vary: Origin in those cases, in order to ensure correct cacheability.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlette/middleware/cors.py`
Content:
```
1 from starlette.datastructures import Headers, MutableHeaders, URL
2 from starlette.responses import PlainTextResponse
3 from starlette.types import ASGIApp, ASGIInstance, Scope
4 import functools
5 import typing
6 import re
7
8
9 ALL_METHODS = ("DELETE", "GET", "OPTIONS", "PATCH", "POST", "PUT")
10
11
12 class CORSMiddleware:
13 def __init__(
14 self,
15 app: ASGIApp,
16 allow_origins: typing.Sequence[str] = (),
17 allow_methods: typing.Sequence[str] = ("GET",),
18 allow_headers: typing.Sequence[str] = (),
19 allow_credentials: bool = False,
20 allow_origin_regex: str = None,
21 expose_headers: typing.Sequence[str] = (),
22 max_age: int = 600,
23 ) -> None:
24
25 if "*" in allow_methods:
26 allow_methods = ALL_METHODS
27
28 compiled_allow_origin_regex = None
29 if allow_origin_regex is not None:
30 compiled_allow_origin_regex = re.compile(allow_origin_regex)
31
32 simple_headers = {}
33 if "*" in allow_origins:
34 simple_headers["Access-Control-Allow-Origin"] = "*"
35 if allow_credentials:
36 simple_headers["Access-Control-Allow-Credentials"] = "true"
37 if expose_headers:
38 simple_headers["Access-Control-Expose-Headers"] = ", ".join(expose_headers)
39
40 preflight_headers = {}
41 if "*" in allow_origins:
42 preflight_headers["Access-Control-Allow-Origin"] = "*"
43 else:
44 preflight_headers["Vary"] = "Origin"
45 preflight_headers.update(
46 {
47 "Access-Control-Allow-Methods": ", ".join(allow_methods),
48 "Access-Control-Max-Age": str(max_age),
49 }
50 )
51 if allow_headers and "*" not in allow_headers:
52 preflight_headers["Access-Control-Allow-Headers"] = ", ".join(allow_headers)
53 if allow_credentials:
54 preflight_headers["Access-Control-Allow-Credentials"] = "true"
55
56 self.app = app
57 self.allow_origins = allow_origins
58 self.allow_methods = allow_methods
59 self.allow_headers = allow_headers
60 self.allow_all_origins = "*" in allow_origins
61 self.allow_all_headers = "*" in allow_headers
62 self.allow_origin_regex = compiled_allow_origin_regex
63 self.simple_headers = simple_headers
64 self.preflight_headers = preflight_headers
65
66 def __call__(self, scope: Scope):
67 if scope["type"] == "http":
68 method = scope["method"]
69 headers = Headers(scope["headers"])
70 origin = headers.get("origin")
71
72 if origin is not None:
73 if method == "OPTIONS" and "access-control-request-method" in headers:
74 return self.preflight_response(request_headers=headers)
75 else:
76 return functools.partial(
77 self.simple_response, scope=scope, origin=origin
78 )
79
80 return self.app(scope)
81
82 def is_allowed_origin(self, origin):
83 if self.allow_all_origins:
84 return True
85
86 if self.allow_origin_regex is not None and self.allow_origin_regex.match(
87 origin
88 ):
89 return True
90
91 return origin in self.allow_origins
92
93 def preflight_response(self, request_headers):
94 requested_origin = request_headers["origin"]
95 requested_method = request_headers["access-control-request-method"]
96 requested_headers = request_headers.get("access-control-request-headers")
97 requested_cookie = "cookie" in request_headers
98
99 headers = dict(self.preflight_headers)
100 failures = []
101
102 if self.is_allowed_origin(origin=requested_origin):
103 if not self.allow_all_origins:
104 # If self.allow_all_origins is True, then the "Access-Control-Allow-Origin"
105 # header is already set to "*".
106 # If we only allow specific origins, then we have to mirror back
107 # the Origin header in the response.
108 headers["Access-Control-Allow-Origin"] = requested_origin
109 else:
110 failures.append("origin")
111
112 if requested_method not in self.allow_methods:
113 failures.append("method")
114
115 # If we allow all headers, then we have to mirror back any requested
116 # headers in the response.
117 if self.allow_all_headers and requested_headers is not None:
118 headers["Access-Control-Allow-Headers"] = requested_headers
119 elif requested_headers is not None:
120 for header in requested_headers.split(","):
121 if header.strip() not in self.allow_headers:
122 failures.append("headers")
123
124 # We don't strictly need to use 400 responses here, since its up to
125 # the browser to enforce the CORS policy, but its more informative
126 # if we do.
127 if failures:
128 failure_text = "Disallowed CORS " + ", ".join(failures)
129 return PlainTextResponse(failure_text, status_code=400, headers=headers)
130
131 return PlainTextResponse("OK", status_code=200, headers=headers)
132
133 async def simple_response(self, receive, send, scope=None, origin=None):
134 inner = self.app(scope)
135 send = functools.partial(self.send, send=send, origin=origin)
136 await inner(receive, send)
137
138 async def send(self, message, send=None, origin=None):
139 if message["type"] != "http.response.start":
140 await send(message)
141 return
142
143 message.setdefault("headers", [])
144 headers = MutableHeaders(message["headers"])
145
146 # If we only allow specific origins, then we have to mirror back
147 # the Origin header in the response.
148 if not self.allow_all_origins and self.is_allowed_origin(origin=origin):
149 headers["Access-Control-Allow-Origin"] = origin
150 headers.update(self.simple_headers)
151 await send(message)
152
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/starlette/middleware/cors.py b/starlette/middleware/cors.py
--- a/starlette/middleware/cors.py
+++ b/starlette/middleware/cors.py
@@ -32,6 +32,8 @@
simple_headers = {}
if "*" in allow_origins:
simple_headers["Access-Control-Allow-Origin"] = "*"
+ else:
+ simple_headers["Vary"] = "Origin"
if allow_credentials:
simple_headers["Access-Control-Allow-Credentials"] = "true"
if expose_headers:
@@ -74,7 +76,7 @@
return self.preflight_response(request_headers=headers)
else:
return functools.partial(
- self.simple_response, scope=scope, origin=origin
+ self.simple_response, scope=scope, request_headers=headers
)
return self.app(scope)
@@ -130,22 +132,31 @@
return PlainTextResponse("OK", status_code=200, headers=headers)
- async def simple_response(self, receive, send, scope=None, origin=None):
+ async def simple_response(self, receive, send, scope=None, request_headers=None):
inner = self.app(scope)
- send = functools.partial(self.send, send=send, origin=origin)
+ send = functools.partial(self.send, send=send, request_headers=request_headers)
await inner(receive, send)
- async def send(self, message, send=None, origin=None):
+ async def send(self, message, send=None, request_headers=None):
if message["type"] != "http.response.start":
await send(message)
return
message.setdefault("headers", [])
headers = MutableHeaders(message["headers"])
+ origin = request_headers["Origin"]
+ has_cookie = "cookie" in request_headers
+
+ # If request includes any cookie headers, then we must respond
+ # with the specific origin instead of '*'.
+ if self.allow_all_origins and has_cookie:
+ self.simple_headers["Access-Control-Allow-Origin"] = origin
# If we only allow specific origins, then we have to mirror back
# the Origin header in the response.
- if not self.allow_all_origins and self.is_allowed_origin(origin=origin):
+ elif not self.allow_all_origins and self.is_allowed_origin(origin=origin):
headers["Access-Control-Allow-Origin"] = origin
+ if "vary" in headers:
+ self.simple_headers["Vary"] = f"{headers.get('vary')}, Origin"
headers.update(self.simple_headers)
await send(message)
| {"golden_diff": "diff --git a/starlette/middleware/cors.py b/starlette/middleware/cors.py\n--- a/starlette/middleware/cors.py\n+++ b/starlette/middleware/cors.py\n@@ -32,6 +32,8 @@\n simple_headers = {}\n if \"*\" in allow_origins:\n simple_headers[\"Access-Control-Allow-Origin\"] = \"*\"\n+ else:\n+ simple_headers[\"Vary\"] = \"Origin\"\n if allow_credentials:\n simple_headers[\"Access-Control-Allow-Credentials\"] = \"true\"\n if expose_headers:\n@@ -74,7 +76,7 @@\n return self.preflight_response(request_headers=headers)\n else:\n return functools.partial(\n- self.simple_response, scope=scope, origin=origin\n+ self.simple_response, scope=scope, request_headers=headers\n )\n \n return self.app(scope)\n@@ -130,22 +132,31 @@\n \n return PlainTextResponse(\"OK\", status_code=200, headers=headers)\n \n- async def simple_response(self, receive, send, scope=None, origin=None):\n+ async def simple_response(self, receive, send, scope=None, request_headers=None):\n inner = self.app(scope)\n- send = functools.partial(self.send, send=send, origin=origin)\n+ send = functools.partial(self.send, send=send, request_headers=request_headers)\n await inner(receive, send)\n \n- async def send(self, message, send=None, origin=None):\n+ async def send(self, message, send=None, request_headers=None):\n if message[\"type\"] != \"http.response.start\":\n await send(message)\n return\n \n message.setdefault(\"headers\", [])\n headers = MutableHeaders(message[\"headers\"])\n+ origin = request_headers[\"Origin\"]\n+ has_cookie = \"cookie\" in request_headers\n+\n+ # If request includes any cookie headers, then we must respond\n+ # with the specific origin instead of '*'.\n+ if self.allow_all_origins and has_cookie:\n+ self.simple_headers[\"Access-Control-Allow-Origin\"] = origin\n \n # If we only allow specific origins, then we have to mirror back\n # the Origin header in the response.\n- if not self.allow_all_origins and self.is_allowed_origin(origin=origin):\n+ elif not self.allow_all_origins and self.is_allowed_origin(origin=origin):\n headers[\"Access-Control-Allow-Origin\"] = origin\n+ if \"vary\" in headers:\n+ self.simple_headers[\"Vary\"] = f\"{headers.get('vary')}, Origin\"\n headers.update(self.simple_headers)\n await send(message)\n", "issue": "Credentialed CORS standard requests should not respond with wildcard origins\nSee https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS#Credentialed_requests_and_wildcards \r\n\r\nIf a standard request is made, that includes any cookie headers, then CORSMiddleware *ought* to strictly respond with the requested origin, rather than a wildcard.\r\n\r\nThis is actually potentially a bit fiddly since we maybe also need to make sure to *set or add* Vary: Origin in those cases, in order to ensure correct cacheability.\n", "before_files": [{"content": "from starlette.datastructures import Headers, MutableHeaders, URL\nfrom starlette.responses import PlainTextResponse\nfrom starlette.types import ASGIApp, ASGIInstance, Scope\nimport functools\nimport typing\nimport re\n\n\nALL_METHODS = (\"DELETE\", \"GET\", \"OPTIONS\", \"PATCH\", \"POST\", \"PUT\")\n\n\nclass CORSMiddleware:\n def __init__(\n self,\n app: ASGIApp,\n allow_origins: typing.Sequence[str] = (),\n allow_methods: typing.Sequence[str] = (\"GET\",),\n allow_headers: typing.Sequence[str] = (),\n allow_credentials: bool = False,\n allow_origin_regex: str = None,\n expose_headers: typing.Sequence[str] = (),\n max_age: int = 600,\n ) -> None:\n\n if \"*\" in allow_methods:\n allow_methods = ALL_METHODS\n\n 
compiled_allow_origin_regex = None\n if allow_origin_regex is not None:\n compiled_allow_origin_regex = re.compile(allow_origin_regex)\n\n simple_headers = {}\n if \"*\" in allow_origins:\n simple_headers[\"Access-Control-Allow-Origin\"] = \"*\"\n if allow_credentials:\n simple_headers[\"Access-Control-Allow-Credentials\"] = \"true\"\n if expose_headers:\n simple_headers[\"Access-Control-Expose-Headers\"] = \", \".join(expose_headers)\n\n preflight_headers = {}\n if \"*\" in allow_origins:\n preflight_headers[\"Access-Control-Allow-Origin\"] = \"*\"\n else:\n preflight_headers[\"Vary\"] = \"Origin\"\n preflight_headers.update(\n {\n \"Access-Control-Allow-Methods\": \", \".join(allow_methods),\n \"Access-Control-Max-Age\": str(max_age),\n }\n )\n if allow_headers and \"*\" not in allow_headers:\n preflight_headers[\"Access-Control-Allow-Headers\"] = \", \".join(allow_headers)\n if allow_credentials:\n preflight_headers[\"Access-Control-Allow-Credentials\"] = \"true\"\n\n self.app = app\n self.allow_origins = allow_origins\n self.allow_methods = allow_methods\n self.allow_headers = allow_headers\n self.allow_all_origins = \"*\" in allow_origins\n self.allow_all_headers = \"*\" in allow_headers\n self.allow_origin_regex = compiled_allow_origin_regex\n self.simple_headers = simple_headers\n self.preflight_headers = preflight_headers\n\n def __call__(self, scope: Scope):\n if scope[\"type\"] == \"http\":\n method = scope[\"method\"]\n headers = Headers(scope[\"headers\"])\n origin = headers.get(\"origin\")\n\n if origin is not None:\n if method == \"OPTIONS\" and \"access-control-request-method\" in headers:\n return self.preflight_response(request_headers=headers)\n else:\n return functools.partial(\n self.simple_response, scope=scope, origin=origin\n )\n\n return self.app(scope)\n\n def is_allowed_origin(self, origin):\n if self.allow_all_origins:\n return True\n\n if self.allow_origin_regex is not None and self.allow_origin_regex.match(\n origin\n ):\n return True\n\n return origin in self.allow_origins\n\n def preflight_response(self, request_headers):\n requested_origin = request_headers[\"origin\"]\n requested_method = request_headers[\"access-control-request-method\"]\n requested_headers = request_headers.get(\"access-control-request-headers\")\n requested_cookie = \"cookie\" in request_headers\n\n headers = dict(self.preflight_headers)\n failures = []\n\n if self.is_allowed_origin(origin=requested_origin):\n if not self.allow_all_origins:\n # If self.allow_all_origins is True, then the \"Access-Control-Allow-Origin\"\n # header is already set to \"*\".\n # If we only allow specific origins, then we have to mirror back\n # the Origin header in the response.\n headers[\"Access-Control-Allow-Origin\"] = requested_origin\n else:\n failures.append(\"origin\")\n\n if requested_method not in self.allow_methods:\n failures.append(\"method\")\n\n # If we allow all headers, then we have to mirror back any requested\n # headers in the response.\n if self.allow_all_headers and requested_headers is not None:\n headers[\"Access-Control-Allow-Headers\"] = requested_headers\n elif requested_headers is not None:\n for header in requested_headers.split(\",\"):\n if header.strip() not in self.allow_headers:\n failures.append(\"headers\")\n\n # We don't strictly need to use 400 responses here, since its up to\n # the browser to enforce the CORS policy, but its more informative\n # if we do.\n if failures:\n failure_text = \"Disallowed CORS \" + \", \".join(failures)\n return 
PlainTextResponse(failure_text, status_code=400, headers=headers)\n\n return PlainTextResponse(\"OK\", status_code=200, headers=headers)\n\n async def simple_response(self, receive, send, scope=None, origin=None):\n inner = self.app(scope)\n send = functools.partial(self.send, send=send, origin=origin)\n await inner(receive, send)\n\n async def send(self, message, send=None, origin=None):\n if message[\"type\"] != \"http.response.start\":\n await send(message)\n return\n\n message.setdefault(\"headers\", [])\n headers = MutableHeaders(message[\"headers\"])\n\n # If we only allow specific origins, then we have to mirror back\n # the Origin header in the response.\n if not self.allow_all_origins and self.is_allowed_origin(origin=origin):\n headers[\"Access-Control-Allow-Origin\"] = origin\n headers.update(self.simple_headers)\n await send(message)\n", "path": "starlette/middleware/cors.py"}], "after_files": [{"content": "from starlette.datastructures import Headers, MutableHeaders, URL\nfrom starlette.responses import PlainTextResponse\nfrom starlette.types import ASGIApp, ASGIInstance, Scope\nimport functools\nimport typing\nimport re\n\n\nALL_METHODS = (\"DELETE\", \"GET\", \"OPTIONS\", \"PATCH\", \"POST\", \"PUT\")\n\n\nclass CORSMiddleware:\n def __init__(\n self,\n app: ASGIApp,\n allow_origins: typing.Sequence[str] = (),\n allow_methods: typing.Sequence[str] = (\"GET\",),\n allow_headers: typing.Sequence[str] = (),\n allow_credentials: bool = False,\n allow_origin_regex: str = None,\n expose_headers: typing.Sequence[str] = (),\n max_age: int = 600,\n ) -> None:\n\n if \"*\" in allow_methods:\n allow_methods = ALL_METHODS\n\n compiled_allow_origin_regex = None\n if allow_origin_regex is not None:\n compiled_allow_origin_regex = re.compile(allow_origin_regex)\n\n simple_headers = {}\n if \"*\" in allow_origins:\n simple_headers[\"Access-Control-Allow-Origin\"] = \"*\"\n else:\n simple_headers[\"Vary\"] = \"Origin\"\n if allow_credentials:\n simple_headers[\"Access-Control-Allow-Credentials\"] = \"true\"\n if expose_headers:\n simple_headers[\"Access-Control-Expose-Headers\"] = \", \".join(expose_headers)\n\n preflight_headers = {}\n if \"*\" in allow_origins:\n preflight_headers[\"Access-Control-Allow-Origin\"] = \"*\"\n else:\n preflight_headers[\"Vary\"] = \"Origin\"\n preflight_headers.update(\n {\n \"Access-Control-Allow-Methods\": \", \".join(allow_methods),\n \"Access-Control-Max-Age\": str(max_age),\n }\n )\n if allow_headers and \"*\" not in allow_headers:\n preflight_headers[\"Access-Control-Allow-Headers\"] = \", \".join(allow_headers)\n if allow_credentials:\n preflight_headers[\"Access-Control-Allow-Credentials\"] = \"true\"\n\n self.app = app\n self.allow_origins = allow_origins\n self.allow_methods = allow_methods\n self.allow_headers = allow_headers\n self.allow_all_origins = \"*\" in allow_origins\n self.allow_all_headers = \"*\" in allow_headers\n self.allow_origin_regex = compiled_allow_origin_regex\n self.simple_headers = simple_headers\n self.preflight_headers = preflight_headers\n\n def __call__(self, scope: Scope):\n if scope[\"type\"] == \"http\":\n method = scope[\"method\"]\n headers = Headers(scope[\"headers\"])\n origin = headers.get(\"origin\")\n\n if origin is not None:\n if method == \"OPTIONS\" and \"access-control-request-method\" in headers:\n return self.preflight_response(request_headers=headers)\n else:\n return functools.partial(\n self.simple_response, scope=scope, request_headers=headers\n )\n\n return self.app(scope)\n\n def is_allowed_origin(self, 
origin):\n if self.allow_all_origins:\n return True\n\n if self.allow_origin_regex is not None and self.allow_origin_regex.match(\n origin\n ):\n return True\n\n return origin in self.allow_origins\n\n def preflight_response(self, request_headers):\n requested_origin = request_headers[\"origin\"]\n requested_method = request_headers[\"access-control-request-method\"]\n requested_headers = request_headers.get(\"access-control-request-headers\")\n requested_cookie = \"cookie\" in request_headers\n\n headers = dict(self.preflight_headers)\n failures = []\n\n if self.is_allowed_origin(origin=requested_origin):\n if not self.allow_all_origins:\n # If self.allow_all_origins is True, then the \"Access-Control-Allow-Origin\"\n # header is already set to \"*\".\n # If we only allow specific origins, then we have to mirror back\n # the Origin header in the response.\n headers[\"Access-Control-Allow-Origin\"] = requested_origin\n else:\n failures.append(\"origin\")\n\n if requested_method not in self.allow_methods:\n failures.append(\"method\")\n\n # If we allow all headers, then we have to mirror back any requested\n # headers in the response.\n if self.allow_all_headers and requested_headers is not None:\n headers[\"Access-Control-Allow-Headers\"] = requested_headers\n elif requested_headers is not None:\n for header in requested_headers.split(\",\"):\n if header.strip() not in self.allow_headers:\n failures.append(\"headers\")\n\n # We don't strictly need to use 400 responses here, since its up to\n # the browser to enforce the CORS policy, but its more informative\n # if we do.\n if failures:\n failure_text = \"Disallowed CORS \" + \", \".join(failures)\n return PlainTextResponse(failure_text, status_code=400, headers=headers)\n\n return PlainTextResponse(\"OK\", status_code=200, headers=headers)\n\n async def simple_response(self, receive, send, scope=None, request_headers=None):\n inner = self.app(scope)\n send = functools.partial(self.send, send=send, request_headers=request_headers)\n await inner(receive, send)\n\n async def send(self, message, send=None, request_headers=None):\n if message[\"type\"] != \"http.response.start\":\n await send(message)\n return\n\n message.setdefault(\"headers\", [])\n headers = MutableHeaders(message[\"headers\"])\n origin = request_headers[\"Origin\"]\n has_cookie = \"cookie\" in request_headers\n\n # If request includes any cookie headers, then we must respond\n # with the specific origin instead of '*'.\n if self.allow_all_origins and has_cookie:\n self.simple_headers[\"Access-Control-Allow-Origin\"] = origin\n\n # If we only allow specific origins, then we have to mirror back\n # the Origin header in the response.\n elif not self.allow_all_origins and self.is_allowed_origin(origin=origin):\n headers[\"Access-Control-Allow-Origin\"] = origin\n if \"vary\" in headers:\n self.simple_headers[\"Vary\"] = f\"{headers.get('vary')}, Origin\"\n headers.update(self.simple_headers)\n await send(message)\n", "path": "starlette/middleware/cors.py"}]} | 1,927 | 562 |
gh_patches_debug_16535 | rasdani/github-patches | git_diff | bridgecrewio__checkov-4243 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CKV_AWS_118 Fails With MonitoringInterval Integer Value
**Describe the issue**
CKV_AWS_118 fails if the `MonitoringInterval` value is not wrapped in double quotes despite the fact that the source code says it should allow ints and strings.
**Examples**
```
RDSinstance:
Type: AWS::RDS::DBInstance
Properties:
DBClusterIdentifier: !Ref DBCluster
DBInstanceClass: !Ref DbType
DBInstanceIdentifier: !Sub ${AppName}-${EnvironmentName}
DBParameterGroupName: !Ref DbParameterGroup
DBSubnetGroupName: !Ref DBSubnetGroup
Engine: aurora-mysql
MonitoringInterval: 60
MonitoringRoleArn: !GetAtt RdsMonitoringRole.Arn
PubliclyAccessible: 'false'
```
**Version (please complete the following information):**
- Checkov Version 2.2.255 (CLI)
**Additional context**
The test failure happens with the CLI and also using a GitHub Action `bridgecrewio/checkov-action@master`

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/cloudformation/checks/resource/base_resource_value_check.py`
Content:
```
1 import re
2 from abc import abstractmethod
3 from collections.abc import Iterable
4 from typing import List, Any, Dict
5
6 from checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck
7 from checkov.cloudformation.context_parser import ContextParser
8 from checkov.common.parsers.node import StrNode, DictNode
9 from checkov.common.models.consts import ANY_VALUE
10 from checkov.common.models.enums import CheckResult, CheckCategories
11 from checkov.common.util.type_forcers import force_list
12 from checkov.common.util.var_utils import is_cloudformation_variable_dependent
13
14 VARIABLE_DEPENDANT_REGEX = re.compile(r"(?:Ref)\.[^\s]+")
15
16
17 class BaseResourceValueCheck(BaseResourceCheck):
18 def __init__(
19 self,
20 name: str,
21 id: str,
22 categories: "Iterable[CheckCategories]",
23 supported_resources: "Iterable[str]",
24 missing_block_result: CheckResult = CheckResult.FAILED,
25 ) -> None:
26 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
27 self.missing_block_result = missing_block_result
28
29 @staticmethod
30 def _filter_key_path(path: str) -> List[str]:
31 """
32 Filter an attribute path to contain only named attributes by dropping array indices from the path)
33 :param path: valid JSONPath of an attribute
34 :return: List of named attributes with respect to the input JSONPath order
35 """
36 regex = re.compile(r"^\[?\d+\]?$")
37 return [x for x in path.split("/") if not re.search(regex, x)]
38
39 @staticmethod
40 def _is_variable_dependant(value: Any) -> bool:
41 return is_cloudformation_variable_dependent(value)
42
43 @staticmethod
44 def _is_nesting_key(inspected_attributes: List[str], key: str) -> bool:
45 """
46 Resolves whether a key is a subset of the inspected nesting attributes
47 :param inspected_attributes: list of nesting attributes
48 :param key: JSONPath key of an attribute
49 :return: True/False
50 """
51 return any(x in key for x in inspected_attributes)
52
53 def scan_resource_conf(self, conf: Dict[StrNode, DictNode]) -> CheckResult:
54 inspected_key = self.get_inspected_key()
55 expected_values = self.get_expected_values()
56 path_elements = inspected_key.split("/")
57 matches = ContextParser.search_deep_keys(path_elements[-1], conf, [])
58 if len(matches) > 0:
59 for match in matches:
60 # CFN files are parsed differently from terraform, which causes the path search above to behave differently.
61 # The tesult is path parts with integer indexes, instead of strings like '[0]'. This logic replaces
62 # those, allowing inspected_keys in checks to use the same syntax.
63 for i in range(0, len(match)):
64 if type(match[i]) == int:
65 match[i] = f"[{match[i]}]"
66
67 if match[:-1] == path_elements:
68 # Inspected key exists
69 value = match[-1]
70 if ANY_VALUE in expected_values and value is not None and (not isinstance(value, str) or value):
71 # Key is found on the configuration - if it accepts any value, the check is PASSED
72 return CheckResult.PASSED
73 if isinstance(value, list) and len(value) == 1:
74 value = value[0]
75 if self._is_variable_dependant(value):
76 # If the tested attribute is variable-dependant, then result is PASSED
77 return CheckResult.PASSED
78 if value in expected_values:
79 return CheckResult.PASSED
80
81 # handle boolean case sensitivity (e.g., CFN accepts the string "true" as a boolean)
82 if isinstance(value, str) and value.lower() in ('true', 'false'):
83 value = value.lower() == 'true'
84 if value in expected_values:
85 return CheckResult.PASSED
86 return CheckResult.FAILED
87
88 return self.missing_block_result
89
90 @abstractmethod
91 def get_inspected_key(self) -> str:
92 """
93 :return: JSONPath syntax path of the checked attribute
94 """
95 raise NotImplementedError()
96
97 def get_expected_values(self) -> List[Any]:
98 """
99 Override the method with the list of acceptable values if the check has more than one possible expected value, given
100 the inspected key
101 :return: List of expected values, defaults to a list of the expected value
102 """
103 return [self.get_expected_value()]
104
105 def get_expected_value(self) -> Any:
106 """
107 Returns the default expected value, governed by provider best practices
108 """
109 return True
110
111 def get_evaluated_keys(self) -> List[str]:
112 return force_list(self.get_inspected_key())
113
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/cloudformation/checks/resource/base_resource_value_check.py b/checkov/cloudformation/checks/resource/base_resource_value_check.py
--- a/checkov/cloudformation/checks/resource/base_resource_value_check.py
+++ b/checkov/cloudformation/checks/resource/base_resource_value_check.py
@@ -60,7 +60,8 @@
# CFN files are parsed differently from terraform, which causes the path search above to behave differently.
                 # The result is path parts with integer indexes, instead of strings like '[0]'. This logic replaces
# those, allowing inspected_keys in checks to use the same syntax.
- for i in range(0, len(match)):
+ # The last value shouldn't be changed, because it could be indeed a valid number
+ for i in range(0, len(match) - 1):
if type(match[i]) == int:
match[i] = f"[{match[i]}]"
| {"golden_diff": "diff --git a/checkov/cloudformation/checks/resource/base_resource_value_check.py b/checkov/cloudformation/checks/resource/base_resource_value_check.py\n--- a/checkov/cloudformation/checks/resource/base_resource_value_check.py\n+++ b/checkov/cloudformation/checks/resource/base_resource_value_check.py\n@@ -60,7 +60,8 @@\n # CFN files are parsed differently from terraform, which causes the path search above to behave differently.\n # The tesult is path parts with integer indexes, instead of strings like '[0]'. This logic replaces\n # those, allowing inspected_keys in checks to use the same syntax.\n- for i in range(0, len(match)):\n+ # The last value shouldn't be changed, because it could be indeed a valid number\n+ for i in range(0, len(match) - 1):\n if type(match[i]) == int:\n match[i] = f\"[{match[i]}]\"\n", "issue": "CKV_AWS_118 Fails With MonitoringInterval Integer Value\n**Describe the issue**\r\nCKV_AWS_118 fails if the `MonitoringInterval` value is not wrapped in double quotes despite the fact that the source code says it should allow ints and strings.\r\n\r\n**Examples**\r\n```\r\nRDSinstance:\r\n Type: AWS::RDS::DBInstance\r\n Properties:\r\n DBClusterIdentifier: !Ref DBCluster\r\n DBInstanceClass: !Ref DbType\r\n DBInstanceIdentifier: !Sub ${AppName}-${EnvironmentName}\r\n DBParameterGroupName: !Ref DbParameterGroup\r\n DBSubnetGroupName: !Ref DBSubnetGroup\r\n Engine: aurora-mysql\r\n MonitoringInterval: 60\r\n MonitoringRoleArn: !GetAtt RdsMonitoringRole.Arn\r\n PubliclyAccessible: 'false'\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version 2.2.255 (CLI)\r\n\r\n**Additional context**\r\nThe test failure happens with the CLI and also using a GItHub Action `bridgecrewio/checkov-action@master`\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "import re\nfrom abc import abstractmethod\nfrom collections.abc import Iterable\nfrom typing import List, Any, Dict\n\nfrom checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.cloudformation.context_parser import ContextParser\nfrom checkov.common.parsers.node import StrNode, DictNode\nfrom checkov.common.models.consts import ANY_VALUE\nfrom checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.common.util.type_forcers import force_list\nfrom checkov.common.util.var_utils import is_cloudformation_variable_dependent\n\nVARIABLE_DEPENDANT_REGEX = re.compile(r\"(?:Ref)\\.[^\\s]+\")\n\n\nclass BaseResourceValueCheck(BaseResourceCheck):\n def __init__(\n self,\n name: str,\n id: str,\n categories: \"Iterable[CheckCategories]\",\n supported_resources: \"Iterable[str]\",\n missing_block_result: CheckResult = CheckResult.FAILED,\n ) -> None:\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n self.missing_block_result = missing_block_result\n\n @staticmethod\n def _filter_key_path(path: str) -> List[str]:\n \"\"\"\n Filter an attribute path to contain only named attributes by dropping array indices from the path)\n :param path: valid JSONPath of an attribute\n :return: List of named attributes with respect to the input JSONPath order\n \"\"\"\n regex = re.compile(r\"^\\[?\\d+\\]?$\")\n return [x for x in path.split(\"/\") if not re.search(regex, x)]\n\n @staticmethod\n def _is_variable_dependant(value: Any) -> bool:\n return is_cloudformation_variable_dependent(value)\n\n @staticmethod\n def _is_nesting_key(inspected_attributes: List[str], key: str) -> bool:\n \"\"\"\n 
Resolves whether a key is a subset of the inspected nesting attributes\n :param inspected_attributes: list of nesting attributes\n :param key: JSONPath key of an attribute\n :return: True/False\n \"\"\"\n return any(x in key for x in inspected_attributes)\n\n def scan_resource_conf(self, conf: Dict[StrNode, DictNode]) -> CheckResult:\n inspected_key = self.get_inspected_key()\n expected_values = self.get_expected_values()\n path_elements = inspected_key.split(\"/\")\n matches = ContextParser.search_deep_keys(path_elements[-1], conf, [])\n if len(matches) > 0:\n for match in matches:\n # CFN files are parsed differently from terraform, which causes the path search above to behave differently.\n # The tesult is path parts with integer indexes, instead of strings like '[0]'. This logic replaces\n # those, allowing inspected_keys in checks to use the same syntax.\n for i in range(0, len(match)):\n if type(match[i]) == int:\n match[i] = f\"[{match[i]}]\"\n\n if match[:-1] == path_elements:\n # Inspected key exists\n value = match[-1]\n if ANY_VALUE in expected_values and value is not None and (not isinstance(value, str) or value):\n # Key is found on the configuration - if it accepts any value, the check is PASSED\n return CheckResult.PASSED\n if isinstance(value, list) and len(value) == 1:\n value = value[0]\n if self._is_variable_dependant(value):\n # If the tested attribute is variable-dependant, then result is PASSED\n return CheckResult.PASSED\n if value in expected_values:\n return CheckResult.PASSED\n\n # handle boolean case sensitivity (e.g., CFN accepts the string \"true\" as a boolean)\n if isinstance(value, str) and value.lower() in ('true', 'false'):\n value = value.lower() == 'true'\n if value in expected_values:\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n return self.missing_block_result\n\n @abstractmethod\n def get_inspected_key(self) -> str:\n \"\"\"\n :return: JSONPath syntax path of the checked attribute\n \"\"\"\n raise NotImplementedError()\n\n def get_expected_values(self) -> List[Any]:\n \"\"\"\n Override the method with the list of acceptable values if the check has more than one possible expected value, given\n the inspected key\n :return: List of expected values, defaults to a list of the expected value\n \"\"\"\n return [self.get_expected_value()]\n\n def get_expected_value(self) -> Any:\n \"\"\"\n Returns the default expected value, governed by provider best practices\n \"\"\"\n return True\n\n def get_evaluated_keys(self) -> List[str]:\n return force_list(self.get_inspected_key())\n", "path": "checkov/cloudformation/checks/resource/base_resource_value_check.py"}], "after_files": [{"content": "import re\nfrom abc import abstractmethod\nfrom collections.abc import Iterable\nfrom typing import List, Any, Dict\n\nfrom checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.cloudformation.context_parser import ContextParser\nfrom checkov.common.parsers.node import StrNode, DictNode\nfrom checkov.common.models.consts import ANY_VALUE\nfrom checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.common.util.type_forcers import force_list\nfrom checkov.common.util.var_utils import is_cloudformation_variable_dependent\n\nVARIABLE_DEPENDANT_REGEX = re.compile(r\"(?:Ref)\\.[^\\s]+\")\n\n\nclass BaseResourceValueCheck(BaseResourceCheck):\n def __init__(\n self,\n name: str,\n id: str,\n categories: \"Iterable[CheckCategories]\",\n supported_resources: \"Iterable[str]\",\n missing_block_result: 
CheckResult = CheckResult.FAILED,\n ) -> None:\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n self.missing_block_result = missing_block_result\n\n @staticmethod\n def _filter_key_path(path: str) -> List[str]:\n \"\"\"\n Filter an attribute path to contain only named attributes by dropping array indices from the path)\n :param path: valid JSONPath of an attribute\n :return: List of named attributes with respect to the input JSONPath order\n \"\"\"\n regex = re.compile(r\"^\\[?\\d+\\]?$\")\n return [x for x in path.split(\"/\") if not re.search(regex, x)]\n\n @staticmethod\n def _is_variable_dependant(value: Any) -> bool:\n return is_cloudformation_variable_dependent(value)\n\n @staticmethod\n def _is_nesting_key(inspected_attributes: List[str], key: str) -> bool:\n \"\"\"\n Resolves whether a key is a subset of the inspected nesting attributes\n :param inspected_attributes: list of nesting attributes\n :param key: JSONPath key of an attribute\n :return: True/False\n \"\"\"\n return any(x in key for x in inspected_attributes)\n\n def scan_resource_conf(self, conf: Dict[StrNode, DictNode]) -> CheckResult:\n inspected_key = self.get_inspected_key()\n expected_values = self.get_expected_values()\n path_elements = inspected_key.split(\"/\")\n matches = ContextParser.search_deep_keys(path_elements[-1], conf, [])\n if len(matches) > 0:\n for match in matches:\n # CFN files are parsed differently from terraform, which causes the path search above to behave differently.\n # The tesult is path parts with integer indexes, instead of strings like '[0]'. This logic replaces\n # those, allowing inspected_keys in checks to use the same syntax.\n # The last value shouldn't be changed, because it could be indeed a valid number\n for i in range(0, len(match) - 1):\n if type(match[i]) == int:\n match[i] = f\"[{match[i]}]\"\n\n if match[:-1] == path_elements:\n # Inspected key exists\n value = match[-1]\n if ANY_VALUE in expected_values and value is not None and (not isinstance(value, str) or value):\n # Key is found on the configuration - if it accepts any value, the check is PASSED\n return CheckResult.PASSED\n if isinstance(value, list) and len(value) == 1:\n value = value[0]\n if self._is_variable_dependant(value):\n # If the tested attribute is variable-dependant, then result is PASSED\n return CheckResult.PASSED\n if value in expected_values:\n return CheckResult.PASSED\n\n # handle boolean case sensitivity (e.g., CFN accepts the string \"true\" as a boolean)\n if isinstance(value, str) and value.lower() in ('true', 'false'):\n value = value.lower() == 'true'\n if value in expected_values:\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n return self.missing_block_result\n\n @abstractmethod\n def get_inspected_key(self) -> str:\n \"\"\"\n :return: JSONPath syntax path of the checked attribute\n \"\"\"\n raise NotImplementedError()\n\n def get_expected_values(self) -> List[Any]:\n \"\"\"\n Override the method with the list of acceptable values if the check has more than one possible expected value, given\n the inspected key\n :return: List of expected values, defaults to a list of the expected value\n \"\"\"\n return [self.get_expected_value()]\n\n def get_expected_value(self) -> Any:\n \"\"\"\n Returns the default expected value, governed by provider best practices\n \"\"\"\n return True\n\n def get_evaluated_keys(self) -> List[str]:\n return force_list(self.get_inspected_key())\n", "path": 
"checkov/cloudformation/checks/resource/base_resource_value_check.py"}]} | 1,841 | 202 |
gh_patches_debug_59532 | rasdani/github-patches | git_diff | mit-ll-responsible-ai__hydra-zen-97 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PEP 561 compatibility
Hi,
Would it be possible to make hydra-zen compliant with [PEP 561](https://www.python.org/dev/peps/pep-0561) by distributing a `py.typed` file with the package?
Currently I'm getting `Skipping analyzing "hydra_zen": found module but no type hints or library stubs` when I run mypy on a test file. Here are steps to reproduce this error:
```text
$ pip install hydra-zen mypy
...
Successfully installed PyYAML-5.4.1 antlr4-python3-runtime-4.8 hydra-core-1.1.1 hydra-zen-0.2.0 mypy-0.910 mypy-extensions-0.4.3 omegaconf-2.1.1 toml-0.10.2 typing-extensions-3.10.0.2
...
$ echo "from hydra_zen import builds" > tmp.py
$ mypy tmp.py
tmp.py:1: error: Skipping analyzing "hydra_zen": found module but no type hints or library stubs
tmp.py:1: note: See https://mypy.readthedocs.io/en/stable/running_mypy.html#missing-imports
Found 1 error in 1 file (checked 1 source file)
```
I believe that adding an empty `py.typed` file to the `src/hydra_zen` directory (and modifying `setup.py` so that the `py.typed` file is distributed with the `hydra-zen` package) would make it possible for type checkers following PEP 561 to discover the type hints in `src`.
(I'd be happy to submit a PR to this effect.)
--- END ISSUE ---
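As a rough sketch of the change the report is asking for (assuming a setuptools build with the src layout mentioned above; the exact keyword arguments here are illustrative, not taken from the project):
```python
# Sketch: make a src-layout package PEP 561 compliant.
# Step 1 (outside setup.py): add an empty marker file at src/hydra_zen/py.typed
# Step 2: tell setuptools to ship the marker in sdists and wheels.
from setuptools import find_packages, setup

setup(
    name="hydra_zen",
    packages=find_packages(where="src"),
    package_dir={"": "src"},
    package_data={"hydra_zen": ["py.typed"]},  # distribute the PEP 561 marker
)
```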
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright (c) 2021 Massachusetts Institute of Technology
2 # SPDX-License-Identifier: MIT
3
4 from setuptools import find_packages, setup
5
6 import versioneer
7
8 DISTNAME = "hydra_zen"
9 LICENSE = "MIT"
10 AUTHOR = "Justin Goodwin, Ryan Soklaski"
11 AUTHOR_EMAIL = "[email protected]"
12 URL = "https://github.com/mit-ll-responsible-ai/hydra_zen"
13 CLASSIFIERS = [
14 "Development Status :: 4 - Beta",
15 "License :: OSI Approved :: MIT License",
16 "Operating System :: OS Independent",
17 "Intended Audience :: Science/Research",
18 "Programming Language :: Python :: 3.6",
19 "Programming Language :: Python :: 3.7",
20 "Programming Language :: Python :: 3.8",
21 "Programming Language :: Python :: 3.9",
22 "Topic :: Scientific/Engineering",
23 ]
24 KEYWORDS = "machine learning research configuration scalable reproducible"
25 INSTALL_REQUIRES = [
26 "hydra-core >= 1.1.0",
27 "typing-extensions >= 3.7.4.1",
28 ]
29 TESTS_REQUIRE = [
30 "pytest >= 3.8",
31 "hypothesis >= 5.32.0",
32 ]
33
34 DESCRIPTION = "Utilities for making hydra scale to ML workflows"
35 LONG_DESCRIPTION = """
36 hydra-zen helps you configure your project using the power of Hydra, while enjoying the Zen of Python!
37
38 hydra-zen eliminates the boilerplate code that you write to configure, orchestrate, and organize the results of large-scale projects, such as machine learning experiments. It does so by providing Hydra-compatible tools that dynamically generate "structured configurations" of your code, and enables Python-centric workflows for running configured instances of your code.
39
40 hydra-zen offers:
41
42 - Functions for automatically and dynamically generating structured configs that can be used to fully or partially instantiate objects in your application.
43 - The ability to launch Hydra jobs, complete with parameter sweeps and multi-run configurations, from within a notebook or any other Python environment.
44 - Incisive type annotations that provide enriched context about your project's configurations to IDEs, type checkers, and other tooling.
45 - Runtime validation of configurations to catch mistakes before your application launches.
46 - Equal support for both object-oriented libraries (e.g., torch.nn) and functional ones (e.g., jax and numpy).
47
48 These functions and capabilities can be used to great effect alongside PyTorch Lightning to design boilerplate-free machine learning projects!
49 """
50
51
52 setup(
53 name=DISTNAME,
54 version=versioneer.get_version(),
55 cmdclass=versioneer.get_cmdclass(),
56 license=LICENSE,
57 author=AUTHOR,
58 author_email=AUTHOR_EMAIL,
59 classifiers=CLASSIFIERS,
60 keywords=KEYWORDS,
61 description=DESCRIPTION,
62 long_description=LONG_DESCRIPTION,
63 install_requires=INSTALL_REQUIRES,
64 tests_require=TESTS_REQUIRE,
65 url=URL,
66 download_url="https://github.com/mit-ll-responsible-ai/hydra-zen/tarball/"
67 + versioneer.get_version(),
68 python_requires=">=3.6",
69 packages=find_packages(where="src", exclude=["tests", "tests.*"]),
70 package_dir={"": "src"},
71 )
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -68,4 +68,5 @@
python_requires=">=3.6",
packages=find_packages(where="src", exclude=["tests", "tests.*"]),
package_dir={"": "src"},
+ package_data={"hydra_zen": ["py.typed"]}
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -68,4 +68,5 @@\n python_requires=\">=3.6\",\n packages=find_packages(where=\"src\", exclude=[\"tests\", \"tests.*\"]),\n package_dir={\"\": \"src\"},\n+ package_data={\"hydra_zen\": [\"py.typed\"]}\n )\n", "issue": "PEP 561 compatibility\nHi,\r\n\r\nWould it be possible to make hydra-zen compliant with [PEP 561](https://www.python.org/dev/peps/pep-0561) by distributing a `py.typed` file with the package?\r\n\r\nCurrently I'm getting `Skipping analyzing \"hydra_zen\": found module but no type hints or library stubs` when I run mypy on a test file. Here are steps to reproduce this error:\r\n```text\r\n$ pip install hydra-zen mypy\r\n...\r\nSuccessfully installed PyYAML-5.4.1 antlr4-python3-runtime-4.8 hydra-core-1.1.1 hydra-zen-0.2.0 mypy-0.910 mypy-extensions-0.4.3 omegaconf-2.1.1 toml-0.10.2 typing-extensions-3.10.0.2\r\n...\r\n$ echo \"from hydra_zen import builds\" > tmp.py\r\n$ mypy tmp.py\r\ntmp.py:1: error: Skipping analyzing \"hydra_zen\": found module but no type hints or library stubs\r\ntmp.py:1: note: See https://mypy.readthedocs.io/en/stable/running_mypy.html#missing-imports\r\nFound 1 error in 1 file (checked 1 source file)\r\n```\r\n\r\nI believe that adding an empty `py.typed` file to the `src/hydra_zen` directory (and modifying `setup.py` so that the `py.typed` file is distributed with the `hydra-zen` package) would make it possible for type checkers following PEP 561 to discover the type hints in `src`.\r\n(I'd be happy to submit a PR to this effect.)\n", "before_files": [{"content": "# Copyright (c) 2021 Massachusetts Institute of Technology\n# SPDX-License-Identifier: MIT\n\nfrom setuptools import find_packages, setup\n\nimport versioneer\n\nDISTNAME = \"hydra_zen\"\nLICENSE = \"MIT\"\nAUTHOR = \"Justin Goodwin, Ryan Soklaski\"\nAUTHOR_EMAIL = \"[email protected]\"\nURL = \"https://github.com/mit-ll-responsible-ai/hydra_zen\"\nCLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Intended Audience :: Science/Research\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering\",\n]\nKEYWORDS = \"machine learning research configuration scalable reproducible\"\nINSTALL_REQUIRES = [\n \"hydra-core >= 1.1.0\",\n \"typing-extensions >= 3.7.4.1\",\n]\nTESTS_REQUIRE = [\n \"pytest >= 3.8\",\n \"hypothesis >= 5.32.0\",\n]\n\nDESCRIPTION = \"Utilities for making hydra scale to ML workflows\"\nLONG_DESCRIPTION = \"\"\"\nhydra-zen helps you configure your project using the power of Hydra, while enjoying the Zen of Python!\n\nhydra-zen eliminates the boilerplate code that you write to configure, orchestrate, and organize the results of large-scale projects, such as machine learning experiments. 
It does so by providing Hydra-compatible tools that dynamically generate \"structured configurations\" of your code, and enables Python-centric workflows for running configured instances of your code.\n\nhydra-zen offers:\n\n - Functions for automatically and dynamically generating structured configs that can be used to fully or partially instantiate objects in your application.\n - The ability to launch Hydra jobs, complete with parameter sweeps and multi-run configurations, from within a notebook or any other Python environment.\n - Incisive type annotations that provide enriched context about your project's configurations to IDEs, type checkers, and other tooling.\n - Runtime validation of configurations to catch mistakes before your application launches.\n - Equal support for both object-oriented libraries (e.g., torch.nn) and functional ones (e.g., jax and numpy).\n\nThese functions and capabilities can be used to great effect alongside PyTorch Lightning to design boilerplate-free machine learning projects!\n\"\"\"\n\n\nsetup(\n name=DISTNAME,\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n license=LICENSE,\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n classifiers=CLASSIFIERS,\n keywords=KEYWORDS,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n install_requires=INSTALL_REQUIRES,\n tests_require=TESTS_REQUIRE,\n url=URL,\n download_url=\"https://github.com/mit-ll-responsible-ai/hydra-zen/tarball/\"\n + versioneer.get_version(),\n python_requires=\">=3.6\",\n packages=find_packages(where=\"src\", exclude=[\"tests\", \"tests.*\"]),\n package_dir={\"\": \"src\"},\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright (c) 2021 Massachusetts Institute of Technology\n# SPDX-License-Identifier: MIT\n\nfrom setuptools import find_packages, setup\n\nimport versioneer\n\nDISTNAME = \"hydra_zen\"\nLICENSE = \"MIT\"\nAUTHOR = \"Justin Goodwin, Ryan Soklaski\"\nAUTHOR_EMAIL = \"[email protected]\"\nURL = \"https://github.com/mit-ll-responsible-ai/hydra_zen\"\nCLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Intended Audience :: Science/Research\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering\",\n]\nKEYWORDS = \"machine learning research configuration scalable reproducible\"\nINSTALL_REQUIRES = [\n \"hydra-core >= 1.1.0\",\n \"typing-extensions >= 3.7.4.1\",\n]\nTESTS_REQUIRE = [\n \"pytest >= 3.8\",\n \"hypothesis >= 5.32.0\",\n]\n\nDESCRIPTION = \"Utilities for making hydra scale to ML workflows\"\nLONG_DESCRIPTION = \"\"\"\nhydra-zen helps you configure your project using the power of Hydra, while enjoying the Zen of Python!\n\nhydra-zen eliminates the boilerplate code that you write to configure, orchestrate, and organize the results of large-scale projects, such as machine learning experiments. 
It does so by providing Hydra-compatible tools that dynamically generate \"structured configurations\" of your code, and enables Python-centric workflows for running configured instances of your code.\n\nhydra-zen offers:\n\n - Functions for automatically and dynamically generating structured configs that can be used to fully or partially instantiate objects in your application.\n - The ability to launch Hydra jobs, complete with parameter sweeps and multi-run configurations, from within a notebook or any other Python environment.\n - Incisive type annotations that provide enriched context about your project's configurations to IDEs, type checkers, and other tooling.\n - Runtime validation of configurations to catch mistakes before your application launches.\n - Equal support for both object-oriented libraries (e.g., torch.nn) and functional ones (e.g., jax and numpy).\n\nThese functions and capabilities can be used to great effect alongside PyTorch Lightning to design boilerplate-free machine learning projects!\n\"\"\"\n\n\nsetup(\n name=DISTNAME,\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n license=LICENSE,\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n classifiers=CLASSIFIERS,\n keywords=KEYWORDS,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n install_requires=INSTALL_REQUIRES,\n tests_require=TESTS_REQUIRE,\n url=URL,\n download_url=\"https://github.com/mit-ll-responsible-ai/hydra-zen/tarball/\"\n + versioneer.get_version(),\n python_requires=\">=3.6\",\n packages=find_packages(where=\"src\", exclude=[\"tests\", \"tests.*\"]),\n package_dir={\"\": \"src\"},\n package_data={\"hydra_zen\": [\"py.typed\"]}\n)\n", "path": "setup.py"}]} | 1,473 | 81 |
gh_patches_debug_9569 | rasdani/github-patches | git_diff | ckan__ckan-2563 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Include the main.debug.css
Hi, I'm new to CKAN in my organization. I turned debug to true for development and encountered the `AttributeError: 'module' object has no attribute 'css/main.debug.css'` error. It took me a while to figure out that I had to compile the LESS files to get it.
Wouldn't it be easier to include this file so that debug mode automatically works without needing to change anything else?
--- END ISSUE ---
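One possible direction, sketched only for illustration (the function name `choose_main_css` and the `public_dir` argument are made up here and are not CKAN's actual API), is to fall back to the regular stylesheet whenever the debug build has not been compiled:
```python
import os


def choose_main_css(css_file, debug=False, public_dir="ckan/public"):
    """Sketch: only switch to main.debug.css if that file actually exists."""
    assert css_file.endswith(".css")
    new_css = css_file
    if debug and css_file == "/base/css/main.css":
        candidate = "/base/css/main.debug.css"
        if os.path.exists(os.path.join(public_dir, candidate.lstrip("/"))):
            new_css = candidate
    return new_css
```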
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckan/lib/app_globals.py`
Content:
```
1 ''' The application's Globals object '''
2
3 import logging
4 import time
5 from threading import Lock
6 import re
7
8 from paste.deploy.converters import asbool
9 from pylons import config
10
11 import ckan
12 import ckan.model as model
13 import ckan.logic as logic
14
15
16 log = logging.getLogger(__name__)
17
18
19 # mappings translate between config settings and globals because our naming
20 # conventions are not well defined and/or implemented
21 mappings = {
22 # 'config_key': 'globals_key',
23 }
24
25
26 # This mapping is only used to define the configuration options (from the
27 # `config` object) that should be copied to the `app_globals` (`g`) object.
28 app_globals_from_config_details = {
29 'ckan.site_title': {},
30 'ckan.site_logo': {},
31 'ckan.site_url': {},
32 'ckan.site_description': {},
33 'ckan.site_about': {},
34 'ckan.site_intro_text': {},
35 'ckan.site_custom_css': {},
36 'ckan.favicon': {}, # default gets set in config.environment.py
37 'ckan.template_head_end': {},
38 'ckan.template_footer_end': {},
39 # has been setup in load_environment():
40 'ckan.site_id': {},
41 'ckan.recaptcha.publickey': {'name': 'recaptcha_publickey'},
42 'ckan.recaptcha.version': {'name': 'recaptcha_version', 'default': '1'},
43 'ckan.template_title_deliminater': {'default': '-'},
44 'ckan.template_head_end': {},
45 'ckan.template_footer_end': {},
46 'ckan.dumps_url': {},
47 'ckan.dumps_format': {},
48 'ofs.impl': {'name': 'ofs_impl'},
49 'ckan.homepage_style': {'default': '1'},
50
51 # split string
52 'search.facets': {'default': 'organization groups tags res_format license_id',
53 'type': 'split',
54 'name': 'facets'},
55 'package_hide_extras': {'type': 'split'},
56 'ckan.plugins': {'type': 'split'},
57
58 # bool
59 'debug': {'default': 'false', 'type' : 'bool'},
60 'ckan.debug_supress_header' : {'default': 'false', 'type' : 'bool'},
61 'ckan.legacy_templates' : {'default': 'false', 'type' : 'bool'},
62 'ckan.tracking_enabled' : {'default': 'false', 'type' : 'bool'},
63
64 # int
65 'ckan.datasets_per_page': {'default': '20', 'type': 'int'},
66 'ckan.activity_list_limit': {'default': '30', 'type': 'int'},
67 'search.facets.default': {'default': '10', 'type': 'int',
68 'name': 'facets_default_number'},
69 }
70
71
72 # A place to store the original config options if we override them
73 _CONFIG_CACHE = {}
74
75 def set_main_css(css_file):
76 ''' Sets the main_css using debug css if needed. The css_file
77 must be of the form file.css '''
78 assert css_file.endswith('.css')
79 if config.get('debug') and css_file == '/base/css/main.css':
80 new_css = '/base/css/main.debug.css'
81 else:
82 new_css = css_file
83 # FIXME we should check the css file exists
84 app_globals.main_css = str(new_css)
85
86
87 def set_app_global(key, value):
88 '''
89 Set a new key on the app_globals (g) object
90
91 It will process the value according to the options on
92 app_globals_from_config_details (if any)
93 '''
94 key, value = process_app_global(key, value)
95 setattr(app_globals, key, value)
96
97
98 def process_app_global(key, value):
99 '''
100 Tweak a key, value pair meant to be set on the app_globals (g) object
101
102 According to the options on app_globals_from_config_details (if any)
103 '''
104 options = app_globals_from_config_details.get(key)
105 key = get_globals_key(key)
106 if options:
107 if 'name' in options:
108 key = options['name']
109 value = value or options.get('default', '')
110
111 data_type = options.get('type')
112 if data_type == 'bool':
113 value = asbool(value)
114 elif data_type == 'int':
115 value = int(value)
116 elif data_type == 'split':
117 value = value.split()
118
119 return key, value
120
121
122 def get_globals_key(key):
123 # create our globals key
124 # these can be specified in mappings or else we remove
125 # the `ckan.` part this is to keep the existing namings
126 # set the value
127 if key in mappings:
128 return mappings[key]
129 elif key.startswith('ckan.'):
130 return key[5:]
131 else:
132 return key
133
134
135 def reset():
136 ''' set updatable values from config '''
137 def get_config_value(key, default=''):
138 if model.meta.engine.has_table('system_info'):
139 value = model.get_system_info(key)
140 else:
141 value = None
142 config_value = config.get(key)
143         # sort encodings if needed
144 if isinstance(config_value, str):
145 try:
146 config_value = config_value.decode('utf-8')
147 except UnicodeDecodeError:
148 config_value = config_value.decode('latin-1')
149 # we want to store the config the first time we get here so we can
150 # reset them if needed
151 if key not in _CONFIG_CACHE:
152 _CONFIG_CACHE[key] = config_value
153 if value is not None:
154 log.debug('config `%s` set to `%s` from db' % (key, value))
155 else:
156 value = _CONFIG_CACHE[key]
157 if value:
158 log.debug('config `%s` set to `%s` from config' % (key, value))
159 else:
160 value = default
161
162 set_app_global(key, value)
163
164 # update the config
165 config[key] = value
166 return value
167
168 # update the config settings in auto update
169 schema = logic.schema.update_configuration_schema()
170 for key in schema.keys():
171 get_config_value(key)
172
173     # custom styling
174 main_css = get_config_value('ckan.main_css', '/base/css/main.css')
175 set_main_css(main_css)
176 # site_url_nice
177 site_url_nice = app_globals.site_url.replace('http://', '')
178 site_url_nice = site_url_nice.replace('www.', '')
179 app_globals.site_url_nice = site_url_nice
180
181 if app_globals.site_logo:
182 app_globals.header_class = 'header-image'
183 elif not app_globals.site_description:
184 app_globals.header_class = 'header-text-logo'
185 else:
186 app_globals.header_class = 'header-text-logo-tagline'
187
188
189 class _Globals(object):
190
191 ''' Globals acts as a container for objects available throughout the
192 life of the application. '''
193
194 def __init__(self):
195 '''One instance of Globals is created during application
196 initialization and is available during requests via the
197 'app_globals' variable
198 '''
199 self._init()
200 self._config_update = None
201 self._mutex = Lock()
202
203 def _check_uptodate(self):
204 ''' check the config is uptodate needed when several instances are
205 running '''
206 value = model.get_system_info('ckan.config_update')
207 if self._config_update != value:
208 if self._mutex.acquire(False):
209 reset()
210 self._config_update = value
211 self._mutex.release()
212
213 def _init(self):
214
215 self.ckan_version = ckan.__version__
216 self.ckan_base_version = re.sub('[^0-9\.]', '', self.ckan_version)
217 if self.ckan_base_version == self.ckan_version:
218 self.ckan_doc_version = 'ckan-{0}'.format(self.ckan_version)
219 else:
220 self.ckan_doc_version = 'latest'
221
222 # process the config details to set globals
223 for key in app_globals_from_config_details.keys():
224 new_key, value = process_app_global(key, config.get(key) or '')
225 setattr(self, new_key, value)
226
227
228 app_globals = _Globals()
229 del _Globals
230
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ckan/lib/app_globals.py b/ckan/lib/app_globals.py
--- a/ckan/lib/app_globals.py
+++ b/ckan/lib/app_globals.py
@@ -73,13 +73,9 @@
_CONFIG_CACHE = {}
def set_main_css(css_file):
- ''' Sets the main_css using debug css if needed. The css_file
- must be of the form file.css '''
+ ''' Sets the main_css. The css_file must be of the form file.css '''
assert css_file.endswith('.css')
- if config.get('debug') and css_file == '/base/css/main.css':
- new_css = '/base/css/main.debug.css'
- else:
- new_css = css_file
+ new_css = css_file
# FIXME we should check the css file exists
app_globals.main_css = str(new_css)
| {"golden_diff": "diff --git a/ckan/lib/app_globals.py b/ckan/lib/app_globals.py\n--- a/ckan/lib/app_globals.py\n+++ b/ckan/lib/app_globals.py\n@@ -73,13 +73,9 @@\n _CONFIG_CACHE = {}\n \n def set_main_css(css_file):\n- ''' Sets the main_css using debug css if needed. The css_file\n- must be of the form file.css '''\n+ ''' Sets the main_css. The css_file must be of the form file.css '''\n assert css_file.endswith('.css')\n- if config.get('debug') and css_file == '/base/css/main.css':\n- new_css = '/base/css/main.debug.css'\n- else:\n- new_css = css_file\n+ new_css = css_file\n # FIXME we should check the css file exists\n app_globals.main_css = str(new_css)\n", "issue": "Include the main.debug.css\nHi, I'm new to CKAN in my organization and turned debug to true for development and encountered the `AttributeError: 'module' object has no attribute 'css/main.debug.css'` error. It took me a while to figure out that I had to compile the less to get it.\n\nWouldn't it be easier to include this file so that debug mode automatically works without needing to change anything else?\n\n", "before_files": [{"content": "''' The application's Globals object '''\n\nimport logging\nimport time\nfrom threading import Lock\nimport re\n\nfrom paste.deploy.converters import asbool\nfrom pylons import config\n\nimport ckan\nimport ckan.model as model\nimport ckan.logic as logic\n\n\nlog = logging.getLogger(__name__)\n\n\n# mappings translate between config settings and globals because our naming\n# conventions are not well defined and/or implemented\nmappings = {\n# 'config_key': 'globals_key',\n}\n\n\n# This mapping is only used to define the configuration options (from the\n# `config` object) that should be copied to the `app_globals` (`g`) object.\napp_globals_from_config_details = {\n 'ckan.site_title': {},\n 'ckan.site_logo': {},\n 'ckan.site_url': {},\n 'ckan.site_description': {},\n 'ckan.site_about': {},\n 'ckan.site_intro_text': {},\n 'ckan.site_custom_css': {},\n 'ckan.favicon': {}, # default gets set in config.environment.py\n 'ckan.template_head_end': {},\n 'ckan.template_footer_end': {},\n # has been setup in load_environment():\n 'ckan.site_id': {},\n 'ckan.recaptcha.publickey': {'name': 'recaptcha_publickey'},\n 'ckan.recaptcha.version': {'name': 'recaptcha_version', 'default': '1'},\n 'ckan.template_title_deliminater': {'default': '-'},\n 'ckan.template_head_end': {},\n 'ckan.template_footer_end': {},\n 'ckan.dumps_url': {},\n 'ckan.dumps_format': {},\n 'ofs.impl': {'name': 'ofs_impl'},\n 'ckan.homepage_style': {'default': '1'},\n\n # split string\n 'search.facets': {'default': 'organization groups tags res_format license_id',\n 'type': 'split',\n 'name': 'facets'},\n 'package_hide_extras': {'type': 'split'},\n 'ckan.plugins': {'type': 'split'},\n\n # bool\n 'debug': {'default': 'false', 'type' : 'bool'},\n 'ckan.debug_supress_header' : {'default': 'false', 'type' : 'bool'},\n 'ckan.legacy_templates' : {'default': 'false', 'type' : 'bool'},\n 'ckan.tracking_enabled' : {'default': 'false', 'type' : 'bool'},\n\n # int\n 'ckan.datasets_per_page': {'default': '20', 'type': 'int'},\n 'ckan.activity_list_limit': {'default': '30', 'type': 'int'},\n 'search.facets.default': {'default': '10', 'type': 'int',\n 'name': 'facets_default_number'},\n}\n\n\n# A place to store the origional config options of we override them\n_CONFIG_CACHE = {}\n\ndef set_main_css(css_file):\n ''' Sets the main_css using debug css if needed. 
The css_file\n must be of the form file.css '''\n assert css_file.endswith('.css')\n if config.get('debug') and css_file == '/base/css/main.css':\n new_css = '/base/css/main.debug.css'\n else:\n new_css = css_file\n # FIXME we should check the css file exists\n app_globals.main_css = str(new_css)\n\n\ndef set_app_global(key, value):\n '''\n Set a new key on the app_globals (g) object\n\n It will process the value according to the options on\n app_globals_from_config_details (if any)\n '''\n key, value = process_app_global(key, value)\n setattr(app_globals, key, value)\n\n\ndef process_app_global(key, value):\n '''\n Tweak a key, value pair meant to be set on the app_globals (g) object\n\n According to the options on app_globals_from_config_details (if any)\n '''\n options = app_globals_from_config_details.get(key)\n key = get_globals_key(key)\n if options:\n if 'name' in options:\n key = options['name']\n value = value or options.get('default', '')\n\n data_type = options.get('type')\n if data_type == 'bool':\n value = asbool(value)\n elif data_type == 'int':\n value = int(value)\n elif data_type == 'split':\n value = value.split()\n\n return key, value\n\n\ndef get_globals_key(key):\n # create our globals key\n # these can be specified in mappings or else we remove\n # the `ckan.` part this is to keep the existing namings\n # set the value\n if key in mappings:\n return mappings[key]\n elif key.startswith('ckan.'):\n return key[5:]\n else:\n return key\n\n\ndef reset():\n ''' set updatable values from config '''\n def get_config_value(key, default=''):\n if model.meta.engine.has_table('system_info'):\n value = model.get_system_info(key)\n else:\n value = None\n config_value = config.get(key)\n # sort encodeings if needed\n if isinstance(config_value, str):\n try:\n config_value = config_value.decode('utf-8')\n except UnicodeDecodeError:\n config_value = config_value.decode('latin-1')\n # we want to store the config the first time we get here so we can\n # reset them if needed\n if key not in _CONFIG_CACHE:\n _CONFIG_CACHE[key] = config_value\n if value is not None:\n log.debug('config `%s` set to `%s` from db' % (key, value))\n else:\n value = _CONFIG_CACHE[key]\n if value:\n log.debug('config `%s` set to `%s` from config' % (key, value))\n else:\n value = default\n\n set_app_global(key, value)\n\n # update the config\n config[key] = value\n return value\n\n # update the config settings in auto update\n schema = logic.schema.update_configuration_schema()\n for key in schema.keys():\n get_config_value(key)\n\n # cusom styling\n main_css = get_config_value('ckan.main_css', '/base/css/main.css')\n set_main_css(main_css)\n # site_url_nice\n site_url_nice = app_globals.site_url.replace('http://', '')\n site_url_nice = site_url_nice.replace('www.', '')\n app_globals.site_url_nice = site_url_nice\n\n if app_globals.site_logo:\n app_globals.header_class = 'header-image'\n elif not app_globals.site_description:\n app_globals.header_class = 'header-text-logo'\n else:\n app_globals.header_class = 'header-text-logo-tagline'\n\n\nclass _Globals(object):\n\n ''' Globals acts as a container for objects available throughout the\n life of the application. 
'''\n\n def __init__(self):\n '''One instance of Globals is created during application\n initialization and is available during requests via the\n 'app_globals' variable\n '''\n self._init()\n self._config_update = None\n self._mutex = Lock()\n\n def _check_uptodate(self):\n ''' check the config is uptodate needed when several instances are\n running '''\n value = model.get_system_info('ckan.config_update')\n if self._config_update != value:\n if self._mutex.acquire(False):\n reset()\n self._config_update = value\n self._mutex.release()\n\n def _init(self):\n\n self.ckan_version = ckan.__version__\n self.ckan_base_version = re.sub('[^0-9\\.]', '', self.ckan_version)\n if self.ckan_base_version == self.ckan_version:\n self.ckan_doc_version = 'ckan-{0}'.format(self.ckan_version)\n else:\n self.ckan_doc_version = 'latest'\n\n # process the config details to set globals\n for key in app_globals_from_config_details.keys():\n new_key, value = process_app_global(key, config.get(key) or '')\n setattr(self, new_key, value)\n\n\napp_globals = _Globals()\ndel _Globals\n", "path": "ckan/lib/app_globals.py"}], "after_files": [{"content": "''' The application's Globals object '''\n\nimport logging\nimport time\nfrom threading import Lock\nimport re\n\nfrom paste.deploy.converters import asbool\nfrom pylons import config\n\nimport ckan\nimport ckan.model as model\nimport ckan.logic as logic\n\n\nlog = logging.getLogger(__name__)\n\n\n# mappings translate between config settings and globals because our naming\n# conventions are not well defined and/or implemented\nmappings = {\n# 'config_key': 'globals_key',\n}\n\n\n# This mapping is only used to define the configuration options (from the\n# `config` object) that should be copied to the `app_globals` (`g`) object.\napp_globals_from_config_details = {\n 'ckan.site_title': {},\n 'ckan.site_logo': {},\n 'ckan.site_url': {},\n 'ckan.site_description': {},\n 'ckan.site_about': {},\n 'ckan.site_intro_text': {},\n 'ckan.site_custom_css': {},\n 'ckan.favicon': {}, # default gets set in config.environment.py\n 'ckan.template_head_end': {},\n 'ckan.template_footer_end': {},\n # has been setup in load_environment():\n 'ckan.site_id': {},\n 'ckan.recaptcha.publickey': {'name': 'recaptcha_publickey'},\n 'ckan.recaptcha.version': {'name': 'recaptcha_version', 'default': '1'},\n 'ckan.template_title_deliminater': {'default': '-'},\n 'ckan.template_head_end': {},\n 'ckan.template_footer_end': {},\n 'ckan.dumps_url': {},\n 'ckan.dumps_format': {},\n 'ofs.impl': {'name': 'ofs_impl'},\n 'ckan.homepage_style': {'default': '1'},\n\n # split string\n 'search.facets': {'default': 'organization groups tags res_format license_id',\n 'type': 'split',\n 'name': 'facets'},\n 'package_hide_extras': {'type': 'split'},\n 'ckan.plugins': {'type': 'split'},\n\n # bool\n 'debug': {'default': 'false', 'type' : 'bool'},\n 'ckan.debug_supress_header' : {'default': 'false', 'type' : 'bool'},\n 'ckan.legacy_templates' : {'default': 'false', 'type' : 'bool'},\n 'ckan.tracking_enabled' : {'default': 'false', 'type' : 'bool'},\n\n # int\n 'ckan.datasets_per_page': {'default': '20', 'type': 'int'},\n 'ckan.activity_list_limit': {'default': '30', 'type': 'int'},\n 'search.facets.default': {'default': '10', 'type': 'int',\n 'name': 'facets_default_number'},\n}\n\n\n# A place to store the origional config options of we override them\n_CONFIG_CACHE = {}\n\ndef set_main_css(css_file):\n ''' Sets the main_css. 
The css_file must be of the form file.css '''\n assert css_file.endswith('.css')\n new_css = css_file\n # FIXME we should check the css file exists\n app_globals.main_css = str(new_css)\n\n\ndef set_app_global(key, value):\n '''\n Set a new key on the app_globals (g) object\n\n It will process the value according to the options on\n app_globals_from_config_details (if any)\n '''\n key, value = process_app_global(key, value)\n setattr(app_globals, key, value)\n\n\ndef process_app_global(key, value):\n '''\n Tweak a key, value pair meant to be set on the app_globals (g) object\n\n According to the options on app_globals_from_config_details (if any)\n '''\n options = app_globals_from_config_details.get(key)\n key = get_globals_key(key)\n if options:\n if 'name' in options:\n key = options['name']\n value = value or options.get('default', '')\n\n data_type = options.get('type')\n if data_type == 'bool':\n value = asbool(value)\n elif data_type == 'int':\n value = int(value)\n elif data_type == 'split':\n value = value.split()\n\n return key, value\n\n\ndef get_globals_key(key):\n # create our globals key\n # these can be specified in mappings or else we remove\n # the `ckan.` part this is to keep the existing namings\n # set the value\n if key in mappings:\n return mappings[key]\n elif key.startswith('ckan.'):\n return key[5:]\n else:\n return key\n\n\ndef reset():\n ''' set updatable values from config '''\n def get_config_value(key, default=''):\n if model.meta.engine.has_table('system_info'):\n value = model.get_system_info(key)\n else:\n value = None\n config_value = config.get(key)\n # sort encodeings if needed\n if isinstance(config_value, str):\n try:\n config_value = config_value.decode('utf-8')\n except UnicodeDecodeError:\n config_value = config_value.decode('latin-1')\n # we want to store the config the first time we get here so we can\n # reset them if needed\n if key not in _CONFIG_CACHE:\n _CONFIG_CACHE[key] = config_value\n if value is not None:\n log.debug('config `%s` set to `%s` from db' % (key, value))\n else:\n value = _CONFIG_CACHE[key]\n if value:\n log.debug('config `%s` set to `%s` from config' % (key, value))\n else:\n value = default\n\n set_app_global(key, value)\n\n # update the config\n config[key] = value\n return value\n\n # update the config settings in auto update\n schema = logic.schema.update_configuration_schema()\n for key in schema.keys():\n get_config_value(key)\n\n # cusom styling\n main_css = get_config_value('ckan.main_css', '/base/css/main.css')\n set_main_css(main_css)\n # site_url_nice\n site_url_nice = app_globals.site_url.replace('http://', '')\n site_url_nice = site_url_nice.replace('www.', '')\n app_globals.site_url_nice = site_url_nice\n\n if app_globals.site_logo:\n app_globals.header_class = 'header-image'\n elif not app_globals.site_description:\n app_globals.header_class = 'header-text-logo'\n else:\n app_globals.header_class = 'header-text-logo-tagline'\n\n\nclass _Globals(object):\n\n ''' Globals acts as a container for objects available throughout the\n life of the application. 
'''\n\n def __init__(self):\n '''One instance of Globals is created during application\n initialization and is available during requests via the\n 'app_globals' variable\n '''\n self._init()\n self._config_update = None\n self._mutex = Lock()\n\n def _check_uptodate(self):\n ''' check the config is uptodate needed when several instances are\n running '''\n value = model.get_system_info('ckan.config_update')\n if self._config_update != value:\n if self._mutex.acquire(False):\n reset()\n self._config_update = value\n self._mutex.release()\n\n def _init(self):\n\n self.ckan_version = ckan.__version__\n self.ckan_base_version = re.sub('[^0-9\\.]', '', self.ckan_version)\n if self.ckan_base_version == self.ckan_version:\n self.ckan_doc_version = 'ckan-{0}'.format(self.ckan_version)\n else:\n self.ckan_doc_version = 'latest'\n\n # process the config details to set globals\n for key in app_globals_from_config_details.keys():\n new_key, value = process_app_global(key, config.get(key) or '')\n setattr(self, new_key, value)\n\n\napp_globals = _Globals()\ndel _Globals\n", "path": "ckan/lib/app_globals.py"}]} | 2,712 | 193 |
gh_patches_debug_20671 | rasdani/github-patches | git_diff | scrapy__scrapy-5068 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MediaPipeline exceptions passed silently
### Description
MediaPipeline exceptions are passed silently, both for method body errors and for method signature errors.
### Steps to Reproduce
```
from scrapy.pipelines.files import FilesPipeline
class BuggyFilesPipeline(FilesPipeline):
def file_path(self, request, response=None, info=None, *, item=None):
return 1 / 0
```
**Expected behavior:** Exception logged
**Actual behavior:** Exception passed silently
**Reproduces how often:** 100%
### Versions
Scrapy 2.4
--- END ISSUE ---
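While debugging, one stopgap is to wrap the overridden hook so that any exception is at least logged before it propagates; the decorator and pipeline below are hypothetical helpers, not part of Scrapy's API:
```python
import functools
import logging

from scrapy.pipelines.files import FilesPipeline

logger = logging.getLogger(__name__)


def log_failures(method):
    """Log and re-raise exceptions raised by an overridden pipeline hook."""
    @functools.wraps(method)
    def wrapper(*args, **kwargs):
        try:
            return method(*args, **kwargs)
        except Exception:
            logger.exception("error in %s", method.__name__)
            raise
    return wrapper


class LoggingFilesPipeline(FilesPipeline):
    @log_failures
    def file_path(self, request, response=None, info=None, *, item=None):
        return super().file_path(request, response=response, info=info, item=item)
```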
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/pipelines/media.py`
Content:
```
1 import functools
2 import logging
3 from collections import defaultdict
4
5 from twisted.internet.defer import Deferred, DeferredList
6 from twisted.python.failure import Failure
7
8 from scrapy.http.request import NO_CALLBACK
9 from scrapy.settings import Settings
10 from scrapy.utils.datatypes import SequenceExclude
11 from scrapy.utils.defer import defer_result, mustbe_deferred
12 from scrapy.utils.log import failure_to_exc_info
13 from scrapy.utils.misc import arg_to_iter
14
15 logger = logging.getLogger(__name__)
16
17
18 def _DUMMY_CALLBACK(response):
19 return response
20
21
22 class MediaPipeline:
23 LOG_FAILED_RESULTS = True
24
25 class SpiderInfo:
26 def __init__(self, spider):
27 self.spider = spider
28 self.downloading = set()
29 self.downloaded = {}
30 self.waiting = defaultdict(list)
31
32 def __init__(self, download_func=None, settings=None):
33 self.download_func = download_func
34 self._expects_item = {}
35
36 if isinstance(settings, dict) or settings is None:
37 settings = Settings(settings)
38 resolve = functools.partial(
39 self._key_for_pipe, base_class_name="MediaPipeline", settings=settings
40 )
41 self.allow_redirects = settings.getbool(resolve("MEDIA_ALLOW_REDIRECTS"), False)
42 self._handle_statuses(self.allow_redirects)
43
44 def _handle_statuses(self, allow_redirects):
45 self.handle_httpstatus_list = None
46 if allow_redirects:
47 self.handle_httpstatus_list = SequenceExclude(range(300, 400))
48
49 def _key_for_pipe(self, key, base_class_name=None, settings=None):
50 """
51 >>> MediaPipeline()._key_for_pipe("IMAGES")
52 'IMAGES'
53 >>> class MyPipe(MediaPipeline):
54 ... pass
55 >>> MyPipe()._key_for_pipe("IMAGES", base_class_name="MediaPipeline")
56 'MYPIPE_IMAGES'
57 """
58 class_name = self.__class__.__name__
59 formatted_key = f"{class_name.upper()}_{key}"
60 if (
61 not base_class_name
62 or class_name == base_class_name
63 or settings
64 and not settings.get(formatted_key)
65 ):
66 return key
67 return formatted_key
68
69 @classmethod
70 def from_crawler(cls, crawler):
71 try:
72 pipe = cls.from_settings(crawler.settings)
73 except AttributeError:
74 pipe = cls()
75 pipe.crawler = crawler
76 pipe._fingerprinter = crawler.request_fingerprinter
77 return pipe
78
79 def open_spider(self, spider):
80 self.spiderinfo = self.SpiderInfo(spider)
81
82 def process_item(self, item, spider):
83 info = self.spiderinfo
84 requests = arg_to_iter(self.get_media_requests(item, info))
85 dlist = [self._process_request(r, info, item) for r in requests]
86 dfd = DeferredList(dlist, consumeErrors=True)
87 return dfd.addCallback(self.item_completed, item, info)
88
89 def _process_request(self, request, info, item):
90 fp = self._fingerprinter.fingerprint(request)
91 if not request.callback or request.callback is NO_CALLBACK:
92 cb = _DUMMY_CALLBACK
93 else:
94 cb = request.callback
95 eb = request.errback
96 request.callback = NO_CALLBACK
97 request.errback = None
98
99 # Return cached result if request was already seen
100 if fp in info.downloaded:
101 return defer_result(info.downloaded[fp]).addCallbacks(cb, eb)
102
103 # Otherwise, wait for result
104 wad = Deferred().addCallbacks(cb, eb)
105 info.waiting[fp].append(wad)
106
107 # Check if request is downloading right now to avoid doing it twice
108 if fp in info.downloading:
109 return wad
110
111 # Download request checking media_to_download hook output first
112 info.downloading.add(fp)
113 dfd = mustbe_deferred(self.media_to_download, request, info, item=item)
114 dfd.addCallback(self._check_media_to_download, request, info, item=item)
115 dfd.addBoth(self._cache_result_and_execute_waiters, fp, info)
116 dfd.addErrback(
117 lambda f: logger.error(
118 f.value, exc_info=failure_to_exc_info(f), extra={"spider": info.spider}
119 )
120 )
121 return dfd.addBoth(lambda _: wad) # it must return wad at last
122
123 def _modify_media_request(self, request):
124 if self.handle_httpstatus_list:
125 request.meta["handle_httpstatus_list"] = self.handle_httpstatus_list
126 else:
127 request.meta["handle_httpstatus_all"] = True
128
129 def _check_media_to_download(self, result, request, info, item):
130 if result is not None:
131 return result
132 if self.download_func:
133 # this ugly code was left only to support tests. TODO: remove
134 dfd = mustbe_deferred(self.download_func, request, info.spider)
135 dfd.addCallbacks(
136 callback=self.media_downloaded,
137 callbackArgs=(request, info),
138 callbackKeywords={"item": item},
139 errback=self.media_failed,
140 errbackArgs=(request, info),
141 )
142 else:
143 self._modify_media_request(request)
144 dfd = self.crawler.engine.download(request)
145 dfd.addCallbacks(
146 callback=self.media_downloaded,
147 callbackArgs=(request, info),
148 callbackKeywords={"item": item},
149 errback=self.media_failed,
150 errbackArgs=(request, info),
151 )
152 return dfd
153
154 def _cache_result_and_execute_waiters(self, result, fp, info):
155 if isinstance(result, Failure):
156 # minimize cached information for failure
157 result.cleanFailure()
158 result.frames = []
159 result.stack = None
160
161 # This code fixes a memory leak by avoiding to keep references to
162 # the Request and Response objects on the Media Pipeline cache.
163 #
164 # What happens when the media_downloaded callback raises an
165 # exception, for example a FileException('download-error') when
166 # the Response status code is not 200 OK, is that the original
167 # StopIteration exception (which in turn contains the failed
168 # Response and by extension, the original Request) gets encapsulated
169 # within the FileException context.
170 #
171 # Originally, Scrapy was using twisted.internet.defer.returnValue
172 # inside functions decorated with twisted.internet.defer.inlineCallbacks,
173 # encapsulating the returned Response in a _DefGen_Return exception
174 # instead of a StopIteration.
175 #
176 # To avoid keeping references to the Response and therefore Request
177 # objects on the Media Pipeline cache, we should wipe the context of
178 # the encapsulated exception when it is a StopIteration instance
179 #
180 # This problem does not occur in Python 2.7 since we don't have
181 # Exception Chaining (https://www.python.org/dev/peps/pep-3134/).
182 context = getattr(result.value, "__context__", None)
183 if isinstance(context, StopIteration):
184 setattr(result.value, "__context__", None)
185
186 info.downloading.remove(fp)
187 info.downloaded[fp] = result # cache result
188 for wad in info.waiting.pop(fp):
189 defer_result(result).chainDeferred(wad)
190
191 # Overridable Interface
192 def media_to_download(self, request, info, *, item=None):
193 """Check request before starting download"""
194 pass
195
196 def get_media_requests(self, item, info):
197 """Returns the media requests to download"""
198 pass
199
200 def media_downloaded(self, response, request, info, *, item=None):
201 """Handler for success downloads"""
202 return response
203
204 def media_failed(self, failure, request, info):
205 """Handler for failed downloads"""
206 return failure
207
208 def item_completed(self, results, item, info):
209         """Called per item when all media requests have been processed"""
210 if self.LOG_FAILED_RESULTS:
211 for ok, value in results:
212 if not ok:
213 logger.error(
214 "%(class)s found errors processing %(item)s",
215 {"class": self.__class__.__name__, "item": item},
216 exc_info=failure_to_exc_info(value),
217 extra={"spider": info.spider},
218 )
219 return item
220
221 def file_path(self, request, response=None, info=None, *, item=None):
222 """Returns the path where downloaded media should be stored"""
223 pass
224
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scrapy/pipelines/media.py b/scrapy/pipelines/media.py
--- a/scrapy/pipelines/media.py
+++ b/scrapy/pipelines/media.py
@@ -112,14 +112,14 @@
info.downloading.add(fp)
dfd = mustbe_deferred(self.media_to_download, request, info, item=item)
dfd.addCallback(self._check_media_to_download, request, info, item=item)
+ dfd.addErrback(self._log_exception)
dfd.addBoth(self._cache_result_and_execute_waiters, fp, info)
- dfd.addErrback(
- lambda f: logger.error(
- f.value, exc_info=failure_to_exc_info(f), extra={"spider": info.spider}
- )
- )
return dfd.addBoth(lambda _: wad) # it must return wad at last
+ def _log_exception(self, result):
+ logger.exception(result)
+ return result
+
def _modify_media_request(self, request):
if self.handle_httpstatus_list:
request.meta["handle_httpstatus_list"] = self.handle_httpstatus_list
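With this patch, an exception raised inside an overridden hook is written to the log by the new `_log_exception` errback (via `logger.exception`) and the resulting `Failure` still flows on to `_cache_result_and_execute_waiters`. A minimal sketch of the failure mode described in the issue, assuming a spider that enables this pipeline (the subclass is the one from the report):

```python
from scrapy.pipelines.files import FilesPipeline


class BuggyFilesPipeline(FilesPipeline):
    def file_path(self, request, response=None, info=None, *, item=None):
        # Before the patch this ZeroDivisionError vanished silently;
        # now it shows up in the crawl log with a full traceback.
        return 1 / 0
```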
| {"golden_diff": "diff --git a/scrapy/pipelines/media.py b/scrapy/pipelines/media.py\n--- a/scrapy/pipelines/media.py\n+++ b/scrapy/pipelines/media.py\n@@ -112,14 +112,14 @@\n info.downloading.add(fp)\n dfd = mustbe_deferred(self.media_to_download, request, info, item=item)\n dfd.addCallback(self._check_media_to_download, request, info, item=item)\n+ dfd.addErrback(self._log_exception)\n dfd.addBoth(self._cache_result_and_execute_waiters, fp, info)\n- dfd.addErrback(\n- lambda f: logger.error(\n- f.value, exc_info=failure_to_exc_info(f), extra={\"spider\": info.spider}\n- )\n- )\n return dfd.addBoth(lambda _: wad) # it must return wad at last\n \n+ def _log_exception(self, result):\n+ logger.exception(result)\n+ return result\n+\n def _modify_media_request(self, request):\n if self.handle_httpstatus_list:\n request.meta[\"handle_httpstatus_list\"] = self.handle_httpstatus_list\n", "issue": "MediaPipeline exceptions passed silently\n### Description\r\n\r\nMediaPipeline exceptions passed silently both for method body or method signature errors.\r\n\r\n### Steps to Reproduce\r\n\r\n```\r\nfrom scrapy.pipelines.files import FilesPipeline\r\nclass BuggyFilesPipeline(FilesPipeline):\r\n def file_path(self, request, response=None, info=None, *, item=None):\r\n return 1 / 0\r\n```\r\n**Expected behavior:** Exception logged\r\n\r\n**Actual behavior:** Exception passed silently\r\n\r\n**Reproduces how often:** 100%\r\n\r\n### Versions\r\n\r\nScrapy 2.4\r\n\n", "before_files": [{"content": "import functools\nimport logging\nfrom collections import defaultdict\n\nfrom twisted.internet.defer import Deferred, DeferredList\nfrom twisted.python.failure import Failure\n\nfrom scrapy.http.request import NO_CALLBACK\nfrom scrapy.settings import Settings\nfrom scrapy.utils.datatypes import SequenceExclude\nfrom scrapy.utils.defer import defer_result, mustbe_deferred\nfrom scrapy.utils.log import failure_to_exc_info\nfrom scrapy.utils.misc import arg_to_iter\n\nlogger = logging.getLogger(__name__)\n\n\ndef _DUMMY_CALLBACK(response):\n return response\n\n\nclass MediaPipeline:\n LOG_FAILED_RESULTS = True\n\n class SpiderInfo:\n def __init__(self, spider):\n self.spider = spider\n self.downloading = set()\n self.downloaded = {}\n self.waiting = defaultdict(list)\n\n def __init__(self, download_func=None, settings=None):\n self.download_func = download_func\n self._expects_item = {}\n\n if isinstance(settings, dict) or settings is None:\n settings = Settings(settings)\n resolve = functools.partial(\n self._key_for_pipe, base_class_name=\"MediaPipeline\", settings=settings\n )\n self.allow_redirects = settings.getbool(resolve(\"MEDIA_ALLOW_REDIRECTS\"), False)\n self._handle_statuses(self.allow_redirects)\n\n def _handle_statuses(self, allow_redirects):\n self.handle_httpstatus_list = None\n if allow_redirects:\n self.handle_httpstatus_list = SequenceExclude(range(300, 400))\n\n def _key_for_pipe(self, key, base_class_name=None, settings=None):\n \"\"\"\n >>> MediaPipeline()._key_for_pipe(\"IMAGES\")\n 'IMAGES'\n >>> class MyPipe(MediaPipeline):\n ... 
pass\n >>> MyPipe()._key_for_pipe(\"IMAGES\", base_class_name=\"MediaPipeline\")\n 'MYPIPE_IMAGES'\n \"\"\"\n class_name = self.__class__.__name__\n formatted_key = f\"{class_name.upper()}_{key}\"\n if (\n not base_class_name\n or class_name == base_class_name\n or settings\n and not settings.get(formatted_key)\n ):\n return key\n return formatted_key\n\n @classmethod\n def from_crawler(cls, crawler):\n try:\n pipe = cls.from_settings(crawler.settings)\n except AttributeError:\n pipe = cls()\n pipe.crawler = crawler\n pipe._fingerprinter = crawler.request_fingerprinter\n return pipe\n\n def open_spider(self, spider):\n self.spiderinfo = self.SpiderInfo(spider)\n\n def process_item(self, item, spider):\n info = self.spiderinfo\n requests = arg_to_iter(self.get_media_requests(item, info))\n dlist = [self._process_request(r, info, item) for r in requests]\n dfd = DeferredList(dlist, consumeErrors=True)\n return dfd.addCallback(self.item_completed, item, info)\n\n def _process_request(self, request, info, item):\n fp = self._fingerprinter.fingerprint(request)\n if not request.callback or request.callback is NO_CALLBACK:\n cb = _DUMMY_CALLBACK\n else:\n cb = request.callback\n eb = request.errback\n request.callback = NO_CALLBACK\n request.errback = None\n\n # Return cached result if request was already seen\n if fp in info.downloaded:\n return defer_result(info.downloaded[fp]).addCallbacks(cb, eb)\n\n # Otherwise, wait for result\n wad = Deferred().addCallbacks(cb, eb)\n info.waiting[fp].append(wad)\n\n # Check if request is downloading right now to avoid doing it twice\n if fp in info.downloading:\n return wad\n\n # Download request checking media_to_download hook output first\n info.downloading.add(fp)\n dfd = mustbe_deferred(self.media_to_download, request, info, item=item)\n dfd.addCallback(self._check_media_to_download, request, info, item=item)\n dfd.addBoth(self._cache_result_and_execute_waiters, fp, info)\n dfd.addErrback(\n lambda f: logger.error(\n f.value, exc_info=failure_to_exc_info(f), extra={\"spider\": info.spider}\n )\n )\n return dfd.addBoth(lambda _: wad) # it must return wad at last\n\n def _modify_media_request(self, request):\n if self.handle_httpstatus_list:\n request.meta[\"handle_httpstatus_list\"] = self.handle_httpstatus_list\n else:\n request.meta[\"handle_httpstatus_all\"] = True\n\n def _check_media_to_download(self, result, request, info, item):\n if result is not None:\n return result\n if self.download_func:\n # this ugly code was left only to support tests. 
TODO: remove\n dfd = mustbe_deferred(self.download_func, request, info.spider)\n dfd.addCallbacks(\n callback=self.media_downloaded,\n callbackArgs=(request, info),\n callbackKeywords={\"item\": item},\n errback=self.media_failed,\n errbackArgs=(request, info),\n )\n else:\n self._modify_media_request(request)\n dfd = self.crawler.engine.download(request)\n dfd.addCallbacks(\n callback=self.media_downloaded,\n callbackArgs=(request, info),\n callbackKeywords={\"item\": item},\n errback=self.media_failed,\n errbackArgs=(request, info),\n )\n return dfd\n\n def _cache_result_and_execute_waiters(self, result, fp, info):\n if isinstance(result, Failure):\n # minimize cached information for failure\n result.cleanFailure()\n result.frames = []\n result.stack = None\n\n # This code fixes a memory leak by avoiding to keep references to\n # the Request and Response objects on the Media Pipeline cache.\n #\n # What happens when the media_downloaded callback raises an\n # exception, for example a FileException('download-error') when\n # the Response status code is not 200 OK, is that the original\n # StopIteration exception (which in turn contains the failed\n # Response and by extension, the original Request) gets encapsulated\n # within the FileException context.\n #\n # Originally, Scrapy was using twisted.internet.defer.returnValue\n # inside functions decorated with twisted.internet.defer.inlineCallbacks,\n # encapsulating the returned Response in a _DefGen_Return exception\n # instead of a StopIteration.\n #\n # To avoid keeping references to the Response and therefore Request\n # objects on the Media Pipeline cache, we should wipe the context of\n # the encapsulated exception when it is a StopIteration instance\n #\n # This problem does not occur in Python 2.7 since we don't have\n # Exception Chaining (https://www.python.org/dev/peps/pep-3134/).\n context = getattr(result.value, \"__context__\", None)\n if isinstance(context, StopIteration):\n setattr(result.value, \"__context__\", None)\n\n info.downloading.remove(fp)\n info.downloaded[fp] = result # cache result\n for wad in info.waiting.pop(fp):\n defer_result(result).chainDeferred(wad)\n\n # Overridable Interface\n def media_to_download(self, request, info, *, item=None):\n \"\"\"Check request before starting download\"\"\"\n pass\n\n def get_media_requests(self, item, info):\n \"\"\"Returns the media requests to download\"\"\"\n pass\n\n def media_downloaded(self, response, request, info, *, item=None):\n \"\"\"Handler for success downloads\"\"\"\n return response\n\n def media_failed(self, failure, request, info):\n \"\"\"Handler for failed downloads\"\"\"\n return failure\n\n def item_completed(self, results, item, info):\n \"\"\"Called per item when all media requests has been processed\"\"\"\n if self.LOG_FAILED_RESULTS:\n for ok, value in results:\n if not ok:\n logger.error(\n \"%(class)s found errors processing %(item)s\",\n {\"class\": self.__class__.__name__, \"item\": item},\n exc_info=failure_to_exc_info(value),\n extra={\"spider\": info.spider},\n )\n return item\n\n def file_path(self, request, response=None, info=None, *, item=None):\n \"\"\"Returns the path where downloaded media should be stored\"\"\"\n pass\n", "path": "scrapy/pipelines/media.py"}], "after_files": [{"content": "import functools\nimport logging\nfrom collections import defaultdict\n\nfrom twisted.internet.defer import Deferred, DeferredList\nfrom twisted.python.failure import Failure\n\nfrom scrapy.http.request import NO_CALLBACK\nfrom scrapy.settings 
import Settings\nfrom scrapy.utils.datatypes import SequenceExclude\nfrom scrapy.utils.defer import defer_result, mustbe_deferred\nfrom scrapy.utils.log import failure_to_exc_info\nfrom scrapy.utils.misc import arg_to_iter\n\nlogger = logging.getLogger(__name__)\n\n\ndef _DUMMY_CALLBACK(response):\n return response\n\n\nclass MediaPipeline:\n LOG_FAILED_RESULTS = True\n\n class SpiderInfo:\n def __init__(self, spider):\n self.spider = spider\n self.downloading = set()\n self.downloaded = {}\n self.waiting = defaultdict(list)\n\n def __init__(self, download_func=None, settings=None):\n self.download_func = download_func\n self._expects_item = {}\n\n if isinstance(settings, dict) or settings is None:\n settings = Settings(settings)\n resolve = functools.partial(\n self._key_for_pipe, base_class_name=\"MediaPipeline\", settings=settings\n )\n self.allow_redirects = settings.getbool(resolve(\"MEDIA_ALLOW_REDIRECTS\"), False)\n self._handle_statuses(self.allow_redirects)\n\n def _handle_statuses(self, allow_redirects):\n self.handle_httpstatus_list = None\n if allow_redirects:\n self.handle_httpstatus_list = SequenceExclude(range(300, 400))\n\n def _key_for_pipe(self, key, base_class_name=None, settings=None):\n \"\"\"\n >>> MediaPipeline()._key_for_pipe(\"IMAGES\")\n 'IMAGES'\n >>> class MyPipe(MediaPipeline):\n ... pass\n >>> MyPipe()._key_for_pipe(\"IMAGES\", base_class_name=\"MediaPipeline\")\n 'MYPIPE_IMAGES'\n \"\"\"\n class_name = self.__class__.__name__\n formatted_key = f\"{class_name.upper()}_{key}\"\n if (\n not base_class_name\n or class_name == base_class_name\n or settings\n and not settings.get(formatted_key)\n ):\n return key\n return formatted_key\n\n @classmethod\n def from_crawler(cls, crawler):\n try:\n pipe = cls.from_settings(crawler.settings)\n except AttributeError:\n pipe = cls()\n pipe.crawler = crawler\n pipe._fingerprinter = crawler.request_fingerprinter\n return pipe\n\n def open_spider(self, spider):\n self.spiderinfo = self.SpiderInfo(spider)\n\n def process_item(self, item, spider):\n info = self.spiderinfo\n requests = arg_to_iter(self.get_media_requests(item, info))\n dlist = [self._process_request(r, info, item) for r in requests]\n dfd = DeferredList(dlist, consumeErrors=True)\n return dfd.addCallback(self.item_completed, item, info)\n\n def _process_request(self, request, info, item):\n fp = self._fingerprinter.fingerprint(request)\n if not request.callback or request.callback is NO_CALLBACK:\n cb = _DUMMY_CALLBACK\n else:\n cb = request.callback\n eb = request.errback\n request.callback = NO_CALLBACK\n request.errback = None\n\n # Return cached result if request was already seen\n if fp in info.downloaded:\n return defer_result(info.downloaded[fp]).addCallbacks(cb, eb)\n\n # Otherwise, wait for result\n wad = Deferred().addCallbacks(cb, eb)\n info.waiting[fp].append(wad)\n\n # Check if request is downloading right now to avoid doing it twice\n if fp in info.downloading:\n return wad\n\n # Download request checking media_to_download hook output first\n info.downloading.add(fp)\n dfd = mustbe_deferred(self.media_to_download, request, info, item=item)\n dfd.addCallback(self._check_media_to_download, request, info, item=item)\n dfd.addErrback(self._log_exception)\n dfd.addBoth(self._cache_result_and_execute_waiters, fp, info)\n return dfd.addBoth(lambda _: wad) # it must return wad at last\n\n def _log_exception(self, result):\n logger.exception(result)\n return result\n\n def _modify_media_request(self, request):\n if self.handle_httpstatus_list:\n 
request.meta[\"handle_httpstatus_list\"] = self.handle_httpstatus_list\n else:\n request.meta[\"handle_httpstatus_all\"] = True\n\n def _check_media_to_download(self, result, request, info, item):\n if result is not None:\n return result\n if self.download_func:\n # this ugly code was left only to support tests. TODO: remove\n dfd = mustbe_deferred(self.download_func, request, info.spider)\n dfd.addCallbacks(\n callback=self.media_downloaded,\n callbackArgs=(request, info),\n callbackKeywords={\"item\": item},\n errback=self.media_failed,\n errbackArgs=(request, info),\n )\n else:\n self._modify_media_request(request)\n dfd = self.crawler.engine.download(request)\n dfd.addCallbacks(\n callback=self.media_downloaded,\n callbackArgs=(request, info),\n callbackKeywords={\"item\": item},\n errback=self.media_failed,\n errbackArgs=(request, info),\n )\n return dfd\n\n def _cache_result_and_execute_waiters(self, result, fp, info):\n if isinstance(result, Failure):\n # minimize cached information for failure\n result.cleanFailure()\n result.frames = []\n result.stack = None\n\n # This code fixes a memory leak by avoiding to keep references to\n # the Request and Response objects on the Media Pipeline cache.\n #\n # What happens when the media_downloaded callback raises an\n # exception, for example a FileException('download-error') when\n # the Response status code is not 200 OK, is that the original\n # StopIteration exception (which in turn contains the failed\n # Response and by extension, the original Request) gets encapsulated\n # within the FileException context.\n #\n # Originally, Scrapy was using twisted.internet.defer.returnValue\n # inside functions decorated with twisted.internet.defer.inlineCallbacks,\n # encapsulating the returned Response in a _DefGen_Return exception\n # instead of a StopIteration.\n #\n # To avoid keeping references to the Response and therefore Request\n # objects on the Media Pipeline cache, we should wipe the context of\n # the encapsulated exception when it is a StopIteration instance\n #\n # This problem does not occur in Python 2.7 since we don't have\n # Exception Chaining (https://www.python.org/dev/peps/pep-3134/).\n context = getattr(result.value, \"__context__\", None)\n if isinstance(context, StopIteration):\n setattr(result.value, \"__context__\", None)\n\n info.downloading.remove(fp)\n info.downloaded[fp] = result # cache result\n for wad in info.waiting.pop(fp):\n defer_result(result).chainDeferred(wad)\n\n # Overridable Interface\n def media_to_download(self, request, info, *, item=None):\n \"\"\"Check request before starting download\"\"\"\n pass\n\n def get_media_requests(self, item, info):\n \"\"\"Returns the media requests to download\"\"\"\n pass\n\n def media_downloaded(self, response, request, info, *, item=None):\n \"\"\"Handler for success downloads\"\"\"\n return response\n\n def media_failed(self, failure, request, info):\n \"\"\"Handler for failed downloads\"\"\"\n return failure\n\n def item_completed(self, results, item, info):\n \"\"\"Called per item when all media requests has been processed\"\"\"\n if self.LOG_FAILED_RESULTS:\n for ok, value in results:\n if not ok:\n logger.error(\n \"%(class)s found errors processing %(item)s\",\n {\"class\": self.__class__.__name__, \"item\": item},\n exc_info=failure_to_exc_info(value),\n extra={\"spider\": info.spider},\n )\n return item\n\n def file_path(self, request, response=None, info=None, *, item=None):\n \"\"\"Returns the path where downloaded media should be stored\"\"\"\n pass\n", 
"path": "scrapy/pipelines/media.py"}]} | 2,738 | 251 |
gh_patches_debug_29551 | rasdani/github-patches | git_diff | doccano__doccano-1770 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Wrong progress in collaborative annotation ('Share annotations across all users')
How to reproduce the behaviour
---------
Progress is shown as the individual user's progress instead of the total progress when 'Share annotations across all users' is ticked in the project settings.
Your Environment
---------
<!-- Include details of your environment.-->
* Operating System: wsl2+ubuntu20.04
* Python Version Used: 3.8
* When you install doccano: 20220403
* How did you install doccano (Heroku button etc): source
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/metrics/views.py`
Content:
```
1 import abc
2
3 from rest_framework import status
4 from rest_framework.permissions import IsAuthenticated
5 from rest_framework.response import Response
6 from rest_framework.views import APIView
7
8 from examples.models import Example, ExampleState
9 from label_types.models import CategoryType, LabelType, RelationType, SpanType
10 from labels.models import Category, Label, Relation, Span
11 from projects.models import Member
12 from projects.permissions import IsProjectAdmin, IsProjectStaffAndReadOnly
13
14
15 class ProgressAPI(APIView):
16 permission_classes = [IsAuthenticated & (IsProjectAdmin | IsProjectStaffAndReadOnly)]
17
18 def get(self, request, *args, **kwargs):
19 examples = Example.objects.filter(project=self.kwargs["project_id"]).values("id")
20 total = examples.count()
21 complete = ExampleState.objects.count_done(examples, user=self.request.user)
22 data = {"total": total, "remaining": total - complete, "complete": complete}
23 return Response(data=data, status=status.HTTP_200_OK)
24
25
26 class MemberProgressAPI(APIView):
27 permission_classes = [IsAuthenticated & (IsProjectAdmin | IsProjectStaffAndReadOnly)]
28
29 def get(self, request, *args, **kwargs):
30 examples = Example.objects.filter(project=self.kwargs["project_id"]).values("id")
31 members = Member.objects.filter(project=self.kwargs["project_id"])
32 data = ExampleState.objects.measure_member_progress(examples, members)
33 return Response(data=data, status=status.HTTP_200_OK)
34
35
36 class LabelDistribution(abc.ABC, APIView):
37 permission_classes = [IsAuthenticated & (IsProjectAdmin | IsProjectStaffAndReadOnly)]
38 model = Label
39 label_type = LabelType
40
41 def get(self, request, *args, **kwargs):
42 labels = self.label_type.objects.filter(project=self.kwargs["project_id"])
43 examples = Example.objects.filter(project=self.kwargs["project_id"]).values("id")
44 members = Member.objects.filter(project=self.kwargs["project_id"])
45 data = self.model.objects.calc_label_distribution(examples, members, labels)
46 return Response(data=data, status=status.HTTP_200_OK)
47
48
49 class CategoryTypeDistribution(LabelDistribution):
50 model = Category
51 label_type = CategoryType
52
53
54 class SpanTypeDistribution(LabelDistribution):
55 model = Span
56 label_type = SpanType
57
58
59 class RelationTypeDistribution(LabelDistribution):
60 model = Relation
61 label_type = RelationType
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/backend/metrics/views.py b/backend/metrics/views.py
--- a/backend/metrics/views.py
+++ b/backend/metrics/views.py
@@ -1,5 +1,6 @@
import abc
+from django.shortcuts import get_object_or_404
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
@@ -8,7 +9,7 @@
from examples.models import Example, ExampleState
from label_types.models import CategoryType, LabelType, RelationType, SpanType
from labels.models import Category, Label, Relation, Span
-from projects.models import Member
+from projects.models import Member, Project
from projects.permissions import IsProjectAdmin, IsProjectStaffAndReadOnly
@@ -18,7 +19,11 @@
def get(self, request, *args, **kwargs):
examples = Example.objects.filter(project=self.kwargs["project_id"]).values("id")
total = examples.count()
- complete = ExampleState.objects.count_done(examples, user=self.request.user)
+ project = get_object_or_404(Project, pk=self.kwargs["project_id"])
+ if project.collaborative_annotation:
+ complete = ExampleState.objects.count_done(examples)
+ else:
+ complete = ExampleState.objects.count_done(examples, user=self.request.user)
data = {"total": total, "remaining": total - complete, "complete": complete}
return Response(data=data, status=status.HTTP_200_OK)
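Read next to the issue, the patched `ProgressAPI.get` now distinguishes the two annotation modes. A condensed sketch of just that branch, written as a hypothetical helper that mirrors the hunk above (not a drop-in view):

```python
from django.shortcuts import get_object_or_404

from examples.models import Example, ExampleState
from projects.models import Project


def progress_for(request, project_id):
    examples = Example.objects.filter(project=project_id).values("id")
    total = examples.count()
    project = get_object_or_404(Project, pk=project_id)
    if project.collaborative_annotation:
        # Shared annotations: an example counts as done once any member confirmed it.
        complete = ExampleState.objects.count_done(examples)
    else:
        # Individual annotations: count only the requesting user's confirmations.
        complete = ExampleState.objects.count_done(examples, user=request.user)
    return {"total": total, "remaining": total - complete, "complete": complete}
```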
| {"golden_diff": "diff --git a/backend/metrics/views.py b/backend/metrics/views.py\n--- a/backend/metrics/views.py\n+++ b/backend/metrics/views.py\n@@ -1,5 +1,6 @@\n import abc\n \n+from django.shortcuts import get_object_or_404\n from rest_framework import status\n from rest_framework.permissions import IsAuthenticated\n from rest_framework.response import Response\n@@ -8,7 +9,7 @@\n from examples.models import Example, ExampleState\n from label_types.models import CategoryType, LabelType, RelationType, SpanType\n from labels.models import Category, Label, Relation, Span\n-from projects.models import Member\n+from projects.models import Member, Project\n from projects.permissions import IsProjectAdmin, IsProjectStaffAndReadOnly\n \n \n@@ -18,7 +19,11 @@\n def get(self, request, *args, **kwargs):\n examples = Example.objects.filter(project=self.kwargs[\"project_id\"]).values(\"id\")\n total = examples.count()\n- complete = ExampleState.objects.count_done(examples, user=self.request.user)\n+ project = get_object_or_404(Project, pk=self.kwargs[\"project_id\"])\n+ if project.collaborative_annotation:\n+ complete = ExampleState.objects.count_done(examples)\n+ else:\n+ complete = ExampleState.objects.count_done(examples, user=self.request.user)\n data = {\"total\": total, \"remaining\": total - complete, \"complete\": complete}\n return Response(data=data, status=status.HTTP_200_OK)\n", "issue": "Wrong progress in collaborative annotation ('Share annotations across all users')\nHow to reproduce the behaviour\r\n---------\r\nProgress is shown as individual progress instead of total progress when 'Share annotations across all users' is ticked in project setting.\r\n\r\nYour Environment\r\n---------\r\n<!-- Include details of your environment.-->\r\n* Operating System: wsl2+ubuntu20.04\r\n* Python Version Used: 3.8\r\n* When you install doccano: 20220403\r\n* How did you install doccano (Heroku button etc): source\r\n\n", "before_files": [{"content": "import abc\n\nfrom rest_framework import status\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom examples.models import Example, ExampleState\nfrom label_types.models import CategoryType, LabelType, RelationType, SpanType\nfrom labels.models import Category, Label, Relation, Span\nfrom projects.models import Member\nfrom projects.permissions import IsProjectAdmin, IsProjectStaffAndReadOnly\n\n\nclass ProgressAPI(APIView):\n permission_classes = [IsAuthenticated & (IsProjectAdmin | IsProjectStaffAndReadOnly)]\n\n def get(self, request, *args, **kwargs):\n examples = Example.objects.filter(project=self.kwargs[\"project_id\"]).values(\"id\")\n total = examples.count()\n complete = ExampleState.objects.count_done(examples, user=self.request.user)\n data = {\"total\": total, \"remaining\": total - complete, \"complete\": complete}\n return Response(data=data, status=status.HTTP_200_OK)\n\n\nclass MemberProgressAPI(APIView):\n permission_classes = [IsAuthenticated & (IsProjectAdmin | IsProjectStaffAndReadOnly)]\n\n def get(self, request, *args, **kwargs):\n examples = Example.objects.filter(project=self.kwargs[\"project_id\"]).values(\"id\")\n members = Member.objects.filter(project=self.kwargs[\"project_id\"])\n data = ExampleState.objects.measure_member_progress(examples, members)\n return Response(data=data, status=status.HTTP_200_OK)\n\n\nclass LabelDistribution(abc.ABC, APIView):\n permission_classes = [IsAuthenticated & (IsProjectAdmin | 
IsProjectStaffAndReadOnly)]\n model = Label\n label_type = LabelType\n\n def get(self, request, *args, **kwargs):\n labels = self.label_type.objects.filter(project=self.kwargs[\"project_id\"])\n examples = Example.objects.filter(project=self.kwargs[\"project_id\"]).values(\"id\")\n members = Member.objects.filter(project=self.kwargs[\"project_id\"])\n data = self.model.objects.calc_label_distribution(examples, members, labels)\n return Response(data=data, status=status.HTTP_200_OK)\n\n\nclass CategoryTypeDistribution(LabelDistribution):\n model = Category\n label_type = CategoryType\n\n\nclass SpanTypeDistribution(LabelDistribution):\n model = Span\n label_type = SpanType\n\n\nclass RelationTypeDistribution(LabelDistribution):\n model = Relation\n label_type = RelationType\n", "path": "backend/metrics/views.py"}], "after_files": [{"content": "import abc\n\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework import status\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom examples.models import Example, ExampleState\nfrom label_types.models import CategoryType, LabelType, RelationType, SpanType\nfrom labels.models import Category, Label, Relation, Span\nfrom projects.models import Member, Project\nfrom projects.permissions import IsProjectAdmin, IsProjectStaffAndReadOnly\n\n\nclass ProgressAPI(APIView):\n permission_classes = [IsAuthenticated & (IsProjectAdmin | IsProjectStaffAndReadOnly)]\n\n def get(self, request, *args, **kwargs):\n examples = Example.objects.filter(project=self.kwargs[\"project_id\"]).values(\"id\")\n total = examples.count()\n project = get_object_or_404(Project, pk=self.kwargs[\"project_id\"])\n if project.collaborative_annotation:\n complete = ExampleState.objects.count_done(examples)\n else:\n complete = ExampleState.objects.count_done(examples, user=self.request.user)\n data = {\"total\": total, \"remaining\": total - complete, \"complete\": complete}\n return Response(data=data, status=status.HTTP_200_OK)\n\n\nclass MemberProgressAPI(APIView):\n permission_classes = [IsAuthenticated & (IsProjectAdmin | IsProjectStaffAndReadOnly)]\n\n def get(self, request, *args, **kwargs):\n examples = Example.objects.filter(project=self.kwargs[\"project_id\"]).values(\"id\")\n members = Member.objects.filter(project=self.kwargs[\"project_id\"])\n data = ExampleState.objects.measure_member_progress(examples, members)\n return Response(data=data, status=status.HTTP_200_OK)\n\n\nclass LabelDistribution(abc.ABC, APIView):\n permission_classes = [IsAuthenticated & (IsProjectAdmin | IsProjectStaffAndReadOnly)]\n model = Label\n label_type = LabelType\n\n def get(self, request, *args, **kwargs):\n labels = self.label_type.objects.filter(project=self.kwargs[\"project_id\"])\n examples = Example.objects.filter(project=self.kwargs[\"project_id\"]).values(\"id\")\n members = Member.objects.filter(project=self.kwargs[\"project_id\"])\n data = self.model.objects.calc_label_distribution(examples, members, labels)\n return Response(data=data, status=status.HTTP_200_OK)\n\n\nclass CategoryTypeDistribution(LabelDistribution):\n model = Category\n label_type = CategoryType\n\n\nclass SpanTypeDistribution(LabelDistribution):\n model = Span\n label_type = SpanType\n\n\nclass RelationTypeDistribution(LabelDistribution):\n model = Relation\n label_type = RelationType\n", "path": "backend/metrics/views.py"}]} | 999 | 320 |
gh_patches_debug_19988 | rasdani/github-patches | git_diff | CiviWiki__OpenCiviWiki-1186 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Fix Bugs when profile is being viewed
### Description
The serialization is not correct when a registered user tries to view their own profile.
This comes from the serialization part of our code base, which can be viewed [here](https://github.com/CiviWiki/OpenCiviWiki/blob/develop/project/threads/views.py#L181-L245):
```py
thread_wiki_data = {
"thread_id": thread_id,
"title": Thread_filter.title,
"summary": Thread_filter.summary,
"image": Thread_filter.image_url,
"author": {
"username": Thread_filter.author.user.username,
"profile_image": Thread_filter.author.profile_image_url,
"first_name": Thread_filter.author.first_name,
"last_name": Thread_filter.author.last_name,
},
"contributors": [
Profile.objects.chip_summarize(a)
for a in Profile.objects.filter(
pk__in=civis.distinct("author").values_list("author", flat=True)
)
],
"category": {
"id": Thread_filter.category.id,
"name": Thread_filter.category.name,
},
"categories": [{"id": c.id, "name": c.name} for c in Category.objects.all()],
"created": Thread_filter.created_date_str,
"num_civis": Thread_filter.num_civis,
"num_views": Thread_filter.num_views,
"user_votes": [
{
"civi_id": act.civi.id,
"activity_type": act.activity_type,
"c_type": act.civi.c_type,
}
for act in Activity.objects.filter(
thread=Thread_filter.id, account=req_acct.id
)
],
}
```
### What should have happened?
The serialization should return the user appropriately.
### What browser(s) are you seeing the problem on?
Chrome, Firefox, Microsoft Edge, Safari
### Further details

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `project/accounts/views.py`
Content:
```
1 """
2 Class based views.
3
4 This module will include views for the accounts app.
5 """
6
7 from core.custom_decorators import full_profile, login_required
8 from django.conf import settings
9 from django.contrib.auth import get_user_model, login
10 from django.contrib.auth import views as auth_views
11 from django.contrib.auth.mixins import LoginRequiredMixin
12 from django.contrib.sites.shortcuts import get_current_site
13 from django.http import HttpResponseRedirect
14 from django.template.response import TemplateResponse
15 from django.urls import reverse_lazy
16 from django.utils.encoding import force_str
17 from django.utils.http import urlsafe_base64_decode
18 from django.views import View
19 from django.views.generic.edit import FormView, UpdateView
20
21 from accounts.authentication import account_activation_token, send_activation_email
22 from accounts.forms import ProfileEditForm, UpdateProfileImage, UserRegistrationForm
23 from accounts.models import Profile
24
25
26 class RegisterView(FormView):
27 """
28 A form view that handles user registration.
29 """
30
31 template_name = "accounts/register/register.html"
32 form_class = UserRegistrationForm
33 success_url = "/"
34
35 def _create_user(self, form):
36 username = form.cleaned_data["username"]
37 password = form.cleaned_data["password"]
38 email = form.cleaned_data["email"]
39 user = get_user_model().objects.create_user(username, email, password)
40 return user
41
42 def _send_email(self, user):
43 domain = get_current_site(self.request).domain
44 send_activation_email(user, domain)
45
46 def _login(self, user):
47 login(self.request, user)
48
49 def form_valid(self, form):
50 user = self._create_user(form)
51
52 self._send_email(user)
53 self._login(user)
54
55 return super(RegisterView, self).form_valid(form)
56
57
58 class PasswordResetView(auth_views.PasswordResetView):
59 template_name = "accounts/users/password_reset.html"
60 email_template_name = "accounts/users/password_reset_email.html"
61 subject_template_name = "accounts/users/password_reset_subject.txt"
62 from_email = settings.EMAIL_HOST_USER
63 success_url = reverse_lazy("accounts_password_reset_done")
64
65
66 class PasswordResetDoneView(auth_views.PasswordResetDoneView):
67 template_name = "accounts/users/password_reset_done.html"
68
69
70 class PasswordResetConfirmView(auth_views.PasswordResetConfirmView):
71 template_name = "accounts/users/password_reset_confirm.html"
72 success_url = reverse_lazy("accounts_password_reset_complete")
73
74
75 class PasswordResetCompleteView(auth_views.PasswordResetCompleteView):
76 template_name = "accounts/users/password_reset_complete.html"
77
78
79 class SettingsView(LoginRequiredMixin, UpdateView):
80 """A form view to edit Profile"""
81
82 login_url = "accounts_login"
83 form_class = ProfileEditForm
84 success_url = reverse_lazy("accounts_settings")
85 template_name = "accounts/update_settings.html"
86
87 def get_object(self, queryset=None):
88 return Profile.objects.get(user=self.request.user)
89
90 def get_initial(self):
91 profile = Profile.objects.get(user=self.request.user)
92 self.initial.update(
93 {
94 "username": profile.user.username,
95 "email": profile.user.email,
96 "first_name": profile.first_name or None,
97 "last_name": profile.last_name or None,
98 "about_me": profile.about_me or None,
99 }
100 )
101 return super(SettingsView, self).get_initial()
102
103
104 class ProfileActivationView(View):
105 """
106 This shows different views to the user when they are verifying
107 their account based on whether they are already verified or not.
108 """
109
110 def get(self, request, uidb64, token):
111
112 User = get_user_model()
113 try:
114 uid = force_str(urlsafe_base64_decode(uidb64))
115 user = User.objects.get(pk=uid)
116
117 except (TypeError, ValueError, OverflowError, User.DoesNotExist):
118 user = None
119
120 if user is not None and account_activation_token.check_token(user, token):
121 profile = Profile.objects.get(user=user)
122 if profile.is_verified:
123 redirect_link = {"href": "/", "label": "Back to Main"}
124 template_var = {
125 "title": "Email Already Verified",
126 "content": "You have already verified your email",
127 "link": redirect_link,
128 }
129 else:
130 profile.is_verified = True
131 profile.save()
132
133 redirect_link = {"href": "/", "label": "Back to Main"}
134 template_var = {
135 "title": "Email Verification Successful",
136 "content": "Thank you for verifying your email with CiviWiki",
137 "link": redirect_link,
138 }
139 else:
140 # invalid link
141 redirect_link = {"href": "/", "label": "Back to Main"}
142 template_var = {
143 "title": "Email Verification Error",
144 "content": "Email could not be verified",
145 "link": redirect_link,
146 }
147
148 return TemplateResponse(request, "general_message.html", template_var)
149
150
151 class ProfileSetupView(LoginRequiredMixin, View):
152 """A view to make the user profile full_profile"""
153
154 login_url = "accounts_login"
155
156 def get(self, request):
157 profile = Profile.objects.get(user=request.user)
158 if profile.full_profile:
159 return HttpResponseRedirect("/")
160 # start temp rep rendering TODO: REMOVE THIS
161 else:
162 data = {
163 "username": request.user.username,
164 "email": request.user.email,
165 }
166 return TemplateResponse(request, "accounts/user-setup.html", data)
167
168
169 @login_required
170 @full_profile
171 def user_profile(request, username=None):
172 User = get_user_model()
173 if request.method == "GET":
174 if not username:
175 return HttpResponseRedirect(f"/profile/{request.user}")
176 else:
177 is_owner = username == request.user.username
178 try:
179 user = User.objects.get(username=username)
180 profile = user.profile_set.first()
181 except User.DoesNotExist:
182 return HttpResponseRedirect("/404")
183
184 form = ProfileEditForm(
185 initial={
186 "username": user.username,
187 "email": user.email,
188 "first_name": profile.first_name or None,
189 "last_name": profile.last_name or None,
190 "about_me": profile.about_me or None,
191 },
192 readonly=True,
193 )
194 data = {
195 "username": user,
196 "profile_image_form": UpdateProfileImage,
197 "form": form if is_owner else None,
198 "readonly": True,
199 }
200 return TemplateResponse(request, "account.html", data)
201
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/project/accounts/views.py b/project/accounts/views.py
--- a/project/accounts/views.py
+++ b/project/accounts/views.py
@@ -177,7 +177,6 @@
is_owner = username == request.user.username
try:
user = User.objects.get(username=username)
- profile = user.profile_set.first()
except User.DoesNotExist:
return HttpResponseRedirect("/404")
@@ -185,9 +184,9 @@
initial={
"username": user.username,
"email": user.email,
- "first_name": profile.first_name or None,
- "last_name": profile.last_name or None,
- "about_me": profile.about_me or None,
+ "first_name": user.profile.first_name or None,
+ "last_name": user.profile.last_name or None,
+ "about_me": user.profile.about_me or None,
},
readonly=True,
)
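The fix replaces the reverse-manager lookup `user.profile_set.first()` with the direct accessor `user.profile`. For that accessor to exist, the `Profile` model has to expose it, for example through a one-to-one relation. A hedged sketch of such a declaration (field names come from the view above, but the field types and options are assumptions, not taken from the repository):

```python
from django.conf import settings
from django.db import models


class Profile(models.Model):
    # A OneToOneField (or a related_name of "profile") is what makes
    # `user.profile` in the patched view resolve to this row.
    user = models.OneToOneField(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        related_name="profile",
    )
    first_name = models.CharField(max_length=63, blank=True, null=True)
    last_name = models.CharField(max_length=63, blank=True, null=True)
    about_me = models.TextField(blank=True, null=True)
```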
| {"golden_diff": "diff --git a/project/accounts/views.py b/project/accounts/views.py\n--- a/project/accounts/views.py\n+++ b/project/accounts/views.py\n@@ -177,7 +177,6 @@\n is_owner = username == request.user.username\n try:\n user = User.objects.get(username=username)\n- profile = user.profile_set.first()\n except User.DoesNotExist:\n return HttpResponseRedirect(\"/404\")\n \n@@ -185,9 +184,9 @@\n initial={\n \"username\": user.username,\n \"email\": user.email,\n- \"first_name\": profile.first_name or None,\n- \"last_name\": profile.last_name or None,\n- \"about_me\": profile.about_me or None,\n+ \"first_name\": user.profile.first_name or None,\n+ \"last_name\": user.profile.last_name or None,\n+ \"about_me\": user.profile.about_me or None,\n },\n readonly=True,\n )\n", "issue": "[BUG] Fix Bugs when profile is being viewed\n### Description\n\nThe serialization is not correct when the registered user tries to view their own profile\r\n\r\n\r\n\r\nThis comes from the serialization part of our code-base which can be viewed [here](https://github.com/CiviWiki/OpenCiviWiki/blob/develop/project/threads/views.py#L181-L245)\r\n\r\n```py\r\nthread_wiki_data = {\r\n \"thread_id\": thread_id,\r\n \"title\": Thread_filter.title,\r\n \"summary\": Thread_filter.summary,\r\n \"image\": Thread_filter.image_url,\r\n \"author\": {\r\n \"username\": Thread_filter.author.user.username,\r\n \"profile_image\": Thread_filter.author.profile_image_url,\r\n \"first_name\": Thread_filter.author.first_name,\r\n \"last_name\": Thread_filter.author.last_name,\r\n },\r\n \"contributors\": [\r\n Profile.objects.chip_summarize(a)\r\n for a in Profile.objects.filter(\r\n pk__in=civis.distinct(\"author\").values_list(\"author\", flat=True)\r\n )\r\n ],\r\n \"category\": {\r\n \"id\": Thread_filter.category.id,\r\n \"name\": Thread_filter.category.name,\r\n },\r\n \"categories\": [{\"id\": c.id, \"name\": c.name} for c in Category.objects.all()],\r\n \"created\": Thread_filter.created_date_str,\r\n \"num_civis\": Thread_filter.num_civis,\r\n \"num_views\": Thread_filter.num_views,\r\n \"user_votes\": [\r\n {\r\n \"civi_id\": act.civi.id,\r\n \"activity_type\": act.activity_type,\r\n \"c_type\": act.civi.c_type,\r\n }\r\n for act in Activity.objects.filter(\r\n thread=Thread_filter.id, account=req_acct.id\r\n )\r\n ],\r\n }\r\n```\n\n### What should have happened?\n\nThe serialization should return user appropriately\n\n### What browser(s) are you seeing the problem on?\n\nChrome, Firefox, Microsoft Edge, Safari\n\n### Further details\n\n\n", "before_files": [{"content": "\"\"\"\nClass based views.\n\nThis module will include views for the accounts app.\n\"\"\"\n\nfrom core.custom_decorators import full_profile, login_required\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model, login\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.http import HttpResponseRedirect\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse_lazy\nfrom django.utils.encoding import force_str\nfrom django.utils.http import urlsafe_base64_decode\nfrom django.views import View\nfrom django.views.generic.edit import FormView, UpdateView\n\nfrom accounts.authentication import account_activation_token, send_activation_email\nfrom accounts.forms import ProfileEditForm, UpdateProfileImage, UserRegistrationForm\nfrom accounts.models import Profile\n\n\nclass 
RegisterView(FormView):\n \"\"\"\n A form view that handles user registration.\n \"\"\"\n\n template_name = \"accounts/register/register.html\"\n form_class = UserRegistrationForm\n success_url = \"/\"\n\n def _create_user(self, form):\n username = form.cleaned_data[\"username\"]\n password = form.cleaned_data[\"password\"]\n email = form.cleaned_data[\"email\"]\n user = get_user_model().objects.create_user(username, email, password)\n return user\n\n def _send_email(self, user):\n domain = get_current_site(self.request).domain\n send_activation_email(user, domain)\n\n def _login(self, user):\n login(self.request, user)\n\n def form_valid(self, form):\n user = self._create_user(form)\n\n self._send_email(user)\n self._login(user)\n\n return super(RegisterView, self).form_valid(form)\n\n\nclass PasswordResetView(auth_views.PasswordResetView):\n template_name = \"accounts/users/password_reset.html\"\n email_template_name = \"accounts/users/password_reset_email.html\"\n subject_template_name = \"accounts/users/password_reset_subject.txt\"\n from_email = settings.EMAIL_HOST_USER\n success_url = reverse_lazy(\"accounts_password_reset_done\")\n\n\nclass PasswordResetDoneView(auth_views.PasswordResetDoneView):\n template_name = \"accounts/users/password_reset_done.html\"\n\n\nclass PasswordResetConfirmView(auth_views.PasswordResetConfirmView):\n template_name = \"accounts/users/password_reset_confirm.html\"\n success_url = reverse_lazy(\"accounts_password_reset_complete\")\n\n\nclass PasswordResetCompleteView(auth_views.PasswordResetCompleteView):\n template_name = \"accounts/users/password_reset_complete.html\"\n\n\nclass SettingsView(LoginRequiredMixin, UpdateView):\n \"\"\"A form view to edit Profile\"\"\"\n\n login_url = \"accounts_login\"\n form_class = ProfileEditForm\n success_url = reverse_lazy(\"accounts_settings\")\n template_name = \"accounts/update_settings.html\"\n\n def get_object(self, queryset=None):\n return Profile.objects.get(user=self.request.user)\n\n def get_initial(self):\n profile = Profile.objects.get(user=self.request.user)\n self.initial.update(\n {\n \"username\": profile.user.username,\n \"email\": profile.user.email,\n \"first_name\": profile.first_name or None,\n \"last_name\": profile.last_name or None,\n \"about_me\": profile.about_me or None,\n }\n )\n return super(SettingsView, self).get_initial()\n\n\nclass ProfileActivationView(View):\n \"\"\"\n This shows different views to the user when they are verifying\n their account based on whether they are already verified or not.\n \"\"\"\n\n def get(self, request, uidb64, token):\n\n User = get_user_model()\n try:\n uid = force_str(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n\n except (TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = None\n\n if user is not None and account_activation_token.check_token(user, token):\n profile = Profile.objects.get(user=user)\n if profile.is_verified:\n redirect_link = {\"href\": \"/\", \"label\": \"Back to Main\"}\n template_var = {\n \"title\": \"Email Already Verified\",\n \"content\": \"You have already verified your email\",\n \"link\": redirect_link,\n }\n else:\n profile.is_verified = True\n profile.save()\n\n redirect_link = {\"href\": \"/\", \"label\": \"Back to Main\"}\n template_var = {\n \"title\": \"Email Verification Successful\",\n \"content\": \"Thank you for verifying your email with CiviWiki\",\n \"link\": redirect_link,\n }\n else:\n # invalid link\n redirect_link = {\"href\": \"/\", \"label\": \"Back to Main\"}\n 
template_var = {\n \"title\": \"Email Verification Error\",\n \"content\": \"Email could not be verified\",\n \"link\": redirect_link,\n }\n\n return TemplateResponse(request, \"general_message.html\", template_var)\n\n\nclass ProfileSetupView(LoginRequiredMixin, View):\n \"\"\"A view to make the user profile full_profile\"\"\"\n\n login_url = \"accounts_login\"\n\n def get(self, request):\n profile = Profile.objects.get(user=request.user)\n if profile.full_profile:\n return HttpResponseRedirect(\"/\")\n # start temp rep rendering TODO: REMOVE THIS\n else:\n data = {\n \"username\": request.user.username,\n \"email\": request.user.email,\n }\n return TemplateResponse(request, \"accounts/user-setup.html\", data)\n\n\n@login_required\n@full_profile\ndef user_profile(request, username=None):\n User = get_user_model()\n if request.method == \"GET\":\n if not username:\n return HttpResponseRedirect(f\"/profile/{request.user}\")\n else:\n is_owner = username == request.user.username\n try:\n user = User.objects.get(username=username)\n profile = user.profile_set.first()\n except User.DoesNotExist:\n return HttpResponseRedirect(\"/404\")\n\n form = ProfileEditForm(\n initial={\n \"username\": user.username,\n \"email\": user.email,\n \"first_name\": profile.first_name or None,\n \"last_name\": profile.last_name or None,\n \"about_me\": profile.about_me or None,\n },\n readonly=True,\n )\n data = {\n \"username\": user,\n \"profile_image_form\": UpdateProfileImage,\n \"form\": form if is_owner else None,\n \"readonly\": True,\n }\n return TemplateResponse(request, \"account.html\", data)\n", "path": "project/accounts/views.py"}], "after_files": [{"content": "\"\"\"\nClass based views.\n\nThis module will include views for the accounts app.\n\"\"\"\n\nfrom core.custom_decorators import full_profile, login_required\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model, login\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.http import HttpResponseRedirect\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse_lazy\nfrom django.utils.encoding import force_str\nfrom django.utils.http import urlsafe_base64_decode\nfrom django.views import View\nfrom django.views.generic.edit import FormView, UpdateView\n\nfrom accounts.authentication import account_activation_token, send_activation_email\nfrom accounts.forms import ProfileEditForm, UpdateProfileImage, UserRegistrationForm\nfrom accounts.models import Profile\n\n\nclass RegisterView(FormView):\n \"\"\"\n A form view that handles user registration.\n \"\"\"\n\n template_name = \"accounts/register/register.html\"\n form_class = UserRegistrationForm\n success_url = \"/\"\n\n def _create_user(self, form):\n username = form.cleaned_data[\"username\"]\n password = form.cleaned_data[\"password\"]\n email = form.cleaned_data[\"email\"]\n user = get_user_model().objects.create_user(username, email, password)\n return user\n\n def _send_email(self, user):\n domain = get_current_site(self.request).domain\n send_activation_email(user, domain)\n\n def _login(self, user):\n login(self.request, user)\n\n def form_valid(self, form):\n user = self._create_user(form)\n\n self._send_email(user)\n self._login(user)\n\n return super(RegisterView, self).form_valid(form)\n\n\nclass PasswordResetView(auth_views.PasswordResetView):\n template_name = 
\"accounts/users/password_reset.html\"\n email_template_name = \"accounts/users/password_reset_email.html\"\n subject_template_name = \"accounts/users/password_reset_subject.txt\"\n from_email = settings.EMAIL_HOST_USER\n success_url = reverse_lazy(\"accounts_password_reset_done\")\n\n\nclass PasswordResetDoneView(auth_views.PasswordResetDoneView):\n template_name = \"accounts/users/password_reset_done.html\"\n\n\nclass PasswordResetConfirmView(auth_views.PasswordResetConfirmView):\n template_name = \"accounts/users/password_reset_confirm.html\"\n success_url = reverse_lazy(\"accounts_password_reset_complete\")\n\n\nclass PasswordResetCompleteView(auth_views.PasswordResetCompleteView):\n template_name = \"accounts/users/password_reset_complete.html\"\n\n\nclass SettingsView(LoginRequiredMixin, UpdateView):\n \"\"\"A form view to edit Profile\"\"\"\n\n login_url = \"accounts_login\"\n form_class = ProfileEditForm\n success_url = reverse_lazy(\"accounts_settings\")\n template_name = \"accounts/update_settings.html\"\n\n def get_object(self, queryset=None):\n return Profile.objects.get(user=self.request.user)\n\n def get_initial(self):\n profile = Profile.objects.get(user=self.request.user)\n self.initial.update(\n {\n \"username\": profile.user.username,\n \"email\": profile.user.email,\n \"first_name\": profile.first_name or None,\n \"last_name\": profile.last_name or None,\n \"about_me\": profile.about_me or None,\n }\n )\n return super(SettingsView, self).get_initial()\n\n\nclass ProfileActivationView(View):\n \"\"\"\n This shows different views to the user when they are verifying\n their account based on whether they are already verified or not.\n \"\"\"\n\n def get(self, request, uidb64, token):\n\n User = get_user_model()\n try:\n uid = force_str(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n\n except (TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = None\n\n if user is not None and account_activation_token.check_token(user, token):\n profile = Profile.objects.get(user=user)\n if profile.is_verified:\n redirect_link = {\"href\": \"/\", \"label\": \"Back to Main\"}\n template_var = {\n \"title\": \"Email Already Verified\",\n \"content\": \"You have already verified your email\",\n \"link\": redirect_link,\n }\n else:\n profile.is_verified = True\n profile.save()\n\n redirect_link = {\"href\": \"/\", \"label\": \"Back to Main\"}\n template_var = {\n \"title\": \"Email Verification Successful\",\n \"content\": \"Thank you for verifying your email with CiviWiki\",\n \"link\": redirect_link,\n }\n else:\n # invalid link\n redirect_link = {\"href\": \"/\", \"label\": \"Back to Main\"}\n template_var = {\n \"title\": \"Email Verification Error\",\n \"content\": \"Email could not be verified\",\n \"link\": redirect_link,\n }\n\n return TemplateResponse(request, \"general_message.html\", template_var)\n\n\nclass ProfileSetupView(LoginRequiredMixin, View):\n \"\"\"A view to make the user profile full_profile\"\"\"\n\n login_url = \"accounts_login\"\n\n def get(self, request):\n profile = Profile.objects.get(user=request.user)\n if profile.full_profile:\n return HttpResponseRedirect(\"/\")\n # start temp rep rendering TODO: REMOVE THIS\n else:\n data = {\n \"username\": request.user.username,\n \"email\": request.user.email,\n }\n return TemplateResponse(request, \"accounts/user-setup.html\", data)\n\n\n@login_required\n@full_profile\ndef user_profile(request, username=None):\n User = get_user_model()\n if request.method == \"GET\":\n if not username:\n 
return HttpResponseRedirect(f\"/profile/{request.user}\")\n else:\n is_owner = username == request.user.username\n try:\n user = User.objects.get(username=username)\n except User.DoesNotExist:\n return HttpResponseRedirect(\"/404\")\n\n form = ProfileEditForm(\n initial={\n \"username\": user.username,\n \"email\": user.email,\n \"first_name\": user.profile.first_name or None,\n \"last_name\": user.profile.last_name or None,\n \"about_me\": user.profile.about_me or None,\n },\n readonly=True,\n )\n data = {\n \"username\": user,\n \"profile_image_form\": UpdateProfileImage,\n \"form\": form if is_owner else None,\n \"readonly\": True,\n }\n return TemplateResponse(request, \"account.html\", data)\n", "path": "project/accounts/views.py"}]} | 2,567 | 201 |
gh_patches_debug_807 | rasdani/github-patches | git_diff | bokeh__bokeh-10106 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] `cd sphinx; make serve` doesn't work
#### ALL software version info (bokeh, python, notebook, OS, browser, any other relevant packages)
Bokeh 2.0.2-76-ga417746c9
#### Description of expected behavior and the observed behavior
The page at https://docs.bokeh.org/en/latest/docs/dev_guide/documentation.html mentions that it's possible to run `make serve` to serve the documentation locally. But running it results in:
```
Exception in thread Thread-2:
Traceback (most recent call last):
File "/home/p-himik/soft/miniconda3/envs/bokeh-dev/lib/python3.7/threading.py", line 917, in _bootstrap_inner
self.run()
File "/home/p-himik/soft/miniconda3/envs/bokeh-dev/lib/python3.7/threading.py", line 865, in run
self._target(*self._args, **self._kwargs)
File "docserver.py", line 43, in open_browser
webbrowser.open("http://localhost:%d/en/latest/index.html" % PORT, new="tab")
File "/home/p-himik/soft/miniconda3/envs/bokeh-dev/lib/python3.7/webbrowser.py", line 78, in open
if browser.open(url, new, autoraise):
File "/home/p-himik/soft/miniconda3/envs/bokeh-dev/lib/python3.7/webbrowser.py", line 251, in open
"expected 0, 1, or 2, got %s" % new)
webbrowser.Error: Bad 'new' parameter to open(); expected 0, 1, or 2, got tab
```
Not sure where `"tab"` has come from, but it has been there forever.
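For reference, the standard-library `webbrowser.open` only accepts integer values for `new`: `0` reuses the same window, `1` asks for a new window, and `2` asks for a new tab where the browser supports it. So the call the docserver presumably intends is something like:

```python
import webbrowser

# new=2 requests a new browser tab where supported (stdlib webbrowser docs).
webbrowser.open("http://localhost:5009/en/latest/index.html", new=2)
```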
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sphinx/docserver.py`
Content:
```
1 import os
2 import sys
3 import threading
4 import time
5 import webbrowser
6
7 import flask
8 import tornado
9 from tornado.httpserver import HTTPServer
10 from tornado.ioloop import IOLoop
11 from tornado.wsgi import WSGIContainer
12
13 _basedir = os.path.join("..", os.path.dirname(__file__))
14
15 app = flask.Flask(__name__, static_folder="/unused")
16 PORT=5009
17 http_server = HTTPServer(WSGIContainer(app))
18
19 @app.route('/')
20 def welcome():
21 return """
22 <h1>Welcome to the Bokeh documentation server</h1>
23 You probably want to go to <a href="/en/latest/index.html"> Index</a>
24 """
25
26 @app.route('/versions.json')
27 def send_versions():
28 return flask.send_from_directory(
29 os.path.join(_basedir, "sphinx"), "test_versions.json")
30
31 @app.route('/alert.html')
32 def send_alert():
33 return os.environ.get("BOKEH_DOCS_ALERT", "")
34
35 @app.route('/en/latest/<path:filename>')
36 def send_docs(filename):
37 return flask.send_from_directory(
38 os.path.join(_basedir, "sphinx/build/html/"), filename)
39
40 def open_browser():
41 # Child process
42 time.sleep(0.5)
43 webbrowser.open("http://localhost:%d/en/latest/index.html" % PORT, new="tab")
44
45 data = {}
46
47 def serve_http():
48 data['ioloop'] = IOLoop()
49 http_server.listen(PORT)
50 IOLoop.current().start()
51
52 def shutdown_server():
53 ioloop = data['ioloop']
54 ioloop.add_callback(ioloop.stop)
55 print("Asked Server to shut down.")
56
57 def ui():
58 try:
59 time.sleep(0.5)
60 input("Press <ENTER> to exit...\n") # lgtm [py/use-of-input]
61 except KeyboardInterrupt:
62 pass
63
64 if __name__ == "__main__":
65
66 if tornado.version_info[0] == 4:
67 print('docserver.py script requires tornado 5 or higher')
68 sys.exit(1)
69
70 print("\nStarting Bokeh plot server on port %d..." % PORT)
71 print("Visit http://localhost:%d/en/latest/index.html to see plots\n" % PORT)
72
73 t_server = threading.Thread(target=serve_http)
74 t_server.start()
75 t_browser = threading.Thread(target=open_browser)
76 t_browser.start()
77
78 ui()
79
80 shutdown_server()
81 t_server.join()
82 t_browser.join()
83 print("Server shut down.")
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sphinx/docserver.py b/sphinx/docserver.py
--- a/sphinx/docserver.py
+++ b/sphinx/docserver.py
@@ -40,7 +40,7 @@
def open_browser():
# Child process
time.sleep(0.5)
- webbrowser.open("http://localhost:%d/en/latest/index.html" % PORT, new="tab")
+ webbrowser.open("http://localhost:%d/en/latest/index.html" % PORT, new=2)
data = {}
| {"golden_diff": "diff --git a/sphinx/docserver.py b/sphinx/docserver.py\n--- a/sphinx/docserver.py\n+++ b/sphinx/docserver.py\n@@ -40,7 +40,7 @@\n def open_browser():\n # Child process\n time.sleep(0.5)\n- webbrowser.open(\"http://localhost:%d/en/latest/index.html\" % PORT, new=\"tab\")\n+ webbrowser.open(\"http://localhost:%d/en/latest/index.html\" % PORT, new=2)\n \n data = {}\n", "issue": "[BUG] `cd sphinx; make serve` doesn't work\n#### ALL software version info (bokeh, python, notebook, OS, browser, any other relevant packages)\r\nBokeh 2.0.2-76-ga417746c9\r\n\r\n#### Description of expected behavior and the observed behavior\r\nThe page at https://docs.bokeh.org/en/latest/docs/dev_guide/documentation.html mentions that it's possible to run `make serve` to serve the documentation locally. But running it results in:\r\n```\r\nException in thread Thread-2:\r\nTraceback (most recent call last):\r\n File \"/home/p-himik/soft/miniconda3/envs/bokeh-dev/lib/python3.7/threading.py\", line 917, in _bootstrap_inner\r\n self.run()\r\n File \"/home/p-himik/soft/miniconda3/envs/bokeh-dev/lib/python3.7/threading.py\", line 865, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"docserver.py\", line 43, in open_browser\r\n webbrowser.open(\"http://localhost:%d/en/latest/index.html\" % PORT, new=\"tab\")\r\n File \"/home/p-himik/soft/miniconda3/envs/bokeh-dev/lib/python3.7/webbrowser.py\", line 78, in open\r\n if browser.open(url, new, autoraise):\r\n File \"/home/p-himik/soft/miniconda3/envs/bokeh-dev/lib/python3.7/webbrowser.py\", line 251, in open\r\n \"expected 0, 1, or 2, got %s\" % new)\r\nwebbrowser.Error: Bad 'new' parameter to open(); expected 0, 1, or 2, got tab\r\n```\r\nNot sure where `\"tab\"` has come from, but it has been there forever.\n", "before_files": [{"content": "import os\nimport sys\nimport threading\nimport time\nimport webbrowser\n\nimport flask\nimport tornado\nfrom tornado.httpserver import HTTPServer\nfrom tornado.ioloop import IOLoop\nfrom tornado.wsgi import WSGIContainer\n\n_basedir = os.path.join(\"..\", os.path.dirname(__file__))\n\napp = flask.Flask(__name__, static_folder=\"/unused\")\nPORT=5009\nhttp_server = HTTPServer(WSGIContainer(app))\n\[email protected]('/')\ndef welcome():\n return \"\"\"\n <h1>Welcome to the Bokeh documentation server</h1>\n You probably want to go to <a href=\"/en/latest/index.html\"> Index</a>\n \"\"\"\n\[email protected]('/versions.json')\ndef send_versions():\n return flask.send_from_directory(\n os.path.join(_basedir, \"sphinx\"), \"test_versions.json\")\n\[email protected]('/alert.html')\ndef send_alert():\n return os.environ.get(\"BOKEH_DOCS_ALERT\", \"\")\n\[email protected]('/en/latest/<path:filename>')\ndef send_docs(filename):\n return flask.send_from_directory(\n os.path.join(_basedir, \"sphinx/build/html/\"), filename)\n\ndef open_browser():\n # Child process\n time.sleep(0.5)\n webbrowser.open(\"http://localhost:%d/en/latest/index.html\" % PORT, new=\"tab\")\n\ndata = {}\n\ndef serve_http():\n data['ioloop'] = IOLoop()\n http_server.listen(PORT)\n IOLoop.current().start()\n\ndef shutdown_server():\n ioloop = data['ioloop']\n ioloop.add_callback(ioloop.stop)\n print(\"Asked Server to shut down.\")\n\ndef ui():\n try:\n time.sleep(0.5)\n input(\"Press <ENTER> to exit...\\n\") # lgtm [py/use-of-input]\n except KeyboardInterrupt:\n pass\n\nif __name__ == \"__main__\":\n\n if tornado.version_info[0] == 4:\n print('docserver.py script requires tornado 5 or higher')\n sys.exit(1)\n\n print(\"\\nStarting Bokeh plot 
server on port %d...\" % PORT)\n print(\"Visit http://localhost:%d/en/latest/index.html to see plots\\n\" % PORT)\n\n t_server = threading.Thread(target=serve_http)\n t_server.start()\n t_browser = threading.Thread(target=open_browser)\n t_browser.start()\n\n ui()\n\n shutdown_server()\n t_server.join()\n t_browser.join()\n print(\"Server shut down.\")\n", "path": "sphinx/docserver.py"}], "after_files": [{"content": "import os\nimport sys\nimport threading\nimport time\nimport webbrowser\n\nimport flask\nimport tornado\nfrom tornado.httpserver import HTTPServer\nfrom tornado.ioloop import IOLoop\nfrom tornado.wsgi import WSGIContainer\n\n_basedir = os.path.join(\"..\", os.path.dirname(__file__))\n\napp = flask.Flask(__name__, static_folder=\"/unused\")\nPORT=5009\nhttp_server = HTTPServer(WSGIContainer(app))\n\[email protected]('/')\ndef welcome():\n return \"\"\"\n <h1>Welcome to the Bokeh documentation server</h1>\n You probably want to go to <a href=\"/en/latest/index.html\"> Index</a>\n \"\"\"\n\[email protected]('/versions.json')\ndef send_versions():\n return flask.send_from_directory(\n os.path.join(_basedir, \"sphinx\"), \"test_versions.json\")\n\[email protected]('/alert.html')\ndef send_alert():\n return os.environ.get(\"BOKEH_DOCS_ALERT\", \"\")\n\[email protected]('/en/latest/<path:filename>')\ndef send_docs(filename):\n return flask.send_from_directory(\n os.path.join(_basedir, \"sphinx/build/html/\"), filename)\n\ndef open_browser():\n # Child process\n time.sleep(0.5)\n webbrowser.open(\"http://localhost:%d/en/latest/index.html\" % PORT, new=2)\n\ndata = {}\n\ndef serve_http():\n data['ioloop'] = IOLoop()\n http_server.listen(PORT)\n IOLoop.current().start()\n\ndef shutdown_server():\n ioloop = data['ioloop']\n ioloop.add_callback(ioloop.stop)\n print(\"Asked Server to shut down.\")\n\ndef ui():\n try:\n time.sleep(0.5)\n input(\"Press <ENTER> to exit...\\n\") # lgtm [py/use-of-input]\n except KeyboardInterrupt:\n pass\n\nif __name__ == \"__main__\":\n\n if tornado.version_info[0] == 4:\n print('docserver.py script requires tornado 5 or higher')\n sys.exit(1)\n\n print(\"\\nStarting Bokeh plot server on port %d...\" % PORT)\n print(\"Visit http://localhost:%d/en/latest/index.html to see plots\\n\" % PORT)\n\n t_server = threading.Thread(target=serve_http)\n t_server.start()\n t_browser = threading.Thread(target=open_browser)\n t_browser.start()\n\n ui()\n\n shutdown_server()\n t_server.join()\n t_browser.join()\n print(\"Server shut down.\")\n", "path": "sphinx/docserver.py"}]} | 1,373 | 111 |
gh_patches_debug_3215 | rasdani/github-patches | git_diff | python-discord__bot-733 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Write unit tests for `bot/rules/newlines.py`
Write unit tests for [`bot/rules/newlines.py`](../blob/master/bot/rules/newlines.py).
## Implementation details
Please make sure to read the general information in the [meta issue](553) and the [testing README](../blob/master/tests/README.md). We are aiming for 100% [branch coverage](https://coverage.readthedocs.io/en/stable/branch.html) for this file, but if you think that is not possible, please discuss that in this issue.
## Additional information
If you want to work on this issue, **please make sure that you get assigned to it** by one of the core devs before starting to work on it. We would like to prevent the situation where multiple people are working on the same issue. To get assigned, leave a comment showing your interest in tackling this issue.
--- END ISSUE ---
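Since `bot/rules/newlines.py` itself is not included below, here is a hedged sketch of what such a test could look like, written against the `apply(last_message, recent_messages, config)` interface shared by the attachments rule that is listed. The import path and the plain `MagicMock` messages are assumptions and would normally be replaced by the project's own test helpers.
```python
import unittest
from unittest.mock import MagicMock

from bot.rules import attachments  # assumed import path


def make_message(author: str, total_attachments: int) -> MagicMock:
    """Build a minimal stand-in for a discord.Message."""
    message = MagicMock()
    message.author = author
    message.attachments = [MagicMock()] * total_attachments
    return message


class AttachmentRuleTests(unittest.IsolatedAsyncioTestCase):
    config = {"max": 5, "interval": 10}

    async def test_allows_attachments_within_limit(self):
        message = make_message("bob#0001", 2)
        self.assertIsNone(await attachments.apply(message, [message], self.config))

    async def test_flags_attachments_above_limit(self):
        message = make_message("bob#0001", 6)
        result = await attachments.apply(message, [message], self.config)
        self.assertIsNotNone(result)
```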
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bot/rules/attachments.py`
Content:
```
1 from typing import Dict, Iterable, List, Optional, Tuple
2
3 from discord import Member, Message
4
5
6 async def apply(
7 last_message: Message, recent_messages: List[Message], config: Dict[str, int]
8 ) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:
9 """Detects total attachments exceeding the limit sent by a single user."""
10 relevant_messages = tuple(
11 msg
12 for msg in recent_messages
13 if (
14 msg.author == last_message.author
15 and len(msg.attachments) > 0
16 )
17 )
18 total_recent_attachments = sum(len(msg.attachments) for msg in relevant_messages)
19
20 if total_recent_attachments > config['max']:
21 return (
22 f"sent {total_recent_attachments} attachments in {config['max']}s",
23 (last_message.author,),
24 relevant_messages
25 )
26 return None
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bot/rules/attachments.py b/bot/rules/attachments.py
--- a/bot/rules/attachments.py
+++ b/bot/rules/attachments.py
@@ -19,7 +19,7 @@
if total_recent_attachments > config['max']:
return (
- f"sent {total_recent_attachments} attachments in {config['max']}s",
+ f"sent {total_recent_attachments} attachments in {config['interval']}s",
(last_message.author,),
relevant_messages
)
| {"golden_diff": "diff --git a/bot/rules/attachments.py b/bot/rules/attachments.py\n--- a/bot/rules/attachments.py\n+++ b/bot/rules/attachments.py\n@@ -19,7 +19,7 @@\n \n if total_recent_attachments > config['max']:\n return (\n- f\"sent {total_recent_attachments} attachments in {config['max']}s\",\n+ f\"sent {total_recent_attachments} attachments in {config['interval']}s\",\n (last_message.author,),\n relevant_messages\n )\n", "issue": "Write unit tests for `bot/rules/newlines.py`\nWrite unit tests for [`bot/rules/newlines.py`](../blob/master/bot/rules/newlines.py).\n\n## Implementation details\nPlease make sure to read the general information in the [meta issue](553) and the [testing README](../blob/master/tests/README.md). We are aiming for a 100% [branch coverage](https://coverage.readthedocs.io/en/stable/branch.html) for this file, but if you think that is not possible, please discuss that in this issue.\n\n## Additional information\nIf you want to work on this issue, **please make sure that you get assigned to it** by one of the core devs before starting to work on it. We would like to prevent the situation that multiple people are working on the same issue. To get assigned, leave a comment showing your interesting in tackling this issue.\n\n", "before_files": [{"content": "from typing import Dict, Iterable, List, Optional, Tuple\n\nfrom discord import Member, Message\n\n\nasync def apply(\n last_message: Message, recent_messages: List[Message], config: Dict[str, int]\n) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:\n \"\"\"Detects total attachments exceeding the limit sent by a single user.\"\"\"\n relevant_messages = tuple(\n msg\n for msg in recent_messages\n if (\n msg.author == last_message.author\n and len(msg.attachments) > 0\n )\n )\n total_recent_attachments = sum(len(msg.attachments) for msg in relevant_messages)\n\n if total_recent_attachments > config['max']:\n return (\n f\"sent {total_recent_attachments} attachments in {config['max']}s\",\n (last_message.author,),\n relevant_messages\n )\n return None\n", "path": "bot/rules/attachments.py"}], "after_files": [{"content": "from typing import Dict, Iterable, List, Optional, Tuple\n\nfrom discord import Member, Message\n\n\nasync def apply(\n last_message: Message, recent_messages: List[Message], config: Dict[str, int]\n) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:\n \"\"\"Detects total attachments exceeding the limit sent by a single user.\"\"\"\n relevant_messages = tuple(\n msg\n for msg in recent_messages\n if (\n msg.author == last_message.author\n and len(msg.attachments) > 0\n )\n )\n total_recent_attachments = sum(len(msg.attachments) for msg in relevant_messages)\n\n if total_recent_attachments > config['max']:\n return (\n f\"sent {total_recent_attachments} attachments in {config['interval']}s\",\n (last_message.author,),\n relevant_messages\n )\n return None\n", "path": "bot/rules/attachments.py"}]} | 674 | 112 |
gh_patches_debug_26022 | rasdani/github-patches | git_diff | mindee__doctr-173 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[docs] Add a visualization of the example script in the README
While the readme specifies how you can use the example script, it does not show any visualization examples. We could easily add one to help users.
--- END ISSUE ---
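A README snippet along the lines of the docstring already present in `visualize_page` below could serve as that example; the `ocr_db_crnn` entry point and the random input page are taken from that docstring and may differ in later doctr releases.
```python
import numpy as np
import matplotlib.pyplot as plt

from doctr.models import ocr_db_crnn
from doctr.utils.visualization import visualize_page

model = ocr_db_crnn(pretrained=True)
page = (255 * np.random.rand(600, 800, 3)).astype(np.uint8)  # stand-in for a scanned page
out = model([[page]])

visualize_page(out[0].pages[0].export(), page)  # hover a box to see the recognized word
plt.show()
```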
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `doctr/utils/visualization.py`
Content:
```
1 # Copyright (C) 2021, Mindee.
2
3 # This program is licensed under the Apache License version 2.
4 # See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
5
6 import matplotlib.pyplot as plt
7 import matplotlib.patches as patches
8 import mplcursors
9 import numpy as np
10 from typing import Tuple, List, Dict, Any
11
12 from .common_types import BoundingBox
13
14 __all__ = ['visualize_page']
15
16
17 def create_patch(
18 geometry: BoundingBox,
19 label: str,
20 page_dimensions: Tuple[int, int],
21 color: Tuple[int, int, int],
22 alpha: float = 0.3,
23 linewidth: int = 2,
24 ) -> patches.Patch:
25 """Create a matplotlib patch (rectangle) bounding the element
26
27 Args:
28 geometry: bounding box of the element
29 label: label to display when hovered
30 page_dimensions: dimensions of the Page
31 color: color to draw box
32 alpha: opacity parameter to fill the boxes, 0 = transparent
33 linewidth: line width
34
35 Returns:
36 a rectangular Patch
37 """
38 h, w = page_dimensions
39 (xmin, ymin), (xmax, ymax) = geometry
40 xmin, xmax = xmin * w, xmax * w
41 ymin, ymax = ymin * h, ymax * h
42 rect = patches.Rectangle(
43 (xmin, ymin),
44 xmax - xmin,
45 ymax - ymin,
46 fill=True,
47 linewidth=linewidth,
48 edgecolor=(*color, alpha),
49 facecolor=(*color, alpha),
50 label=label
51 )
52 return rect
53
54
55 def visualize_page(
56 page: Dict[str, Any],
57 image: np.ndarray,
58 words_only: bool = True,
59 ) -> None:
60 """Visualize a full page with predicted blocks, lines and words
61
62 Example::
63 >>> import numpy as np
64 >>> import matplotlib.pyplot as plt
65 >>> from doctr.utils.visualization import visualize_page
66 >>> from doctr.models import ocr_db_crnn
67 >>> model = ocr_db_crnn(pretrained=True)
68 >>> input_page = (255 * np.random.rand(600, 800, 3)).astype(np.uint8)
69 >>> out = model([[input_page]])
70 >>> visualize_page(out[0].pages[0].export(), input_page)
71 >>> plt.show()
72
73 Args:
74 page: the exported Page of a Document
75 image: np array of the page, needs to have the same shape than page['dimensions']
76 words_only: whether only words should be displayed
77 """
78 # Display the image
79 _, ax = plt.subplots()
80 ax.imshow(image)
81 # hide both axis
82 ax.axis('off')
83
84 artists: List[patches.Patch] = [] # instantiate an empty list of patches (to be drawn on the page)
85
86 for block in page['blocks']:
87 if not words_only:
88 rect = create_patch(block['geometry'], 'block', page['dimensions'], (0, 1, 0), linewidth=1)
89 # add patch on figure
90 ax.add_patch(rect)
91 # add patch to cursor's artists
92 artists.append(rect)
93
94 for line in block['lines']:
95 if not words_only:
96 rect = create_patch(line['geometry'], 'line', page['dimensions'], (1, 0, 0), linewidth=1)
97 ax.add_patch(rect)
98 artists.append(rect)
99
100 for word in line['words']:
101 rect = create_patch(word['geometry'], f"{word['value']} (confidence: {word['confidence']:.2%})",
102 page['dimensions'], (0, 0, 1))
103 ax.add_patch(rect)
104 artists.append(rect)
105
106 if not words_only:
107 for artefact in block['artefacts']:
108 rect = create_patch(artefact['geometry'], 'artefact', page['dimensions'], (0.5, 0.5, 0.5), linewidth=1)
109 ax.add_patch(rect)
110 artists.append(rect)
111
112 # Create mlp Cursor to hover patches in artists
113 mplcursors.Cursor(artists, hover=2).connect("add", lambda sel: sel.annotation.set_text(sel.artist.get_label()))
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/doctr/utils/visualization.py b/doctr/utils/visualization.py
--- a/doctr/utils/visualization.py
+++ b/doctr/utils/visualization.py
@@ -56,6 +56,7 @@
page: Dict[str, Any],
image: np.ndarray,
words_only: bool = True,
+ scale: float = 10,
) -> None:
"""Visualize a full page with predicted blocks, lines and words
@@ -74,9 +75,13 @@
page: the exported Page of a Document
image: np array of the page, needs to have the same shape than page['dimensions']
words_only: whether only words should be displayed
+ scale: figsize of the largest windows side
"""
+ # Get proper scale and aspect ratio
+ h, w = image.shape[:2]
+ size = (scale * w / h, scale) if h > w else (scale, h / w * scale)
+ fig, ax = plt.subplots(figsize=size)
# Display the image
- _, ax = plt.subplots()
ax.imshow(image)
# hide both axis
ax.axis('off')
@@ -111,3 +116,4 @@
# Create mlp Cursor to hover patches in artists
mplcursors.Cursor(artists, hover=2).connect("add", lambda sel: sel.annotation.set_text(sel.artist.get_label()))
+ fig.tight_layout()
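With the patch above, the caller can also cap the figure size; a hypothetical call, assuming `page` is the input image array and `result` its exported prediction dict:
```python
import matplotlib.pyplot as plt
from doctr.utils.visualization import visualize_page

# scale bounds the longest side of the matplotlib figure (in inches) in the patched signature.
visualize_page(result, page, words_only=False, scale=6)
plt.show()
```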
| {"golden_diff": "diff --git a/doctr/utils/visualization.py b/doctr/utils/visualization.py\n--- a/doctr/utils/visualization.py\n+++ b/doctr/utils/visualization.py\n@@ -56,6 +56,7 @@\n page: Dict[str, Any],\n image: np.ndarray,\n words_only: bool = True,\n+ scale: float = 10,\n ) -> None:\n \"\"\"Visualize a full page with predicted blocks, lines and words\n \n@@ -74,9 +75,13 @@\n page: the exported Page of a Document\n image: np array of the page, needs to have the same shape than page['dimensions']\n words_only: whether only words should be displayed\n+ scale: figsize of the largest windows side\n \"\"\"\n+ # Get proper scale and aspect ratio\n+ h, w = image.shape[:2]\n+ size = (scale * w / h, scale) if h > w else (scale, h / w * scale)\n+ fig, ax = plt.subplots(figsize=size)\n # Display the image\n- _, ax = plt.subplots()\n ax.imshow(image)\n # hide both axis\n ax.axis('off')\n@@ -111,3 +116,4 @@\n \n # Create mlp Cursor to hover patches in artists\n mplcursors.Cursor(artists, hover=2).connect(\"add\", lambda sel: sel.annotation.set_text(sel.artist.get_label()))\n+ fig.tight_layout()\n", "issue": "[docs] Add a visualization of the example script in the README\nWhile the readme specifies how you can use the example script, it does not show any visualization examples. We could easily add one to help users.\n", "before_files": [{"content": "# Copyright (C) 2021, Mindee.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport mplcursors\nimport numpy as np\nfrom typing import Tuple, List, Dict, Any\n\nfrom .common_types import BoundingBox\n\n__all__ = ['visualize_page']\n\n\ndef create_patch(\n geometry: BoundingBox,\n label: str,\n page_dimensions: Tuple[int, int],\n color: Tuple[int, int, int],\n alpha: float = 0.3,\n linewidth: int = 2,\n) -> patches.Patch:\n \"\"\"Create a matplotlib patch (rectangle) bounding the element\n\n Args:\n geometry: bounding box of the element\n label: label to display when hovered\n page_dimensions: dimensions of the Page\n color: color to draw box\n alpha: opacity parameter to fill the boxes, 0 = transparent\n linewidth: line width\n\n Returns:\n a rectangular Patch\n \"\"\"\n h, w = page_dimensions\n (xmin, ymin), (xmax, ymax) = geometry\n xmin, xmax = xmin * w, xmax * w\n ymin, ymax = ymin * h, ymax * h\n rect = patches.Rectangle(\n (xmin, ymin),\n xmax - xmin,\n ymax - ymin,\n fill=True,\n linewidth=linewidth,\n edgecolor=(*color, alpha),\n facecolor=(*color, alpha),\n label=label\n )\n return rect\n\n\ndef visualize_page(\n page: Dict[str, Any],\n image: np.ndarray,\n words_only: bool = True,\n) -> None:\n \"\"\"Visualize a full page with predicted blocks, lines and words\n\n Example::\n >>> import numpy as np\n >>> import matplotlib.pyplot as plt\n >>> from doctr.utils.visualization import visualize_page\n >>> from doctr.models import ocr_db_crnn\n >>> model = ocr_db_crnn(pretrained=True)\n >>> input_page = (255 * np.random.rand(600, 800, 3)).astype(np.uint8)\n >>> out = model([[input_page]])\n >>> visualize_page(out[0].pages[0].export(), input_page)\n >>> plt.show()\n\n Args:\n page: the exported Page of a Document\n image: np array of the page, needs to have the same shape than page['dimensions']\n words_only: whether only words should be displayed\n \"\"\"\n # Display the image\n _, ax = plt.subplots()\n ax.imshow(image)\n # hide both axis\n ax.axis('off')\n\n 
artists: List[patches.Patch] = [] # instantiate an empty list of patches (to be drawn on the page)\n\n for block in page['blocks']:\n if not words_only:\n rect = create_patch(block['geometry'], 'block', page['dimensions'], (0, 1, 0), linewidth=1)\n # add patch on figure\n ax.add_patch(rect)\n # add patch to cursor's artists\n artists.append(rect)\n\n for line in block['lines']:\n if not words_only:\n rect = create_patch(line['geometry'], 'line', page['dimensions'], (1, 0, 0), linewidth=1)\n ax.add_patch(rect)\n artists.append(rect)\n\n for word in line['words']:\n rect = create_patch(word['geometry'], f\"{word['value']} (confidence: {word['confidence']:.2%})\",\n page['dimensions'], (0, 0, 1))\n ax.add_patch(rect)\n artists.append(rect)\n\n if not words_only:\n for artefact in block['artefacts']:\n rect = create_patch(artefact['geometry'], 'artefact', page['dimensions'], (0.5, 0.5, 0.5), linewidth=1)\n ax.add_patch(rect)\n artists.append(rect)\n\n # Create mlp Cursor to hover patches in artists\n mplcursors.Cursor(artists, hover=2).connect(\"add\", lambda sel: sel.annotation.set_text(sel.artist.get_label()))\n", "path": "doctr/utils/visualization.py"}], "after_files": [{"content": "# Copyright (C) 2021, Mindee.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport mplcursors\nimport numpy as np\nfrom typing import Tuple, List, Dict, Any\n\nfrom .common_types import BoundingBox\n\n__all__ = ['visualize_page']\n\n\ndef create_patch(\n geometry: BoundingBox,\n label: str,\n page_dimensions: Tuple[int, int],\n color: Tuple[int, int, int],\n alpha: float = 0.3,\n linewidth: int = 2,\n) -> patches.Patch:\n \"\"\"Create a matplotlib patch (rectangle) bounding the element\n\n Args:\n geometry: bounding box of the element\n label: label to display when hovered\n page_dimensions: dimensions of the Page\n color: color to draw box\n alpha: opacity parameter to fill the boxes, 0 = transparent\n linewidth: line width\n\n Returns:\n a rectangular Patch\n \"\"\"\n h, w = page_dimensions\n (xmin, ymin), (xmax, ymax) = geometry\n xmin, xmax = xmin * w, xmax * w\n ymin, ymax = ymin * h, ymax * h\n rect = patches.Rectangle(\n (xmin, ymin),\n xmax - xmin,\n ymax - ymin,\n fill=True,\n linewidth=linewidth,\n edgecolor=(*color, alpha),\n facecolor=(*color, alpha),\n label=label\n )\n return rect\n\n\ndef visualize_page(\n page: Dict[str, Any],\n image: np.ndarray,\n words_only: bool = True,\n scale: float = 10,\n) -> None:\n \"\"\"Visualize a full page with predicted blocks, lines and words\n\n Example::\n >>> import numpy as np\n >>> import matplotlib.pyplot as plt\n >>> from doctr.utils.visualization import visualize_page\n >>> from doctr.models import ocr_db_crnn\n >>> model = ocr_db_crnn(pretrained=True)\n >>> input_page = (255 * np.random.rand(600, 800, 3)).astype(np.uint8)\n >>> out = model([[input_page]])\n >>> visualize_page(out[0].pages[0].export(), input_page)\n >>> plt.show()\n\n Args:\n page: the exported Page of a Document\n image: np array of the page, needs to have the same shape than page['dimensions']\n words_only: whether only words should be displayed\n scale: figsize of the largest windows side\n \"\"\"\n # Get proper scale and aspect ratio\n h, w = image.shape[:2]\n size = (scale * w / h, scale) if h > w else (scale, h / w * scale)\n fig, ax = plt.subplots(figsize=size)\n # Display the image\n 
ax.imshow(image)\n # hide both axis\n ax.axis('off')\n\n artists: List[patches.Patch] = [] # instantiate an empty list of patches (to be drawn on the page)\n\n for block in page['blocks']:\n if not words_only:\n rect = create_patch(block['geometry'], 'block', page['dimensions'], (0, 1, 0), linewidth=1)\n # add patch on figure\n ax.add_patch(rect)\n # add patch to cursor's artists\n artists.append(rect)\n\n for line in block['lines']:\n if not words_only:\n rect = create_patch(line['geometry'], 'line', page['dimensions'], (1, 0, 0), linewidth=1)\n ax.add_patch(rect)\n artists.append(rect)\n\n for word in line['words']:\n rect = create_patch(word['geometry'], f\"{word['value']} (confidence: {word['confidence']:.2%})\",\n page['dimensions'], (0, 0, 1))\n ax.add_patch(rect)\n artists.append(rect)\n\n if not words_only:\n for artefact in block['artefacts']:\n rect = create_patch(artefact['geometry'], 'artefact', page['dimensions'], (0.5, 0.5, 0.5), linewidth=1)\n ax.add_patch(rect)\n artists.append(rect)\n\n # Create mlp Cursor to hover patches in artists\n mplcursors.Cursor(artists, hover=2).connect(\"add\", lambda sel: sel.annotation.set_text(sel.artist.get_label()))\n fig.tight_layout()\n", "path": "doctr/utils/visualization.py"}]} | 1,461 | 317 |
gh_patches_debug_36520 | rasdani/github-patches | git_diff | vacanza__python-holidays-1555 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update Denmark holidays
I've received an email with a link to https://www.norden.org/en/info-norden/public-holidays-denmark
The author complained about the absence of June 5th in the list of holiday dates:
> The calendar for Denmark does not include 5 June.
Denmark holidays need to be extended using the categories approach.
--- END ISSUE ---
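A sketch of the intended behaviour once the split into categories exists, reusing the library's `PUBLIC`/`OPTIONAL` constants; the exact constructor keyword mirrors the categories support already used elsewhere in the library and is an assumption here:
```python
from datetime import date

import holidays
from holidays.constants import OPTIONAL, PUBLIC

dk_public = holidays.Denmark(years=2024)  # PUBLIC is the default category
dk_full = holidays.Denmark(years=2024, categories=(PUBLIC, OPTIONAL))

print(date(2024, 6, 5) in dk_public)  # False - Constitution Day is not a public holiday
print(date(2024, 6, 5) in dk_full)    # True once Grundlovsdag is added as an optional day
```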
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `holidays/countries/denmark.py`
Content:
```
1 # python-holidays
2 # ---------------
3 # A fast, efficient Python library for generating country, province and state
4 # specific sets of holidays on the fly. It aims to make determining whether a
5 # specific date is a holiday as fast and flexible as possible.
6 #
7 # Authors: dr-prodigy <[email protected]> (c) 2017-2023
8 # ryanss <[email protected]> (c) 2014-2017
9 # Website: https://github.com/dr-prodigy/python-holidays
10 # License: MIT (see LICENSE file)
11
12 from datetime import timedelta as td
13 from gettext import gettext as tr
14
15 from holidays.groups import ChristianHolidays, InternationalHolidays
16 from holidays.holiday_base import HolidayBase
17
18
19 class Denmark(HolidayBase, ChristianHolidays, InternationalHolidays):
20 """
21 Denmark holidays.
22
23 References:
24 - https://en.wikipedia.org/wiki/Public_holidays_in_Denmark
25 - https://www.ft.dk/samling/20222/lovforslag/l13/index.htm
26 """
27
28 country = "DK"
29 default_language = "da"
30 supported_languages = ("da", "en_US", "uk")
31
32 def __init__(self, *args, **kwargs):
33 ChristianHolidays.__init__(self)
34 InternationalHolidays.__init__(self)
35 super().__init__(*args, **kwargs)
36
37 def _populate(self, year):
38 super()._populate(year)
39
40 # New Year's Day.
41 self._add_new_years_day(tr("Nytårsdag"))
42
43 # Holy Thursday.
44 self._add_holy_thursday(tr("Skærtorsdag"))
45
46 # Good Friday.
47 self._add_good_friday(tr("Langfredag"))
48
49 # Easter Sunday.
50 self._add_easter_sunday(tr("Påskedag"))
51
52 # Easter Monday.
53 self._add_easter_monday(tr("Anden påskedag"))
54
55 # See https://www.ft.dk/samling/20222/lovforslag/l13/index.htm
56 if year <= 2023:
57 # Great Day of Prayers.
58 self._add_holiday(tr("Store bededag"), self._easter_sunday + td(days=+26))
59
60 # Ascension Day.
61 self._add_ascension_thursday(tr("Kristi himmelfartsdag"))
62
63 # Whit Sunday.
64 self._add_whit_sunday(tr("Pinsedag"))
65
66 # Whit Monday.
67 self._add_whit_monday(tr("Anden pinsedag"))
68
69 # Christmas Day.
70 self._add_christmas_day(tr("Juledag"))
71
72 # Second Day of Christmas.
73 self._add_christmas_day_two(tr("Anden juledag"))
74
75
76 class DK(Denmark):
77 pass
78
79
80 class DNK(Denmark):
81 pass
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/holidays/countries/denmark.py b/holidays/countries/denmark.py
--- a/holidays/countries/denmark.py
+++ b/holidays/countries/denmark.py
@@ -12,6 +12,7 @@
from datetime import timedelta as td
from gettext import gettext as tr
+from holidays.constants import OPTIONAL, PUBLIC
from holidays.groups import ChristianHolidays, InternationalHolidays
from holidays.holiday_base import HolidayBase
@@ -22,11 +23,13 @@
References:
- https://en.wikipedia.org/wiki/Public_holidays_in_Denmark
+ - https://www.norden.org/en/info-norden/public-holidays-denmark
- https://www.ft.dk/samling/20222/lovforslag/l13/index.htm
"""
country = "DK"
default_language = "da"
+ supported_categories = {OPTIONAL, PUBLIC}
supported_languages = ("da", "en_US", "uk")
def __init__(self, *args, **kwargs):
@@ -34,9 +37,7 @@
InternationalHolidays.__init__(self)
super().__init__(*args, **kwargs)
- def _populate(self, year):
- super()._populate(year)
-
+ def _populate_public_holidays(self):
# New Year's Day.
self._add_new_years_day(tr("Nytårsdag"))
@@ -53,7 +54,7 @@
self._add_easter_monday(tr("Anden påskedag"))
# See https://www.ft.dk/samling/20222/lovforslag/l13/index.htm
- if year <= 2023:
+ if self._year <= 2023:
# Great Day of Prayers.
self._add_holiday(tr("Store bededag"), self._easter_sunday + td(days=+26))
@@ -72,6 +73,19 @@
# Second Day of Christmas.
self._add_christmas_day_two(tr("Anden juledag"))
+ def _populate_optional_holidays(self):
+ # International Workers' Day.
+ self._add_labor_day(tr("Arbejdernes kampdag"))
+
+ # Constitution Day.
+ self._add_holiday_jun_5(tr("Grundlovsdag"))
+
+ # Christmas Eve.
+ self._add_christmas_eve(tr("Juleaftensdag"))
+
+ # New Year's Eve.
+ self._add_new_years_eve(tr("Nytårsaften"))
+
class DK(Denmark):
pass
| {"golden_diff": "diff --git a/holidays/countries/denmark.py b/holidays/countries/denmark.py\n--- a/holidays/countries/denmark.py\n+++ b/holidays/countries/denmark.py\n@@ -12,6 +12,7 @@\n from datetime import timedelta as td\n from gettext import gettext as tr\n \n+from holidays.constants import OPTIONAL, PUBLIC\n from holidays.groups import ChristianHolidays, InternationalHolidays\n from holidays.holiday_base import HolidayBase\n \n@@ -22,11 +23,13 @@\n \n References:\n - https://en.wikipedia.org/wiki/Public_holidays_in_Denmark\n+ - https://www.norden.org/en/info-norden/public-holidays-denmark\n - https://www.ft.dk/samling/20222/lovforslag/l13/index.htm\n \"\"\"\n \n country = \"DK\"\n default_language = \"da\"\n+ supported_categories = {OPTIONAL, PUBLIC}\n supported_languages = (\"da\", \"en_US\", \"uk\")\n \n def __init__(self, *args, **kwargs):\n@@ -34,9 +37,7 @@\n InternationalHolidays.__init__(self)\n super().__init__(*args, **kwargs)\n \n- def _populate(self, year):\n- super()._populate(year)\n-\n+ def _populate_public_holidays(self):\n # New Year's Day.\n self._add_new_years_day(tr(\"Nyt\u00e5rsdag\"))\n \n@@ -53,7 +54,7 @@\n self._add_easter_monday(tr(\"Anden p\u00e5skedag\"))\n \n # See https://www.ft.dk/samling/20222/lovforslag/l13/index.htm\n- if year <= 2023:\n+ if self._year <= 2023:\n # Great Day of Prayers.\n self._add_holiday(tr(\"Store bededag\"), self._easter_sunday + td(days=+26))\n \n@@ -72,6 +73,19 @@\n # Second Day of Christmas.\n self._add_christmas_day_two(tr(\"Anden juledag\"))\n \n+ def _populate_optional_holidays(self):\n+ # International Workers' Day.\n+ self._add_labor_day(tr(\"Arbejdernes kampdag\"))\n+\n+ # Constitution Day.\n+ self._add_holiday_jun_5(tr(\"Grundlovsdag\"))\n+\n+ # Christmas Eve.\n+ self._add_christmas_eve(tr(\"Juleaftensdag\"))\n+\n+ # New Year's Eve.\n+ self._add_new_years_eve(tr(\"Nyt\u00e5rsaften\"))\n+\n \n class DK(Denmark):\n pass\n", "issue": "Update Denmark holidays\nI've received an email with a link to https://www.norden.org/en/info-norden/public-holidays-denmark\r\n\r\nThe author complained about absence of June 5th in the list of holiday dates:\r\n\r\n> The calendar for Denmark does not include 5 June.\r\n\r\nDenmark holidays need to be extended using categories approach.\n", "before_files": [{"content": "# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. 
It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: dr-prodigy <[email protected]> (c) 2017-2023\n# ryanss <[email protected]> (c) 2014-2017\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nfrom datetime import timedelta as td\nfrom gettext import gettext as tr\n\nfrom holidays.groups import ChristianHolidays, InternationalHolidays\nfrom holidays.holiday_base import HolidayBase\n\n\nclass Denmark(HolidayBase, ChristianHolidays, InternationalHolidays):\n \"\"\"\n Denmark holidays.\n\n References:\n - https://en.wikipedia.org/wiki/Public_holidays_in_Denmark\n - https://www.ft.dk/samling/20222/lovforslag/l13/index.htm\n \"\"\"\n\n country = \"DK\"\n default_language = \"da\"\n supported_languages = (\"da\", \"en_US\", \"uk\")\n\n def __init__(self, *args, **kwargs):\n ChristianHolidays.__init__(self)\n InternationalHolidays.__init__(self)\n super().__init__(*args, **kwargs)\n\n def _populate(self, year):\n super()._populate(year)\n\n # New Year's Day.\n self._add_new_years_day(tr(\"Nyt\u00e5rsdag\"))\n\n # Holy Thursday.\n self._add_holy_thursday(tr(\"Sk\u00e6rtorsdag\"))\n\n # Good Friday.\n self._add_good_friday(tr(\"Langfredag\"))\n\n # Easter Sunday.\n self._add_easter_sunday(tr(\"P\u00e5skedag\"))\n\n # Easter Monday.\n self._add_easter_monday(tr(\"Anden p\u00e5skedag\"))\n\n # See https://www.ft.dk/samling/20222/lovforslag/l13/index.htm\n if year <= 2023:\n # Great Day of Prayers.\n self._add_holiday(tr(\"Store bededag\"), self._easter_sunday + td(days=+26))\n\n # Ascension Day.\n self._add_ascension_thursday(tr(\"Kristi himmelfartsdag\"))\n\n # Whit Sunday.\n self._add_whit_sunday(tr(\"Pinsedag\"))\n\n # Whit Monday.\n self._add_whit_monday(tr(\"Anden pinsedag\"))\n\n # Christmas Day.\n self._add_christmas_day(tr(\"Juledag\"))\n\n # Second Day of Christmas.\n self._add_christmas_day_two(tr(\"Anden juledag\"))\n\n\nclass DK(Denmark):\n pass\n\n\nclass DNK(Denmark):\n pass\n", "path": "holidays/countries/denmark.py"}], "after_files": [{"content": "# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. 
It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: dr-prodigy <[email protected]> (c) 2017-2023\n# ryanss <[email protected]> (c) 2014-2017\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nfrom datetime import timedelta as td\nfrom gettext import gettext as tr\n\nfrom holidays.constants import OPTIONAL, PUBLIC\nfrom holidays.groups import ChristianHolidays, InternationalHolidays\nfrom holidays.holiday_base import HolidayBase\n\n\nclass Denmark(HolidayBase, ChristianHolidays, InternationalHolidays):\n \"\"\"\n Denmark holidays.\n\n References:\n - https://en.wikipedia.org/wiki/Public_holidays_in_Denmark\n - https://www.norden.org/en/info-norden/public-holidays-denmark\n - https://www.ft.dk/samling/20222/lovforslag/l13/index.htm\n \"\"\"\n\n country = \"DK\"\n default_language = \"da\"\n supported_categories = {OPTIONAL, PUBLIC}\n supported_languages = (\"da\", \"en_US\", \"uk\")\n\n def __init__(self, *args, **kwargs):\n ChristianHolidays.__init__(self)\n InternationalHolidays.__init__(self)\n super().__init__(*args, **kwargs)\n\n def _populate_public_holidays(self):\n # New Year's Day.\n self._add_new_years_day(tr(\"Nyt\u00e5rsdag\"))\n\n # Holy Thursday.\n self._add_holy_thursday(tr(\"Sk\u00e6rtorsdag\"))\n\n # Good Friday.\n self._add_good_friday(tr(\"Langfredag\"))\n\n # Easter Sunday.\n self._add_easter_sunday(tr(\"P\u00e5skedag\"))\n\n # Easter Monday.\n self._add_easter_monday(tr(\"Anden p\u00e5skedag\"))\n\n # See https://www.ft.dk/samling/20222/lovforslag/l13/index.htm\n if self._year <= 2023:\n # Great Day of Prayers.\n self._add_holiday(tr(\"Store bededag\"), self._easter_sunday + td(days=+26))\n\n # Ascension Day.\n self._add_ascension_thursday(tr(\"Kristi himmelfartsdag\"))\n\n # Whit Sunday.\n self._add_whit_sunday(tr(\"Pinsedag\"))\n\n # Whit Monday.\n self._add_whit_monday(tr(\"Anden pinsedag\"))\n\n # Christmas Day.\n self._add_christmas_day(tr(\"Juledag\"))\n\n # Second Day of Christmas.\n self._add_christmas_day_two(tr(\"Anden juledag\"))\n\n def _populate_optional_holidays(self):\n # International Workers' Day.\n self._add_labor_day(tr(\"Arbejdernes kampdag\"))\n\n # Constitution Day.\n self._add_holiday_jun_5(tr(\"Grundlovsdag\"))\n\n # Christmas Eve.\n self._add_christmas_eve(tr(\"Juleaftensdag\"))\n\n # New Year's Eve.\n self._add_new_years_eve(tr(\"Nyt\u00e5rsaften\"))\n\n\nclass DK(Denmark):\n pass\n\n\nclass DNK(Denmark):\n pass\n", "path": "holidays/countries/denmark.py"}]} | 1,143 | 598 |
gh_patches_debug_21675 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-2385 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
counting contributions to polls on module tile
As discussed, please count the comments AND all answers on poll module tiles.
--- END ISSUE ---
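A sketch of the extra count this would need, assuming each poll answer is stored as a `Vote` linked through choice → question → poll → module (the exact lookup path depends on the polls app's models):
```python
from meinberlin.apps.polls.models import Vote


def get_poll_answer_count(module):
    # One Vote per answered choice; the lookup path mirrors the
    # comment filters already used in get_num_entries below.
    return Vote.objects.filter(choice__question__poll__module=module).count()
```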
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/projects/templatetags/meinberlin_project_tags.py`
Content:
```
1 from django import template
2
3 from adhocracy4.comments.models import Comment
4 from meinberlin.apps.budgeting.models import Proposal as budget_proposal
5 from meinberlin.apps.ideas.models import Idea
6 from meinberlin.apps.kiezkasse.models import Proposal as kiezkasse_proposal
7 from meinberlin.apps.mapideas.models import MapIdea
8 from meinberlin.apps.projects import get_project_type
9
10 register = template.Library()
11
12
13 @register.filter
14 def project_url(project):
15 if get_project_type(project) in ('external', 'bplan'):
16 return project.externalproject.url
17 return project.get_absolute_url()
18
19
20 @register.filter
21 def project_type(project):
22 return get_project_type(project)
23
24
25 @register.filter
26 def is_external(project):
27 return get_project_type(project) in ('external', 'bplan')
28
29
30 @register.filter
31 def is_container(project):
32 return get_project_type(project) == 'container'
33
34
35 @register.simple_tag
36 def to_class_name(value):
37 return value.__class__.__name__
38
39
40 @register.simple_tag
41 def get_num_entries(module):
42 """Count all user-generated items."""
43 item_count = \
44 Idea.objects.filter(module=module).count() \
45 + MapIdea.objects.filter(module=module).count() \
46 + budget_proposal.objects.filter(module=module).count() \
47 + kiezkasse_proposal.objects.filter(module=module).count() \
48 + Comment.objects.filter(idea__module=module).count() \
49 + Comment.objects.filter(mapidea__module=module).count() \
50 + Comment.objects.filter(budget_proposal__module=module).count() \
51 + Comment.objects.filter(kiezkasse_proposal__module=module).count() \
52 + Comment.objects.filter(topic__module=module).count() \
53 + Comment.objects.filter(maptopic__module=module).count() \
54 + Comment.objects.filter(paragraph__chapter__module=module).count() \
55 + Comment.objects.filter(chapter__module=module).count() \
56 + Comment.objects.filter(poll__module=module).count()
57 return item_count
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py b/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py
--- a/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py
+++ b/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py
@@ -5,6 +5,7 @@
from meinberlin.apps.ideas.models import Idea
from meinberlin.apps.kiezkasse.models import Proposal as kiezkasse_proposal
from meinberlin.apps.mapideas.models import MapIdea
+from meinberlin.apps.polls.models import Vote
from meinberlin.apps.projects import get_project_type
register = template.Library()
@@ -53,5 +54,6 @@
+ Comment.objects.filter(maptopic__module=module).count() \
+ Comment.objects.filter(paragraph__chapter__module=module).count() \
+ Comment.objects.filter(chapter__module=module).count() \
- + Comment.objects.filter(poll__module=module).count()
+ + Comment.objects.filter(poll__module=module).count() \
+ + Vote.objects.filter(choice__question__poll__module=module).count()
return item_count
| {"golden_diff": "diff --git a/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py b/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py\n--- a/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py\n+++ b/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py\n@@ -5,6 +5,7 @@\n from meinberlin.apps.ideas.models import Idea\n from meinberlin.apps.kiezkasse.models import Proposal as kiezkasse_proposal\n from meinberlin.apps.mapideas.models import MapIdea\n+from meinberlin.apps.polls.models import Vote\n from meinberlin.apps.projects import get_project_type\n \n register = template.Library()\n@@ -53,5 +54,6 @@\n + Comment.objects.filter(maptopic__module=module).count() \\\n + Comment.objects.filter(paragraph__chapter__module=module).count() \\\n + Comment.objects.filter(chapter__module=module).count() \\\n- + Comment.objects.filter(poll__module=module).count()\n+ + Comment.objects.filter(poll__module=module).count() \\\n+ + Vote.objects.filter(choice__question__poll__module=module).count()\n return item_count\n", "issue": "counting contributions to polls on module tile\nas discussed please count the comments AND all answers on poll module tiles.\n", "before_files": [{"content": "from django import template\n\nfrom adhocracy4.comments.models import Comment\nfrom meinberlin.apps.budgeting.models import Proposal as budget_proposal\nfrom meinberlin.apps.ideas.models import Idea\nfrom meinberlin.apps.kiezkasse.models import Proposal as kiezkasse_proposal\nfrom meinberlin.apps.mapideas.models import MapIdea\nfrom meinberlin.apps.projects import get_project_type\n\nregister = template.Library()\n\n\[email protected]\ndef project_url(project):\n if get_project_type(project) in ('external', 'bplan'):\n return project.externalproject.url\n return project.get_absolute_url()\n\n\[email protected]\ndef project_type(project):\n return get_project_type(project)\n\n\[email protected]\ndef is_external(project):\n return get_project_type(project) in ('external', 'bplan')\n\n\[email protected]\ndef is_container(project):\n return get_project_type(project) == 'container'\n\n\[email protected]_tag\ndef to_class_name(value):\n return value.__class__.__name__\n\n\[email protected]_tag\ndef get_num_entries(module):\n \"\"\"Count all user-generated items.\"\"\"\n item_count = \\\n Idea.objects.filter(module=module).count() \\\n + MapIdea.objects.filter(module=module).count() \\\n + budget_proposal.objects.filter(module=module).count() \\\n + kiezkasse_proposal.objects.filter(module=module).count() \\\n + Comment.objects.filter(idea__module=module).count() \\\n + Comment.objects.filter(mapidea__module=module).count() \\\n + Comment.objects.filter(budget_proposal__module=module).count() \\\n + Comment.objects.filter(kiezkasse_proposal__module=module).count() \\\n + Comment.objects.filter(topic__module=module).count() \\\n + Comment.objects.filter(maptopic__module=module).count() \\\n + Comment.objects.filter(paragraph__chapter__module=module).count() \\\n + Comment.objects.filter(chapter__module=module).count() \\\n + Comment.objects.filter(poll__module=module).count()\n return item_count\n", "path": "meinberlin/apps/projects/templatetags/meinberlin_project_tags.py"}], "after_files": [{"content": "from django import template\n\nfrom adhocracy4.comments.models import Comment\nfrom meinberlin.apps.budgeting.models import Proposal as budget_proposal\nfrom meinberlin.apps.ideas.models import Idea\nfrom meinberlin.apps.kiezkasse.models import Proposal as 
kiezkasse_proposal\nfrom meinberlin.apps.mapideas.models import MapIdea\nfrom meinberlin.apps.polls.models import Vote\nfrom meinberlin.apps.projects import get_project_type\n\nregister = template.Library()\n\n\[email protected]\ndef project_url(project):\n if get_project_type(project) in ('external', 'bplan'):\n return project.externalproject.url\n return project.get_absolute_url()\n\n\[email protected]\ndef project_type(project):\n return get_project_type(project)\n\n\[email protected]\ndef is_external(project):\n return get_project_type(project) in ('external', 'bplan')\n\n\[email protected]\ndef is_container(project):\n return get_project_type(project) == 'container'\n\n\[email protected]_tag\ndef to_class_name(value):\n return value.__class__.__name__\n\n\[email protected]_tag\ndef get_num_entries(module):\n \"\"\"Count all user-generated items.\"\"\"\n item_count = \\\n Idea.objects.filter(module=module).count() \\\n + MapIdea.objects.filter(module=module).count() \\\n + budget_proposal.objects.filter(module=module).count() \\\n + kiezkasse_proposal.objects.filter(module=module).count() \\\n + Comment.objects.filter(idea__module=module).count() \\\n + Comment.objects.filter(mapidea__module=module).count() \\\n + Comment.objects.filter(budget_proposal__module=module).count() \\\n + Comment.objects.filter(kiezkasse_proposal__module=module).count() \\\n + Comment.objects.filter(topic__module=module).count() \\\n + Comment.objects.filter(maptopic__module=module).count() \\\n + Comment.objects.filter(paragraph__chapter__module=module).count() \\\n + Comment.objects.filter(chapter__module=module).count() \\\n + Comment.objects.filter(poll__module=module).count() \\\n + Vote.objects.filter(choice__question__poll__module=module).count()\n return item_count\n", "path": "meinberlin/apps/projects/templatetags/meinberlin_project_tags.py"}]} | 851 | 280 |
gh_patches_debug_32506 | rasdani/github-patches | git_diff | mkdocs__mkdocs-2438 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Re-building w/ symbolic links stopped working, regression after #2385
Since a444c43 in master, updating files that are symbolically linked while using the local development server via `mkdocs serve` no longer triggers a rebuild (and therefore does not reload browser tabs).
At first glance this is due to the switch to watchdog for detecting file-system changes, which needs more guidance to handle this file type.
Preparing a PR with a patch.
Ref: a444c43474f91dea089922dd8fb188d1db3a4535
--- END ISSUE ---
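For reference, a minimal illustration (not the MkDocs implementation) of how watchdog can be pointed at the resolved target of a symlink so that edits to the underlying file still raise events; the helper name is made up:
```python
import os

import watchdog.events
import watchdog.observers


def schedule_with_symlink_target(observer, handler, path):
    """Watch `path`, and additionally its resolved target if it is a symlink."""
    observer.schedule(handler, path, recursive=True)
    real = os.path.realpath(path)
    if real != os.path.abspath(path):
        observer.schedule(handler, real, recursive=True)


observer = watchdog.observers.Observer()
schedule_with_symlink_target(observer, watchdog.events.LoggingEventHandler(), "docs")
observer.start()
```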
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mkdocs/livereload/__init__.py`
Content:
```
1 import functools
2 import io
3 import logging
4 import mimetypes
5 import os
6 import os.path
7 import pathlib
8 import re
9 import socketserver
10 import threading
11 import time
12 import warnings
13 import wsgiref.simple_server
14
15 import watchdog.events
16 import watchdog.observers
17
18
19 class _LoggerAdapter(logging.LoggerAdapter):
20 def process(self, msg, kwargs):
21 return time.strftime("[%H:%M:%S] ") + msg, kwargs
22
23
24 log = _LoggerAdapter(logging.getLogger(__name__), {})
25
26
27 class LiveReloadServer(socketserver.ThreadingMixIn, wsgiref.simple_server.WSGIServer):
28 daemon_threads = True
29 poll_response_timeout = 60
30
31 def __init__(
32 self,
33 builder,
34 host,
35 port,
36 root,
37 mount_path="/",
38 build_delay=0.25,
39 shutdown_delay=0.25,
40 **kwargs,
41 ):
42 self.builder = builder
43 self.server_name = host
44 self.server_port = port
45 self.root = os.path.abspath(root)
46 self.mount_path = ("/" + mount_path.lstrip("/")).rstrip("/") + "/"
47 self.url = f"http://{self.server_name}:{self.server_port}{self.mount_path}"
48 self.build_delay = build_delay
49 self.shutdown_delay = shutdown_delay
50 # To allow custom error pages.
51 self.error_handler = lambda code: None
52
53 super().__init__((host, port), _Handler, **kwargs)
54 self.set_app(self.serve_request)
55
56 self._wanted_epoch = _timestamp() # The version of the site that started building.
57 self._visible_epoch = self._wanted_epoch # Latest fully built version of the site.
58 self._epoch_cond = threading.Condition() # Must be held when accessing _visible_epoch.
59
60 self._to_rebuild = {} # Used as an ordered set of functions to call.
61 self._rebuild_cond = threading.Condition() # Must be held when accessing _to_rebuild.
62
63 self._shutdown = False
64 self.serve_thread = threading.Thread(target=lambda: self.serve_forever(shutdown_delay))
65 self.observer = watchdog.observers.Observer(timeout=shutdown_delay)
66
67 def watch(self, path, func=None, recursive=True):
68 """Add the 'path' to watched paths, call the function and reload when any file changes under it."""
69 path = os.path.abspath(path)
70 if func in (None, self.builder):
71 func = self.builder
72 else:
73 warnings.warn(
74 "Plugins should not pass the 'func' parameter of watch(). "
75 "The ability to execute custom callbacks will be removed soon.",
76 DeprecationWarning,
77 stacklevel=2,
78 )
79
80 def callback(event, allowed_path=None):
81 if isinstance(event, watchdog.events.DirCreatedEvent):
82 return
83 if allowed_path is not None and event.src_path != allowed_path:
84 return
85 # Text editors always cause a "file close" event in addition to "modified" when saving
86 # a file. Some editors also have "swap" functionality that keeps writing into another
87 # file that's never closed. Prevent such write events from causing a rebuild.
88 if isinstance(event, watchdog.events.FileModifiedEvent):
89 # But FileClosedEvent is implemented only on Linux, otherwise we mustn't skip this:
90 if type(self.observer).__name__ == "InotifyObserver":
91 return
92 log.debug(str(event))
93 with self._rebuild_cond:
94 self._to_rebuild[func] = True
95 self._rebuild_cond.notify_all()
96
97 dir_handler = watchdog.events.FileSystemEventHandler()
98 dir_handler.on_any_event = callback
99
100 seen = set()
101
102 def schedule(path):
103 seen.add(path)
104 if os.path.isfile(path):
105 # Watchdog doesn't support watching files, so watch its directory and filter by path
106 handler = watchdog.events.FileSystemEventHandler()
107 handler.on_any_event = lambda event: callback(event, allowed_path=path)
108
109 parent = os.path.dirname(path)
110 log.debug(f"Watching file '{path}' through directory '{parent}'")
111 self.observer.schedule(handler, parent)
112 else:
113 log.debug(f"Watching directory '{path}'")
114 self.observer.schedule(dir_handler, path, recursive=recursive)
115
116 schedule(os.path.realpath(path))
117
118 def watch_symlink_targets(path_obj): # path is os.DirEntry or pathlib.Path
119 if path_obj.is_symlink():
120 # The extra `readlink` is needed due to https://bugs.python.org/issue9949
121 target = os.path.realpath(os.readlink(os.fspath(path_obj)))
122 if target in seen or not os.path.exists(target):
123 return
124 schedule(target)
125
126 path_obj = pathlib.Path(target)
127
128 if path_obj.is_dir() and recursive:
129 with os.scandir(os.fspath(path_obj)) as scan:
130 for entry in scan:
131 watch_symlink_targets(entry)
132
133 watch_symlink_targets(pathlib.Path(path))
134
135 def serve(self):
136 self.observer.start()
137
138 log.info(f"Serving on {self.url}")
139 self.serve_thread.start()
140
141 self._build_loop()
142
143 def _build_loop(self):
144 while True:
145 with self._rebuild_cond:
146 while not self._rebuild_cond.wait_for(
147 lambda: self._to_rebuild or self._shutdown, timeout=self.shutdown_delay
148 ):
149 # We could have used just one wait instead of a loop + timeout, but we need
150 # occasional breaks, otherwise on Windows we can't receive KeyboardInterrupt.
151 pass
152 if self._shutdown:
153 break
154 log.info("Detected file changes")
155 while self._rebuild_cond.wait(timeout=self.build_delay):
156 log.debug("Waiting for file changes to stop happening")
157
158 self._wanted_epoch = _timestamp()
159 funcs = list(self._to_rebuild)
160 self._to_rebuild.clear()
161
162 for func in funcs:
163 func()
164
165 with self._epoch_cond:
166 log.info("Reloading browsers")
167 self._visible_epoch = self._wanted_epoch
168 self._epoch_cond.notify_all()
169
170 def shutdown(self):
171 self.observer.stop()
172 with self._rebuild_cond:
173 self._shutdown = True
174 self._rebuild_cond.notify_all()
175
176 if self.serve_thread.is_alive():
177 super().shutdown()
178 self.serve_thread.join()
179 self.observer.join()
180
181 def serve_request(self, environ, start_response):
182 try:
183 result = self._serve_request(environ, start_response)
184 except Exception:
185 code = 500
186 msg = "500 Internal Server Error"
187 log.exception(msg)
188 else:
189 if result is not None:
190 return result
191 code = 404
192 msg = "404 Not Found"
193
194 error_content = None
195 try:
196 error_content = self.error_handler(code)
197 except Exception:
198 log.exception("Failed to render an error message!")
199 if error_content is None:
200 error_content = msg.encode()
201
202 start_response(msg, [("Content-Type", "text/html")])
203 return [error_content]
204
205 def _serve_request(self, environ, start_response):
206 path = environ["PATH_INFO"]
207
208 m = re.fullmatch(r"/livereload/([0-9]+)/[0-9]+", path)
209 if m:
210 epoch = int(m[1])
211 start_response("200 OK", [("Content-Type", "text/plain")])
212
213 def condition():
214 return self._visible_epoch > epoch
215
216 with self._epoch_cond:
217 if not condition():
218 # Stall the browser, respond as soon as there's something new.
219 # If there's not, respond anyway after a minute.
220 self._log_poll_request(environ.get("HTTP_REFERER"), request_id=path)
221 self._epoch_cond.wait_for(condition, timeout=self.poll_response_timeout)
222 return [b"%d" % self._visible_epoch]
223
224 if path == "/js/livereload.js":
225 file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "livereload.js")
226 elif path.startswith(self.mount_path):
227 if path.endswith("/"):
228 path += "index.html"
229 path = path[len(self.mount_path):]
230 file_path = os.path.join(self.root, path.lstrip("/"))
231 elif path == "/":
232 start_response("302 Found", [("Location", self.mount_path)])
233 return []
234 else:
235 return None # Not found
236
237 # Wait until the ongoing rebuild (if any) finishes, so we're not serving a half-built site.
238 with self._epoch_cond:
239 self._epoch_cond.wait_for(lambda: self._visible_epoch == self._wanted_epoch)
240 epoch = self._visible_epoch
241
242 try:
243 file = open(file_path, "rb")
244 except OSError:
245 return None # Not found
246
247 if path.endswith(".html"):
248 with file:
249 content = file.read()
250 content = self._inject_js_into_html(content, epoch)
251 file = io.BytesIO(content)
252 content_length = len(content)
253 else:
254 content_length = os.path.getsize(file_path)
255
256 content_type = self._guess_type(file_path)
257 start_response(
258 "200 OK", [("Content-Type", content_type), ("Content-Length", str(content_length))]
259 )
260 return wsgiref.util.FileWrapper(file)
261
262 @classmethod
263 def _inject_js_into_html(cls, content, epoch):
264 try:
265 body_end = content.rindex(b"</body>")
266 except ValueError:
267 body_end = len(content)
268 # The page will reload if the livereload poller returns a newer epoch than what it knows.
269 # The other timestamp becomes just a unique identifier for the initiating page.
270 return (
271 b'%b<script src="/js/livereload.js"></script><script>livereload(%d, %d);</script>%b'
272 % (content[:body_end], epoch, _timestamp(), content[body_end:])
273 )
274
275 @classmethod
276 @functools.lru_cache() # "Cache" to not repeat the same message for the same browser tab.
277 def _log_poll_request(cls, url, request_id):
278 log.info(f"Browser connected: {url}")
279
280 def _guess_type(cls, path):
281 # MkDocs only ensures a few common types (as seen in livereload_tests.py::test_mime_types).
282 # Other uncommon types will not be accepted.
283 if path.endswith((".js", ".JS")):
284 return "application/javascript"
285 if path.endswith(".gz"):
286 return "application/gzip"
287
288 guess, _ = mimetypes.guess_type(path)
289 if guess:
290 return guess
291 return "application/octet-stream"
292
293
294 class _Handler(wsgiref.simple_server.WSGIRequestHandler):
295 def log_request(self, code="-", size="-"):
296 level = logging.DEBUG if str(code) == "200" else logging.WARNING
297 log.log(level, f'"{self.requestline}" code {code}')
298
299 def log_message(self, format, *args):
300 log.debug(format, *args)
301
302
303 def _timestamp():
304 return round(time.monotonic() * 1000)
305
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mkdocs/livereload/__init__.py b/mkdocs/livereload/__init__.py
--- a/mkdocs/livereload/__init__.py
+++ b/mkdocs/livereload/__init__.py
@@ -101,29 +101,26 @@
def schedule(path):
seen.add(path)
- if os.path.isfile(path):
+ if path.is_file():
# Watchdog doesn't support watching files, so watch its directory and filter by path
handler = watchdog.events.FileSystemEventHandler()
- handler.on_any_event = lambda event: callback(event, allowed_path=path)
+ handler.on_any_event = lambda event: callback(event, allowed_path=os.fspath(path))
- parent = os.path.dirname(path)
+ parent = path.parent
log.debug(f"Watching file '{path}' through directory '{parent}'")
self.observer.schedule(handler, parent)
else:
log.debug(f"Watching directory '{path}'")
self.observer.schedule(dir_handler, path, recursive=recursive)
- schedule(os.path.realpath(path))
+ schedule(pathlib.Path(path).resolve())
def watch_symlink_targets(path_obj): # path is os.DirEntry or pathlib.Path
if path_obj.is_symlink():
- # The extra `readlink` is needed due to https://bugs.python.org/issue9949
- target = os.path.realpath(os.readlink(os.fspath(path_obj)))
- if target in seen or not os.path.exists(target):
+ path_obj = pathlib.Path(path_obj).resolve()
+ if path_obj in seen or not path_obj.exists():
return
- schedule(target)
-
- path_obj = pathlib.Path(target)
+ schedule(path_obj)
if path_obj.is_dir() and recursive:
with os.scandir(os.fspath(path_obj)) as scan:
| {"golden_diff": "diff --git a/mkdocs/livereload/__init__.py b/mkdocs/livereload/__init__.py\n--- a/mkdocs/livereload/__init__.py\n+++ b/mkdocs/livereload/__init__.py\n@@ -101,29 +101,26 @@\n \n def schedule(path):\n seen.add(path)\n- if os.path.isfile(path):\n+ if path.is_file():\n # Watchdog doesn't support watching files, so watch its directory and filter by path\n handler = watchdog.events.FileSystemEventHandler()\n- handler.on_any_event = lambda event: callback(event, allowed_path=path)\n+ handler.on_any_event = lambda event: callback(event, allowed_path=os.fspath(path))\n \n- parent = os.path.dirname(path)\n+ parent = path.parent\n log.debug(f\"Watching file '{path}' through directory '{parent}'\")\n self.observer.schedule(handler, parent)\n else:\n log.debug(f\"Watching directory '{path}'\")\n self.observer.schedule(dir_handler, path, recursive=recursive)\n \n- schedule(os.path.realpath(path))\n+ schedule(pathlib.Path(path).resolve())\n \n def watch_symlink_targets(path_obj): # path is os.DirEntry or pathlib.Path\n if path_obj.is_symlink():\n- # The extra `readlink` is needed due to https://bugs.python.org/issue9949\n- target = os.path.realpath(os.readlink(os.fspath(path_obj)))\n- if target in seen or not os.path.exists(target):\n+ path_obj = pathlib.Path(path_obj).resolve()\n+ if path_obj in seen or not path_obj.exists():\n return\n- schedule(target)\n-\n- path_obj = pathlib.Path(target)\n+ schedule(path_obj)\n \n if path_obj.is_dir() and recursive:\n with os.scandir(os.fspath(path_obj)) as scan:\n", "issue": "Re-building w/ symbolic links stopped working, regression after #2385\nSince a444c43 in master using the local development server via `mkdocs serve` updating files that are symbolically linked is not triggering to rebuild (and therefore not reloading browser tabs).\r\n\r\nOn first glance this is due to the switch to watchdog for detecting file-system changes which needs more guidance to handle this file-type.\r\n\r\nPreparing a PR with a patch.\r\n\r\nRef: a444c43474f91dea089922dd8fb188d1db3a4535\n", "before_files": [{"content": "import functools\nimport io\nimport logging\nimport mimetypes\nimport os\nimport os.path\nimport pathlib\nimport re\nimport socketserver\nimport threading\nimport time\nimport warnings\nimport wsgiref.simple_server\n\nimport watchdog.events\nimport watchdog.observers\n\n\nclass _LoggerAdapter(logging.LoggerAdapter):\n def process(self, msg, kwargs):\n return time.strftime(\"[%H:%M:%S] \") + msg, kwargs\n\n\nlog = _LoggerAdapter(logging.getLogger(__name__), {})\n\n\nclass LiveReloadServer(socketserver.ThreadingMixIn, wsgiref.simple_server.WSGIServer):\n daemon_threads = True\n poll_response_timeout = 60\n\n def __init__(\n self,\n builder,\n host,\n port,\n root,\n mount_path=\"/\",\n build_delay=0.25,\n shutdown_delay=0.25,\n **kwargs,\n ):\n self.builder = builder\n self.server_name = host\n self.server_port = port\n self.root = os.path.abspath(root)\n self.mount_path = (\"/\" + mount_path.lstrip(\"/\")).rstrip(\"/\") + \"/\"\n self.url = f\"http://{self.server_name}:{self.server_port}{self.mount_path}\"\n self.build_delay = build_delay\n self.shutdown_delay = shutdown_delay\n # To allow custom error pages.\n self.error_handler = lambda code: None\n\n super().__init__((host, port), _Handler, **kwargs)\n self.set_app(self.serve_request)\n\n self._wanted_epoch = _timestamp() # The version of the site that started building.\n self._visible_epoch = self._wanted_epoch # Latest fully built version of the site.\n self._epoch_cond = threading.Condition() # 
Must be held when accessing _visible_epoch.\n\n self._to_rebuild = {} # Used as an ordered set of functions to call.\n self._rebuild_cond = threading.Condition() # Must be held when accessing _to_rebuild.\n\n self._shutdown = False\n self.serve_thread = threading.Thread(target=lambda: self.serve_forever(shutdown_delay))\n self.observer = watchdog.observers.Observer(timeout=shutdown_delay)\n\n def watch(self, path, func=None, recursive=True):\n \"\"\"Add the 'path' to watched paths, call the function and reload when any file changes under it.\"\"\"\n path = os.path.abspath(path)\n if func in (None, self.builder):\n func = self.builder\n else:\n warnings.warn(\n \"Plugins should not pass the 'func' parameter of watch(). \"\n \"The ability to execute custom callbacks will be removed soon.\",\n DeprecationWarning,\n stacklevel=2,\n )\n\n def callback(event, allowed_path=None):\n if isinstance(event, watchdog.events.DirCreatedEvent):\n return\n if allowed_path is not None and event.src_path != allowed_path:\n return\n # Text editors always cause a \"file close\" event in addition to \"modified\" when saving\n # a file. Some editors also have \"swap\" functionality that keeps writing into another\n # file that's never closed. Prevent such write events from causing a rebuild.\n if isinstance(event, watchdog.events.FileModifiedEvent):\n # But FileClosedEvent is implemented only on Linux, otherwise we mustn't skip this:\n if type(self.observer).__name__ == \"InotifyObserver\":\n return\n log.debug(str(event))\n with self._rebuild_cond:\n self._to_rebuild[func] = True\n self._rebuild_cond.notify_all()\n\n dir_handler = watchdog.events.FileSystemEventHandler()\n dir_handler.on_any_event = callback\n\n seen = set()\n\n def schedule(path):\n seen.add(path)\n if os.path.isfile(path):\n # Watchdog doesn't support watching files, so watch its directory and filter by path\n handler = watchdog.events.FileSystemEventHandler()\n handler.on_any_event = lambda event: callback(event, allowed_path=path)\n\n parent = os.path.dirname(path)\n log.debug(f\"Watching file '{path}' through directory '{parent}'\")\n self.observer.schedule(handler, parent)\n else:\n log.debug(f\"Watching directory '{path}'\")\n self.observer.schedule(dir_handler, path, recursive=recursive)\n\n schedule(os.path.realpath(path))\n\n def watch_symlink_targets(path_obj): # path is os.DirEntry or pathlib.Path\n if path_obj.is_symlink():\n # The extra `readlink` is needed due to https://bugs.python.org/issue9949\n target = os.path.realpath(os.readlink(os.fspath(path_obj)))\n if target in seen or not os.path.exists(target):\n return\n schedule(target)\n\n path_obj = pathlib.Path(target)\n\n if path_obj.is_dir() and recursive:\n with os.scandir(os.fspath(path_obj)) as scan:\n for entry in scan:\n watch_symlink_targets(entry)\n\n watch_symlink_targets(pathlib.Path(path))\n\n def serve(self):\n self.observer.start()\n\n log.info(f\"Serving on {self.url}\")\n self.serve_thread.start()\n\n self._build_loop()\n\n def _build_loop(self):\n while True:\n with self._rebuild_cond:\n while not self._rebuild_cond.wait_for(\n lambda: self._to_rebuild or self._shutdown, timeout=self.shutdown_delay\n ):\n # We could have used just one wait instead of a loop + timeout, but we need\n # occasional breaks, otherwise on Windows we can't receive KeyboardInterrupt.\n pass\n if self._shutdown:\n break\n log.info(\"Detected file changes\")\n while self._rebuild_cond.wait(timeout=self.build_delay):\n log.debug(\"Waiting for file changes to stop happening\")\n\n 
self._wanted_epoch = _timestamp()\n funcs = list(self._to_rebuild)\n self._to_rebuild.clear()\n\n for func in funcs:\n func()\n\n with self._epoch_cond:\n log.info(\"Reloading browsers\")\n self._visible_epoch = self._wanted_epoch\n self._epoch_cond.notify_all()\n\n def shutdown(self):\n self.observer.stop()\n with self._rebuild_cond:\n self._shutdown = True\n self._rebuild_cond.notify_all()\n\n if self.serve_thread.is_alive():\n super().shutdown()\n self.serve_thread.join()\n self.observer.join()\n\n def serve_request(self, environ, start_response):\n try:\n result = self._serve_request(environ, start_response)\n except Exception:\n code = 500\n msg = \"500 Internal Server Error\"\n log.exception(msg)\n else:\n if result is not None:\n return result\n code = 404\n msg = \"404 Not Found\"\n\n error_content = None\n try:\n error_content = self.error_handler(code)\n except Exception:\n log.exception(\"Failed to render an error message!\")\n if error_content is None:\n error_content = msg.encode()\n\n start_response(msg, [(\"Content-Type\", \"text/html\")])\n return [error_content]\n\n def _serve_request(self, environ, start_response):\n path = environ[\"PATH_INFO\"]\n\n m = re.fullmatch(r\"/livereload/([0-9]+)/[0-9]+\", path)\n if m:\n epoch = int(m[1])\n start_response(\"200 OK\", [(\"Content-Type\", \"text/plain\")])\n\n def condition():\n return self._visible_epoch > epoch\n\n with self._epoch_cond:\n if not condition():\n # Stall the browser, respond as soon as there's something new.\n # If there's not, respond anyway after a minute.\n self._log_poll_request(environ.get(\"HTTP_REFERER\"), request_id=path)\n self._epoch_cond.wait_for(condition, timeout=self.poll_response_timeout)\n return [b\"%d\" % self._visible_epoch]\n\n if path == \"/js/livereload.js\":\n file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"livereload.js\")\n elif path.startswith(self.mount_path):\n if path.endswith(\"/\"):\n path += \"index.html\"\n path = path[len(self.mount_path):]\n file_path = os.path.join(self.root, path.lstrip(\"/\"))\n elif path == \"/\":\n start_response(\"302 Found\", [(\"Location\", self.mount_path)])\n return []\n else:\n return None # Not found\n\n # Wait until the ongoing rebuild (if any) finishes, so we're not serving a half-built site.\n with self._epoch_cond:\n self._epoch_cond.wait_for(lambda: self._visible_epoch == self._wanted_epoch)\n epoch = self._visible_epoch\n\n try:\n file = open(file_path, \"rb\")\n except OSError:\n return None # Not found\n\n if path.endswith(\".html\"):\n with file:\n content = file.read()\n content = self._inject_js_into_html(content, epoch)\n file = io.BytesIO(content)\n content_length = len(content)\n else:\n content_length = os.path.getsize(file_path)\n\n content_type = self._guess_type(file_path)\n start_response(\n \"200 OK\", [(\"Content-Type\", content_type), (\"Content-Length\", str(content_length))]\n )\n return wsgiref.util.FileWrapper(file)\n\n @classmethod\n def _inject_js_into_html(cls, content, epoch):\n try:\n body_end = content.rindex(b\"</body>\")\n except ValueError:\n body_end = len(content)\n # The page will reload if the livereload poller returns a newer epoch than what it knows.\n # The other timestamp becomes just a unique identifier for the initiating page.\n return (\n b'%b<script src=\"/js/livereload.js\"></script><script>livereload(%d, %d);</script>%b'\n % (content[:body_end], epoch, _timestamp(), content[body_end:])\n )\n\n @classmethod\n @functools.lru_cache() # \"Cache\" to not repeat the same message for 
the same browser tab.\n def _log_poll_request(cls, url, request_id):\n log.info(f\"Browser connected: {url}\")\n\n def _guess_type(cls, path):\n # MkDocs only ensures a few common types (as seen in livereload_tests.py::test_mime_types).\n # Other uncommon types will not be accepted.\n if path.endswith((\".js\", \".JS\")):\n return \"application/javascript\"\n if path.endswith(\".gz\"):\n return \"application/gzip\"\n\n guess, _ = mimetypes.guess_type(path)\n if guess:\n return guess\n return \"application/octet-stream\"\n\n\nclass _Handler(wsgiref.simple_server.WSGIRequestHandler):\n def log_request(self, code=\"-\", size=\"-\"):\n level = logging.DEBUG if str(code) == \"200\" else logging.WARNING\n log.log(level, f'\"{self.requestline}\" code {code}')\n\n def log_message(self, format, *args):\n log.debug(format, *args)\n\n\ndef _timestamp():\n return round(time.monotonic() * 1000)\n", "path": "mkdocs/livereload/__init__.py"}], "after_files": [{"content": "import functools\nimport io\nimport logging\nimport mimetypes\nimport os\nimport os.path\nimport pathlib\nimport re\nimport socketserver\nimport threading\nimport time\nimport warnings\nimport wsgiref.simple_server\n\nimport watchdog.events\nimport watchdog.observers\n\n\nclass _LoggerAdapter(logging.LoggerAdapter):\n def process(self, msg, kwargs):\n return time.strftime(\"[%H:%M:%S] \") + msg, kwargs\n\n\nlog = _LoggerAdapter(logging.getLogger(__name__), {})\n\n\nclass LiveReloadServer(socketserver.ThreadingMixIn, wsgiref.simple_server.WSGIServer):\n daemon_threads = True\n poll_response_timeout = 60\n\n def __init__(\n self,\n builder,\n host,\n port,\n root,\n mount_path=\"/\",\n build_delay=0.25,\n shutdown_delay=0.25,\n **kwargs,\n ):\n self.builder = builder\n self.server_name = host\n self.server_port = port\n self.root = os.path.abspath(root)\n self.mount_path = (\"/\" + mount_path.lstrip(\"/\")).rstrip(\"/\") + \"/\"\n self.url = f\"http://{self.server_name}:{self.server_port}{self.mount_path}\"\n self.build_delay = build_delay\n self.shutdown_delay = shutdown_delay\n # To allow custom error pages.\n self.error_handler = lambda code: None\n\n super().__init__((host, port), _Handler, **kwargs)\n self.set_app(self.serve_request)\n\n self._wanted_epoch = _timestamp() # The version of the site that started building.\n self._visible_epoch = self._wanted_epoch # Latest fully built version of the site.\n self._epoch_cond = threading.Condition() # Must be held when accessing _visible_epoch.\n\n self._to_rebuild = {} # Used as an ordered set of functions to call.\n self._rebuild_cond = threading.Condition() # Must be held when accessing _to_rebuild.\n\n self._shutdown = False\n self.serve_thread = threading.Thread(target=lambda: self.serve_forever(shutdown_delay))\n self.observer = watchdog.observers.Observer(timeout=shutdown_delay)\n\n def watch(self, path, func=None, recursive=True):\n \"\"\"Add the 'path' to watched paths, call the function and reload when any file changes under it.\"\"\"\n path = os.path.abspath(path)\n if func in (None, self.builder):\n func = self.builder\n else:\n warnings.warn(\n \"Plugins should not pass the 'func' parameter of watch(). 
\"\n \"The ability to execute custom callbacks will be removed soon.\",\n DeprecationWarning,\n stacklevel=2,\n )\n\n def callback(event, allowed_path=None):\n if isinstance(event, watchdog.events.DirCreatedEvent):\n return\n if allowed_path is not None and event.src_path != allowed_path:\n return\n # Text editors always cause a \"file close\" event in addition to \"modified\" when saving\n # a file. Some editors also have \"swap\" functionality that keeps writing into another\n # file that's never closed. Prevent such write events from causing a rebuild.\n if isinstance(event, watchdog.events.FileModifiedEvent):\n # But FileClosedEvent is implemented only on Linux, otherwise we mustn't skip this:\n if type(self.observer).__name__ == \"InotifyObserver\":\n return\n log.debug(str(event))\n with self._rebuild_cond:\n self._to_rebuild[func] = True\n self._rebuild_cond.notify_all()\n\n dir_handler = watchdog.events.FileSystemEventHandler()\n dir_handler.on_any_event = callback\n\n seen = set()\n\n def schedule(path):\n seen.add(path)\n if path.is_file():\n # Watchdog doesn't support watching files, so watch its directory and filter by path\n handler = watchdog.events.FileSystemEventHandler()\n handler.on_any_event = lambda event: callback(event, allowed_path=os.fspath(path))\n\n parent = path.parent\n log.debug(f\"Watching file '{path}' through directory '{parent}'\")\n self.observer.schedule(handler, parent)\n else:\n log.debug(f\"Watching directory '{path}'\")\n self.observer.schedule(dir_handler, path, recursive=recursive)\n\n schedule(pathlib.Path(path).resolve())\n\n def watch_symlink_targets(path_obj): # path is os.DirEntry or pathlib.Path\n if path_obj.is_symlink():\n path_obj = pathlib.Path(path_obj).resolve()\n if path_obj in seen or not path_obj.exists():\n return\n schedule(path_obj)\n\n if path_obj.is_dir() and recursive:\n with os.scandir(os.fspath(path_obj)) as scan:\n for entry in scan:\n watch_symlink_targets(entry)\n\n watch_symlink_targets(pathlib.Path(path))\n\n def serve(self):\n self.observer.start()\n\n log.info(f\"Serving on {self.url}\")\n self.serve_thread.start()\n\n self._build_loop()\n\n def _build_loop(self):\n while True:\n with self._rebuild_cond:\n while not self._rebuild_cond.wait_for(\n lambda: self._to_rebuild or self._shutdown, timeout=self.shutdown_delay\n ):\n # We could have used just one wait instead of a loop + timeout, but we need\n # occasional breaks, otherwise on Windows we can't receive KeyboardInterrupt.\n pass\n if self._shutdown:\n break\n log.info(\"Detected file changes\")\n while self._rebuild_cond.wait(timeout=self.build_delay):\n log.debug(\"Waiting for file changes to stop happening\")\n\n self._wanted_epoch = _timestamp()\n funcs = list(self._to_rebuild)\n self._to_rebuild.clear()\n\n for func in funcs:\n func()\n\n with self._epoch_cond:\n log.info(\"Reloading browsers\")\n self._visible_epoch = self._wanted_epoch\n self._epoch_cond.notify_all()\n\n def shutdown(self):\n self.observer.stop()\n with self._rebuild_cond:\n self._shutdown = True\n self._rebuild_cond.notify_all()\n\n if self.serve_thread.is_alive():\n super().shutdown()\n self.serve_thread.join()\n self.observer.join()\n\n def serve_request(self, environ, start_response):\n try:\n result = self._serve_request(environ, start_response)\n except Exception:\n code = 500\n msg = \"500 Internal Server Error\"\n log.exception(msg)\n else:\n if result is not None:\n return result\n code = 404\n msg = \"404 Not Found\"\n\n error_content = None\n try:\n error_content = 
self.error_handler(code)\n except Exception:\n log.exception(\"Failed to render an error message!\")\n if error_content is None:\n error_content = msg.encode()\n\n start_response(msg, [(\"Content-Type\", \"text/html\")])\n return [error_content]\n\n def _serve_request(self, environ, start_response):\n path = environ[\"PATH_INFO\"]\n\n m = re.fullmatch(r\"/livereload/([0-9]+)/[0-9]+\", path)\n if m:\n epoch = int(m[1])\n start_response(\"200 OK\", [(\"Content-Type\", \"text/plain\")])\n\n def condition():\n return self._visible_epoch > epoch\n\n with self._epoch_cond:\n if not condition():\n # Stall the browser, respond as soon as there's something new.\n # If there's not, respond anyway after a minute.\n self._log_poll_request(environ.get(\"HTTP_REFERER\"), request_id=path)\n self._epoch_cond.wait_for(condition, timeout=self.poll_response_timeout)\n return [b\"%d\" % self._visible_epoch]\n\n if path == \"/js/livereload.js\":\n file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"livereload.js\")\n elif path.startswith(self.mount_path):\n if path.endswith(\"/\"):\n path += \"index.html\"\n path = path[len(self.mount_path):]\n file_path = os.path.join(self.root, path.lstrip(\"/\"))\n elif path == \"/\":\n start_response(\"302 Found\", [(\"Location\", self.mount_path)])\n return []\n else:\n return None # Not found\n\n # Wait until the ongoing rebuild (if any) finishes, so we're not serving a half-built site.\n with self._epoch_cond:\n self._epoch_cond.wait_for(lambda: self._visible_epoch == self._wanted_epoch)\n epoch = self._visible_epoch\n\n try:\n file = open(file_path, \"rb\")\n except OSError:\n return None # Not found\n\n if path.endswith(\".html\"):\n with file:\n content = file.read()\n content = self._inject_js_into_html(content, epoch)\n file = io.BytesIO(content)\n content_length = len(content)\n else:\n content_length = os.path.getsize(file_path)\n\n content_type = self._guess_type(file_path)\n start_response(\n \"200 OK\", [(\"Content-Type\", content_type), (\"Content-Length\", str(content_length))]\n )\n return wsgiref.util.FileWrapper(file)\n\n @classmethod\n def _inject_js_into_html(cls, content, epoch):\n try:\n body_end = content.rindex(b\"</body>\")\n except ValueError:\n body_end = len(content)\n # The page will reload if the livereload poller returns a newer epoch than what it knows.\n # The other timestamp becomes just a unique identifier for the initiating page.\n return (\n b'%b<script src=\"/js/livereload.js\"></script><script>livereload(%d, %d);</script>%b'\n % (content[:body_end], epoch, _timestamp(), content[body_end:])\n )\n\n @classmethod\n @functools.lru_cache() # \"Cache\" to not repeat the same message for the same browser tab.\n def _log_poll_request(cls, url, request_id):\n log.info(f\"Browser connected: {url}\")\n\n def _guess_type(cls, path):\n # MkDocs only ensures a few common types (as seen in livereload_tests.py::test_mime_types).\n # Other uncommon types will not be accepted.\n if path.endswith((\".js\", \".JS\")):\n return \"application/javascript\"\n if path.endswith(\".gz\"):\n return \"application/gzip\"\n\n guess, _ = mimetypes.guess_type(path)\n if guess:\n return guess\n return \"application/octet-stream\"\n\n\nclass _Handler(wsgiref.simple_server.WSGIRequestHandler):\n def log_request(self, code=\"-\", size=\"-\"):\n level = logging.DEBUG if str(code) == \"200\" else logging.WARNING\n log.log(level, f'\"{self.requestline}\" code {code}')\n\n def log_message(self, format, *args):\n log.debug(format, *args)\n\n\ndef 
_timestamp():\n return round(time.monotonic() * 1000)\n", "path": "mkdocs/livereload/__init__.py"}]} | 3,609 | 405 |
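For readers skimming the MkDocs patch above, here is a minimal, self-contained sketch of the pathlib-based symlink resolution it switches to. The helper name `resolve_watch_targets` is hypothetical (not MkDocs API), and a real watcher would additionally register each returned path with watchdog:

```python
# Hypothetical helper mirroring the patched logic: resolve the watched path and
# any symlink targets with pathlib, skipping targets already seen or missing.
import os
import pathlib


def resolve_watch_targets(path, recursive=True):
    """Return the set of resolved paths a file watcher should schedule."""
    seen = set()

    def schedule(target):
        # A real watcher would register 'target' with watchdog here
        # (or its parent directory when 'target' is a single file).
        seen.add(target)

    schedule(pathlib.Path(path).resolve())

    def follow_symlinks(path_obj):  # path_obj is os.DirEntry or pathlib.Path
        if path_obj.is_symlink():
            target = pathlib.Path(path_obj).resolve()
            if target in seen or not target.exists():
                return
            schedule(target)
            if target.is_dir() and recursive:
                with os.scandir(os.fspath(target)) as scan:
                    for entry in scan:
                        follow_symlinks(entry)

    follow_symlinks(pathlib.Path(path))
    return seen
```

The point mirrored here is that every path is resolved up front with `pathlib.Path(...).resolve()`, so a symlinked source file or directory is watched through its real target and changes to it trigger a rebuild again.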
gh_patches_debug_15547 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-977 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Change default port for OTLP exporter
By default the collector's OTLP receiver is configured on port 55680, but the OTLP exporter's default endpoint still uses the old OpenCensus receiver port (55678). This should be changed.
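For illustration, a minimal hedged snippet of the current workaround (assumes a collector running locally with its OTLP receiver on 55680; the explicit `endpoint` argument is only needed until the exporter default changes):

```python
# Workaround sketch: pass the collector's OTLP receiver port explicitly
# instead of relying on the exporter's outdated default of 55678.
from opentelemetry import trace
from opentelemetry.exporter.otlp.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchExportSpanProcessor

trace.set_tracer_provider(TracerProvider())
exporter = OTLPSpanExporter(endpoint="localhost:55680")  # new default OTLP receiver port
trace.get_tracer_provider().add_span_processor(BatchExportSpanProcessor(exporter))
```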
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlp/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 """
17 This library allows to export tracing data to an OTLP collector.
18
19 Usage
20 -----
21
22 The **OTLP Span Exporter** allows to export `OpenTelemetry`_ traces to the
23 `OTLP`_ collector.
24
25
26 .. _OTLP: https://github.com/open-telemetry/opentelemetry-collector/
27 .. _OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/
28
29 .. code:: python
30
31 from opentelemetry import trace
32 from opentelemetry.exporter.otlp.trace_exporter import OTLPSpanExporter
33 from opentelemetry.sdk.resources import Resource
34 from opentelemetry.sdk.trace import TracerProvider
35 from opentelemetry.sdk.trace.export import BatchExportSpanProcessor
36
37 # Resource can be required for some backends, e.g. Jaeger
38     # If the resource is not set, traces will not appear in Jaeger
39 resource = Resource(labels=labels={
40 "service.name": "service"
41 })
42
43 trace.set_tracer_provider(TracerProvider(resource=resource)))
44 tracer = trace.get_tracer(__name__)
45
46 otlp_exporter = OTLPSpanExporter(endpoint="localhost:55678")
47
48 span_processor = BatchExportSpanProcessor(otlp_exporter)
49
50 trace.get_tracer_provider().add_span_processor(span_processor)
51
52 with tracer.start_as_current_span("foo"):
53 print("Hello world!")
54
55 API
56 ---
57 """
58
```
Path: `exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlp/trace_exporter/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """OTLP Span Exporter"""
16
17 import logging
18 from collections.abc import Mapping, Sequence
19 from time import sleep
20 from typing import Sequence as TypingSequence
21
22 from backoff import expo
23 from google.rpc.error_details_pb2 import RetryInfo
24 from grpc import (
25 ChannelCredentials,
26 RpcError,
27 StatusCode,
28 insecure_channel,
29 secure_channel,
30 )
31
32 from opentelemetry.proto.collector.trace.v1.trace_service_pb2 import (
33 ExportTraceServiceRequest,
34 )
35 from opentelemetry.proto.collector.trace.v1.trace_service_pb2_grpc import (
36 TraceServiceStub,
37 )
38 from opentelemetry.proto.common.v1.common_pb2 import AnyValue, KeyValue
39 from opentelemetry.proto.resource.v1.resource_pb2 import Resource
40 from opentelemetry.proto.trace.v1.trace_pb2 import (
41 InstrumentationLibrarySpans,
42 ResourceSpans,
43 )
44 from opentelemetry.proto.trace.v1.trace_pb2 import Span as CollectorSpan
45 from opentelemetry.proto.trace.v1.trace_pb2 import Status
46 from opentelemetry.sdk.trace import Span as SDKSpan
47 from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
48
49 logger = logging.getLogger(__name__)
50
51
52 def _translate_key_values(key, value):
53
54 if isinstance(value, bool):
55 any_value = AnyValue(bool_value=value)
56
57 elif isinstance(value, str):
58 any_value = AnyValue(string_value=value)
59
60 elif isinstance(value, int):
61 any_value = AnyValue(int_value=value)
62
63 elif isinstance(value, float):
64 any_value = AnyValue(double_value=value)
65
66 elif isinstance(value, Sequence):
67 any_value = AnyValue(array_value=value)
68
69 elif isinstance(value, Mapping):
70 any_value = AnyValue(kvlist_value=value)
71
72 else:
73 raise Exception(
74 "Invalid type {} of value {}".format(type(value), value)
75 )
76
77 return KeyValue(key=key, value=any_value)
78
79
80 # pylint: disable=no-member
81 class OTLPSpanExporter(SpanExporter):
82 """OTLP span exporter
83
84 Args:
85 endpoint: OpenTelemetry Collector receiver endpoint
86 credentials: Credentials object for server authentication
87 metadata: Metadata to send when exporting
88 """
89
90 def __init__(
91 self,
92 endpoint="localhost:55678",
93 credentials: ChannelCredentials = None,
94 metadata=None,
95 ):
96 super().__init__()
97
98 self._metadata = metadata
99 self._collector_span_kwargs = None
100
101 if credentials is None:
102 self._client = TraceServiceStub(insecure_channel(endpoint))
103 else:
104 self._client = TraceServiceStub(
105 secure_channel(endpoint, credentials)
106 )
107
108 def _translate_name(self, sdk_span):
109 self._collector_span_kwargs["name"] = sdk_span.name
110
111 def _translate_start_time(self, sdk_span):
112 self._collector_span_kwargs[
113 "start_time_unix_nano"
114 ] = sdk_span.start_time
115
116 def _translate_end_time(self, sdk_span):
117 self._collector_span_kwargs["end_time_unix_nano"] = sdk_span.end_time
118
119 def _translate_span_id(self, sdk_span):
120 self._collector_span_kwargs[
121 "span_id"
122 ] = sdk_span.context.span_id.to_bytes(8, "big")
123
124 def _translate_trace_id(self, sdk_span):
125 self._collector_span_kwargs[
126 "trace_id"
127 ] = sdk_span.context.trace_id.to_bytes(16, "big")
128
129 def _translate_parent(self, sdk_span):
130 if sdk_span.parent is not None:
131 self._collector_span_kwargs[
132 "parent_span_id"
133 ] = sdk_span.parent.span_id.to_bytes(8, "big")
134
135 def _translate_context_trace_state(self, sdk_span):
136 if sdk_span.context.trace_state is not None:
137 self._collector_span_kwargs["trace_state"] = ",".join(
138 [
139 "{}={}".format(key, value)
140 for key, value in (sdk_span.context.trace_state.items())
141 ]
142 )
143
144 def _translate_attributes(self, sdk_span):
145 if sdk_span.attributes:
146
147 self._collector_span_kwargs["attributes"] = []
148
149 for key, value in sdk_span.attributes.items():
150
151 try:
152 self._collector_span_kwargs["attributes"].append(
153 _translate_key_values(key, value)
154 )
155 except Exception as error: # pylint: disable=broad-except
156 logger.exception(error)
157
158 def _translate_events(self, sdk_span):
159 if sdk_span.events:
160 self._collector_span_kwargs["events"] = []
161
162 for sdk_span_event in sdk_span.events:
163
164 collector_span_event = CollectorSpan.Event(
165 name=sdk_span_event.name,
166 time_unix_nano=sdk_span_event.timestamp,
167 )
168
169 for key, value in sdk_span_event.attributes.items():
170 try:
171 collector_span_event.attributes.append(
172 _translate_key_values(key, value)
173 )
174 # pylint: disable=broad-except
175 except Exception as error:
176 logger.exception(error)
177
178 self._collector_span_kwargs["events"].append(
179 collector_span_event
180 )
181
182 def _translate_links(self, sdk_span):
183 if sdk_span.links:
184 self._collector_span_kwargs["links"] = []
185
186 for sdk_span_link in sdk_span.links:
187
188 collector_span_link = CollectorSpan.Link(
189 trace_id=(
190 sdk_span_link.context.trace_id.to_bytes(16, "big")
191 ),
192 span_id=(sdk_span_link.context.span_id.to_bytes(8, "big")),
193 )
194
195 for key, value in sdk_span_link.attributes.items():
196 try:
197 collector_span_link.attributes.append(
198 _translate_key_values(key, value)
199 )
200 # pylint: disable=broad-except
201 except Exception as error:
202 logger.exception(error)
203
204 self._collector_span_kwargs["links"].append(
205 collector_span_link
206 )
207
208 def _translate_status(self, sdk_span):
209 if sdk_span.status is not None:
210 self._collector_span_kwargs["status"] = Status(
211 code=sdk_span.status.canonical_code.value,
212 message=sdk_span.status.description,
213 )
214
215 def _translate_spans(
216 self, sdk_spans: TypingSequence[SDKSpan],
217 ) -> ExportTraceServiceRequest:
218
219 sdk_resource_instrumentation_library_spans = {}
220
221 for sdk_span in sdk_spans:
222
223 if sdk_span.resource not in (
224 sdk_resource_instrumentation_library_spans.keys()
225 ):
226 sdk_resource_instrumentation_library_spans[
227 sdk_span.resource
228 ] = InstrumentationLibrarySpans()
229
230 self._collector_span_kwargs = {}
231
232 self._translate_name(sdk_span)
233 self._translate_start_time(sdk_span)
234 self._translate_end_time(sdk_span)
235 self._translate_span_id(sdk_span)
236 self._translate_trace_id(sdk_span)
237 self._translate_parent(sdk_span)
238 self._translate_context_trace_state(sdk_span)
239 self._translate_attributes(sdk_span)
240 self._translate_events(sdk_span)
241 self._translate_links(sdk_span)
242 self._translate_status(sdk_span)
243
244 self._collector_span_kwargs["kind"] = getattr(
245 CollectorSpan.SpanKind, sdk_span.kind.name
246 )
247
248 sdk_resource_instrumentation_library_spans[
249 sdk_span.resource
250 ].spans.append(CollectorSpan(**self._collector_span_kwargs))
251
252 resource_spans = []
253
254 for (
255 sdk_resource,
256 instrumentation_library_spans,
257 ) in sdk_resource_instrumentation_library_spans.items():
258
259 collector_resource = Resource()
260
261 for key, value in sdk_resource.labels.items():
262
263 try:
264 collector_resource.attributes.append(
265 _translate_key_values(key, value)
266 )
267 except Exception as error: # pylint: disable=broad-except
268 logger.exception(error)
269
270 resource_spans.append(
271 ResourceSpans(
272 resource=collector_resource,
273 instrumentation_library_spans=[
274 instrumentation_library_spans
275 ],
276 )
277 )
278
279 return ExportTraceServiceRequest(resource_spans=resource_spans)
280
281 def export(self, spans: TypingSequence[SDKSpan]) -> SpanExportResult:
282 # expo returns a generator that yields delay values which grow
283 # exponentially. Once delay is greater than max_value, the yielded
284 # value will remain constant.
285 # max_value is set to 900 (900 seconds is 15 minutes) to use the same
286 # value as used in the Go implementation.
287
288 max_value = 900
289
290 for delay in expo(max_value=max_value):
291
292 if delay == max_value:
293 return SpanExportResult.FAILURE
294
295 try:
296 self._client.Export(
297 request=self._translate_spans(spans),
298 metadata=self._metadata,
299 )
300
301 return SpanExportResult.SUCCESS
302
303 except RpcError as error:
304
305 if error.code() in [
306 StatusCode.CANCELLED,
307 StatusCode.DEADLINE_EXCEEDED,
308 StatusCode.PERMISSION_DENIED,
309 StatusCode.UNAUTHENTICATED,
310 StatusCode.RESOURCE_EXHAUSTED,
311 StatusCode.ABORTED,
312 StatusCode.OUT_OF_RANGE,
313 StatusCode.UNAVAILABLE,
314 StatusCode.DATA_LOSS,
315 ]:
316
317 retry_info_bin = dict(error.trailing_metadata()).get(
318 "google.rpc.retryinfo-bin"
319 )
320 if retry_info_bin is not None:
321 retry_info = RetryInfo()
322 retry_info.ParseFromString(retry_info_bin)
323 delay = (
324 retry_info.retry_delay.seconds
325 + retry_info.retry_delay.nanos / 1.0e9
326 )
327
328 logger.debug("Waiting %ss before retrying export of span")
329 sleep(delay)
330 continue
331
332 if error.code() == StatusCode.OK:
333 return SpanExportResult.SUCCESS
334
335 return SpanExportResult.FAILURE
336
337 return SpanExportResult.FAILURE
338
339 def shutdown(self):
340 pass
341
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlp/__init__.py b/exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlp/__init__.py
--- a/exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlp/__init__.py
+++ b/exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlp/__init__.py
@@ -43,7 +43,7 @@
trace.set_tracer_provider(TracerProvider(resource=resource)))
tracer = trace.get_tracer(__name__)
- otlp_exporter = OTLPSpanExporter(endpoint="localhost:55678")
+ otlp_exporter = OTLPSpanExporter(endpoint="localhost:55680")
span_processor = BatchExportSpanProcessor(otlp_exporter)
diff --git a/exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlp/trace_exporter/__init__.py b/exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlp/trace_exporter/__init__.py
--- a/exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlp/trace_exporter/__init__.py
+++ b/exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlp/trace_exporter/__init__.py
@@ -89,7 +89,7 @@
def __init__(
self,
- endpoint="localhost:55678",
+ endpoint="localhost:55680",
credentials: ChannelCredentials = None,
metadata=None,
):
| {"golden_diff": "diff --git a/exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlp/__init__.py b/exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlp/__init__.py\n--- a/exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlp/__init__.py\n+++ b/exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlp/__init__.py\n@@ -43,7 +43,7 @@\n trace.set_tracer_provider(TracerProvider(resource=resource)))\n tracer = trace.get_tracer(__name__)\n \n- otlp_exporter = OTLPSpanExporter(endpoint=\"localhost:55678\")\n+ otlp_exporter = OTLPSpanExporter(endpoint=\"localhost:55680\")\n \n span_processor = BatchExportSpanProcessor(otlp_exporter)\n \ndiff --git a/exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlp/trace_exporter/__init__.py b/exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlp/trace_exporter/__init__.py\n--- a/exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlp/trace_exporter/__init__.py\n+++ b/exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlp/trace_exporter/__init__.py\n@@ -89,7 +89,7 @@\n \n def __init__(\n self,\n- endpoint=\"localhost:55678\",\n+ endpoint=\"localhost:55680\",\n credentials: ChannelCredentials = None,\n metadata=None,\n ):\n", "issue": "Change default port for OTLP exporter\nBy default the collector OTLP receiver is configured on port 55680, the current default port in the OTLP exporter is still using the old OpenCensus receiver port. This should be changed.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"\nThis library allows to export tracing data to an OTLP collector.\n\nUsage\n-----\n\nThe **OTLP Span Exporter** allows to export `OpenTelemetry`_ traces to the\n`OTLP`_ collector.\n\n\n.. _OTLP: https://github.com/open-telemetry/opentelemetry-collector/\n.. _OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/\n\n.. code:: python\n\n from opentelemetry import trace\n from opentelemetry.exporter.otlp.trace_exporter import OTLPSpanExporter\n from opentelemetry.sdk.resources import Resource\n from opentelemetry.sdk.trace import TracerProvider\n from opentelemetry.sdk.trace.export import BatchExportSpanProcessor\n\n # Resource can be required for some backends, e.g. 
Jaeger\n # If resource wouldn't be set - traces wouldn't appears in Jaeger\n resource = Resource(labels=labels={\n \"service.name\": \"service\"\n })\n\n trace.set_tracer_provider(TracerProvider(resource=resource)))\n tracer = trace.get_tracer(__name__)\n\n otlp_exporter = OTLPSpanExporter(endpoint=\"localhost:55678\")\n\n span_processor = BatchExportSpanProcessor(otlp_exporter)\n\n trace.get_tracer_provider().add_span_processor(span_processor)\n\n with tracer.start_as_current_span(\"foo\"):\n print(\"Hello world!\")\n\nAPI\n---\n\"\"\"\n", "path": "exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlp/__init__.py"}, {"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"OTLP Span Exporter\"\"\"\n\nimport logging\nfrom collections.abc import Mapping, Sequence\nfrom time import sleep\nfrom typing import Sequence as TypingSequence\n\nfrom backoff import expo\nfrom google.rpc.error_details_pb2 import RetryInfo\nfrom grpc import (\n ChannelCredentials,\n RpcError,\n StatusCode,\n insecure_channel,\n secure_channel,\n)\n\nfrom opentelemetry.proto.collector.trace.v1.trace_service_pb2 import (\n ExportTraceServiceRequest,\n)\nfrom opentelemetry.proto.collector.trace.v1.trace_service_pb2_grpc import (\n TraceServiceStub,\n)\nfrom opentelemetry.proto.common.v1.common_pb2 import AnyValue, KeyValue\nfrom opentelemetry.proto.resource.v1.resource_pb2 import Resource\nfrom opentelemetry.proto.trace.v1.trace_pb2 import (\n InstrumentationLibrarySpans,\n ResourceSpans,\n)\nfrom opentelemetry.proto.trace.v1.trace_pb2 import Span as CollectorSpan\nfrom opentelemetry.proto.trace.v1.trace_pb2 import Status\nfrom opentelemetry.sdk.trace import Span as SDKSpan\nfrom opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult\n\nlogger = logging.getLogger(__name__)\n\n\ndef _translate_key_values(key, value):\n\n if isinstance(value, bool):\n any_value = AnyValue(bool_value=value)\n\n elif isinstance(value, str):\n any_value = AnyValue(string_value=value)\n\n elif isinstance(value, int):\n any_value = AnyValue(int_value=value)\n\n elif isinstance(value, float):\n any_value = AnyValue(double_value=value)\n\n elif isinstance(value, Sequence):\n any_value = AnyValue(array_value=value)\n\n elif isinstance(value, Mapping):\n any_value = AnyValue(kvlist_value=value)\n\n else:\n raise Exception(\n \"Invalid type {} of value {}\".format(type(value), value)\n )\n\n return KeyValue(key=key, value=any_value)\n\n\n# pylint: disable=no-member\nclass OTLPSpanExporter(SpanExporter):\n \"\"\"OTLP span exporter\n\n Args:\n endpoint: OpenTelemetry Collector receiver endpoint\n credentials: Credentials object for server authentication\n metadata: Metadata to send when exporting\n \"\"\"\n\n def __init__(\n self,\n endpoint=\"localhost:55678\",\n credentials: ChannelCredentials = None,\n metadata=None,\n ):\n super().__init__()\n\n self._metadata = metadata\n self._collector_span_kwargs = None\n\n if credentials is None:\n self._client = 
TraceServiceStub(insecure_channel(endpoint))\n else:\n self._client = TraceServiceStub(\n secure_channel(endpoint, credentials)\n )\n\n def _translate_name(self, sdk_span):\n self._collector_span_kwargs[\"name\"] = sdk_span.name\n\n def _translate_start_time(self, sdk_span):\n self._collector_span_kwargs[\n \"start_time_unix_nano\"\n ] = sdk_span.start_time\n\n def _translate_end_time(self, sdk_span):\n self._collector_span_kwargs[\"end_time_unix_nano\"] = sdk_span.end_time\n\n def _translate_span_id(self, sdk_span):\n self._collector_span_kwargs[\n \"span_id\"\n ] = sdk_span.context.span_id.to_bytes(8, \"big\")\n\n def _translate_trace_id(self, sdk_span):\n self._collector_span_kwargs[\n \"trace_id\"\n ] = sdk_span.context.trace_id.to_bytes(16, \"big\")\n\n def _translate_parent(self, sdk_span):\n if sdk_span.parent is not None:\n self._collector_span_kwargs[\n \"parent_span_id\"\n ] = sdk_span.parent.span_id.to_bytes(8, \"big\")\n\n def _translate_context_trace_state(self, sdk_span):\n if sdk_span.context.trace_state is not None:\n self._collector_span_kwargs[\"trace_state\"] = \",\".join(\n [\n \"{}={}\".format(key, value)\n for key, value in (sdk_span.context.trace_state.items())\n ]\n )\n\n def _translate_attributes(self, sdk_span):\n if sdk_span.attributes:\n\n self._collector_span_kwargs[\"attributes\"] = []\n\n for key, value in sdk_span.attributes.items():\n\n try:\n self._collector_span_kwargs[\"attributes\"].append(\n _translate_key_values(key, value)\n )\n except Exception as error: # pylint: disable=broad-except\n logger.exception(error)\n\n def _translate_events(self, sdk_span):\n if sdk_span.events:\n self._collector_span_kwargs[\"events\"] = []\n\n for sdk_span_event in sdk_span.events:\n\n collector_span_event = CollectorSpan.Event(\n name=sdk_span_event.name,\n time_unix_nano=sdk_span_event.timestamp,\n )\n\n for key, value in sdk_span_event.attributes.items():\n try:\n collector_span_event.attributes.append(\n _translate_key_values(key, value)\n )\n # pylint: disable=broad-except\n except Exception as error:\n logger.exception(error)\n\n self._collector_span_kwargs[\"events\"].append(\n collector_span_event\n )\n\n def _translate_links(self, sdk_span):\n if sdk_span.links:\n self._collector_span_kwargs[\"links\"] = []\n\n for sdk_span_link in sdk_span.links:\n\n collector_span_link = CollectorSpan.Link(\n trace_id=(\n sdk_span_link.context.trace_id.to_bytes(16, \"big\")\n ),\n span_id=(sdk_span_link.context.span_id.to_bytes(8, \"big\")),\n )\n\n for key, value in sdk_span_link.attributes.items():\n try:\n collector_span_link.attributes.append(\n _translate_key_values(key, value)\n )\n # pylint: disable=broad-except\n except Exception as error:\n logger.exception(error)\n\n self._collector_span_kwargs[\"links\"].append(\n collector_span_link\n )\n\n def _translate_status(self, sdk_span):\n if sdk_span.status is not None:\n self._collector_span_kwargs[\"status\"] = Status(\n code=sdk_span.status.canonical_code.value,\n message=sdk_span.status.description,\n )\n\n def _translate_spans(\n self, sdk_spans: TypingSequence[SDKSpan],\n ) -> ExportTraceServiceRequest:\n\n sdk_resource_instrumentation_library_spans = {}\n\n for sdk_span in sdk_spans:\n\n if sdk_span.resource not in (\n sdk_resource_instrumentation_library_spans.keys()\n ):\n sdk_resource_instrumentation_library_spans[\n sdk_span.resource\n ] = InstrumentationLibrarySpans()\n\n self._collector_span_kwargs = {}\n\n self._translate_name(sdk_span)\n self._translate_start_time(sdk_span)\n 
self._translate_end_time(sdk_span)\n self._translate_span_id(sdk_span)\n self._translate_trace_id(sdk_span)\n self._translate_parent(sdk_span)\n self._translate_context_trace_state(sdk_span)\n self._translate_attributes(sdk_span)\n self._translate_events(sdk_span)\n self._translate_links(sdk_span)\n self._translate_status(sdk_span)\n\n self._collector_span_kwargs[\"kind\"] = getattr(\n CollectorSpan.SpanKind, sdk_span.kind.name\n )\n\n sdk_resource_instrumentation_library_spans[\n sdk_span.resource\n ].spans.append(CollectorSpan(**self._collector_span_kwargs))\n\n resource_spans = []\n\n for (\n sdk_resource,\n instrumentation_library_spans,\n ) in sdk_resource_instrumentation_library_spans.items():\n\n collector_resource = Resource()\n\n for key, value in sdk_resource.labels.items():\n\n try:\n collector_resource.attributes.append(\n _translate_key_values(key, value)\n )\n except Exception as error: # pylint: disable=broad-except\n logger.exception(error)\n\n resource_spans.append(\n ResourceSpans(\n resource=collector_resource,\n instrumentation_library_spans=[\n instrumentation_library_spans\n ],\n )\n )\n\n return ExportTraceServiceRequest(resource_spans=resource_spans)\n\n def export(self, spans: TypingSequence[SDKSpan]) -> SpanExportResult:\n # expo returns a generator that yields delay values which grow\n # exponentially. Once delay is greater than max_value, the yielded\n # value will remain constant.\n # max_value is set to 900 (900 seconds is 15 minutes) to use the same\n # value as used in the Go implementation.\n\n max_value = 900\n\n for delay in expo(max_value=max_value):\n\n if delay == max_value:\n return SpanExportResult.FAILURE\n\n try:\n self._client.Export(\n request=self._translate_spans(spans),\n metadata=self._metadata,\n )\n\n return SpanExportResult.SUCCESS\n\n except RpcError as error:\n\n if error.code() in [\n StatusCode.CANCELLED,\n StatusCode.DEADLINE_EXCEEDED,\n StatusCode.PERMISSION_DENIED,\n StatusCode.UNAUTHENTICATED,\n StatusCode.RESOURCE_EXHAUSTED,\n StatusCode.ABORTED,\n StatusCode.OUT_OF_RANGE,\n StatusCode.UNAVAILABLE,\n StatusCode.DATA_LOSS,\n ]:\n\n retry_info_bin = dict(error.trailing_metadata()).get(\n \"google.rpc.retryinfo-bin\"\n )\n if retry_info_bin is not None:\n retry_info = RetryInfo()\n retry_info.ParseFromString(retry_info_bin)\n delay = (\n retry_info.retry_delay.seconds\n + retry_info.retry_delay.nanos / 1.0e9\n )\n\n logger.debug(\"Waiting %ss before retrying export of span\")\n sleep(delay)\n continue\n\n if error.code() == StatusCode.OK:\n return SpanExportResult.SUCCESS\n\n return SpanExportResult.FAILURE\n\n return SpanExportResult.FAILURE\n\n def shutdown(self):\n pass\n", "path": "exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlp/trace_exporter/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"\nThis library allows to export tracing data to an OTLP collector.\n\nUsage\n-----\n\nThe **OTLP Span Exporter** allows to 
export `OpenTelemetry`_ traces to the\n`OTLP`_ collector.\n\n\n.. _OTLP: https://github.com/open-telemetry/opentelemetry-collector/\n.. _OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/\n\n.. code:: python\n\n from opentelemetry import trace\n from opentelemetry.exporter.otlp.trace_exporter import OTLPSpanExporter\n from opentelemetry.sdk.resources import Resource\n from opentelemetry.sdk.trace import TracerProvider\n from opentelemetry.sdk.trace.export import BatchExportSpanProcessor\n\n # Resource can be required for some backends, e.g. Jaeger\n # If resource wouldn't be set - traces wouldn't appears in Jaeger\n resource = Resource(labels=labels={\n \"service.name\": \"service\"\n })\n\n trace.set_tracer_provider(TracerProvider(resource=resource)))\n tracer = trace.get_tracer(__name__)\n\n otlp_exporter = OTLPSpanExporter(endpoint=\"localhost:55680\")\n\n span_processor = BatchExportSpanProcessor(otlp_exporter)\n\n trace.get_tracer_provider().add_span_processor(span_processor)\n\n with tracer.start_as_current_span(\"foo\"):\n print(\"Hello world!\")\n\nAPI\n---\n\"\"\"\n", "path": "exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlp/__init__.py"}, {"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"OTLP Span Exporter\"\"\"\n\nimport logging\nfrom collections.abc import Mapping, Sequence\nfrom time import sleep\nfrom typing import Sequence as TypingSequence\n\nfrom backoff import expo\nfrom google.rpc.error_details_pb2 import RetryInfo\nfrom grpc import (\n ChannelCredentials,\n RpcError,\n StatusCode,\n insecure_channel,\n secure_channel,\n)\n\nfrom opentelemetry.proto.collector.trace.v1.trace_service_pb2 import (\n ExportTraceServiceRequest,\n)\nfrom opentelemetry.proto.collector.trace.v1.trace_service_pb2_grpc import (\n TraceServiceStub,\n)\nfrom opentelemetry.proto.common.v1.common_pb2 import AnyValue, KeyValue\nfrom opentelemetry.proto.resource.v1.resource_pb2 import Resource\nfrom opentelemetry.proto.trace.v1.trace_pb2 import (\n InstrumentationLibrarySpans,\n ResourceSpans,\n)\nfrom opentelemetry.proto.trace.v1.trace_pb2 import Span as CollectorSpan\nfrom opentelemetry.proto.trace.v1.trace_pb2 import Status\nfrom opentelemetry.sdk.trace import Span as SDKSpan\nfrom opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult\n\nlogger = logging.getLogger(__name__)\n\n\ndef _translate_key_values(key, value):\n\n if isinstance(value, bool):\n any_value = AnyValue(bool_value=value)\n\n elif isinstance(value, str):\n any_value = AnyValue(string_value=value)\n\n elif isinstance(value, int):\n any_value = AnyValue(int_value=value)\n\n elif isinstance(value, float):\n any_value = AnyValue(double_value=value)\n\n elif isinstance(value, Sequence):\n any_value = AnyValue(array_value=value)\n\n elif isinstance(value, Mapping):\n any_value = AnyValue(kvlist_value=value)\n\n else:\n raise Exception(\n \"Invalid type {} of value {}\".format(type(value), value)\n )\n\n return KeyValue(key=key, 
value=any_value)\n\n\n# pylint: disable=no-member\nclass OTLPSpanExporter(SpanExporter):\n \"\"\"OTLP span exporter\n\n Args:\n endpoint: OpenTelemetry Collector receiver endpoint\n credentials: Credentials object for server authentication\n metadata: Metadata to send when exporting\n \"\"\"\n\n def __init__(\n self,\n endpoint=\"localhost:55680\",\n credentials: ChannelCredentials = None,\n metadata=None,\n ):\n super().__init__()\n\n self._metadata = metadata\n self._collector_span_kwargs = None\n\n if credentials is None:\n self._client = TraceServiceStub(insecure_channel(endpoint))\n else:\n self._client = TraceServiceStub(\n secure_channel(endpoint, credentials)\n )\n\n def _translate_name(self, sdk_span):\n self._collector_span_kwargs[\"name\"] = sdk_span.name\n\n def _translate_start_time(self, sdk_span):\n self._collector_span_kwargs[\n \"start_time_unix_nano\"\n ] = sdk_span.start_time\n\n def _translate_end_time(self, sdk_span):\n self._collector_span_kwargs[\"end_time_unix_nano\"] = sdk_span.end_time\n\n def _translate_span_id(self, sdk_span):\n self._collector_span_kwargs[\n \"span_id\"\n ] = sdk_span.context.span_id.to_bytes(8, \"big\")\n\n def _translate_trace_id(self, sdk_span):\n self._collector_span_kwargs[\n \"trace_id\"\n ] = sdk_span.context.trace_id.to_bytes(16, \"big\")\n\n def _translate_parent(self, sdk_span):\n if sdk_span.parent is not None:\n self._collector_span_kwargs[\n \"parent_span_id\"\n ] = sdk_span.parent.span_id.to_bytes(8, \"big\")\n\n def _translate_context_trace_state(self, sdk_span):\n if sdk_span.context.trace_state is not None:\n self._collector_span_kwargs[\"trace_state\"] = \",\".join(\n [\n \"{}={}\".format(key, value)\n for key, value in (sdk_span.context.trace_state.items())\n ]\n )\n\n def _translate_attributes(self, sdk_span):\n if sdk_span.attributes:\n\n self._collector_span_kwargs[\"attributes\"] = []\n\n for key, value in sdk_span.attributes.items():\n\n try:\n self._collector_span_kwargs[\"attributes\"].append(\n _translate_key_values(key, value)\n )\n except Exception as error: # pylint: disable=broad-except\n logger.exception(error)\n\n def _translate_events(self, sdk_span):\n if sdk_span.events:\n self._collector_span_kwargs[\"events\"] = []\n\n for sdk_span_event in sdk_span.events:\n\n collector_span_event = CollectorSpan.Event(\n name=sdk_span_event.name,\n time_unix_nano=sdk_span_event.timestamp,\n )\n\n for key, value in sdk_span_event.attributes.items():\n try:\n collector_span_event.attributes.append(\n _translate_key_values(key, value)\n )\n # pylint: disable=broad-except\n except Exception as error:\n logger.exception(error)\n\n self._collector_span_kwargs[\"events\"].append(\n collector_span_event\n )\n\n def _translate_links(self, sdk_span):\n if sdk_span.links:\n self._collector_span_kwargs[\"links\"] = []\n\n for sdk_span_link in sdk_span.links:\n\n collector_span_link = CollectorSpan.Link(\n trace_id=(\n sdk_span_link.context.trace_id.to_bytes(16, \"big\")\n ),\n span_id=(sdk_span_link.context.span_id.to_bytes(8, \"big\")),\n )\n\n for key, value in sdk_span_link.attributes.items():\n try:\n collector_span_link.attributes.append(\n _translate_key_values(key, value)\n )\n # pylint: disable=broad-except\n except Exception as error:\n logger.exception(error)\n\n self._collector_span_kwargs[\"links\"].append(\n collector_span_link\n )\n\n def _translate_status(self, sdk_span):\n if sdk_span.status is not None:\n self._collector_span_kwargs[\"status\"] = Status(\n code=sdk_span.status.canonical_code.value,\n 
message=sdk_span.status.description,\n )\n\n def _translate_spans(\n self, sdk_spans: TypingSequence[SDKSpan],\n ) -> ExportTraceServiceRequest:\n\n sdk_resource_instrumentation_library_spans = {}\n\n for sdk_span in sdk_spans:\n\n if sdk_span.resource not in (\n sdk_resource_instrumentation_library_spans.keys()\n ):\n sdk_resource_instrumentation_library_spans[\n sdk_span.resource\n ] = InstrumentationLibrarySpans()\n\n self._collector_span_kwargs = {}\n\n self._translate_name(sdk_span)\n self._translate_start_time(sdk_span)\n self._translate_end_time(sdk_span)\n self._translate_span_id(sdk_span)\n self._translate_trace_id(sdk_span)\n self._translate_parent(sdk_span)\n self._translate_context_trace_state(sdk_span)\n self._translate_attributes(sdk_span)\n self._translate_events(sdk_span)\n self._translate_links(sdk_span)\n self._translate_status(sdk_span)\n\n self._collector_span_kwargs[\"kind\"] = getattr(\n CollectorSpan.SpanKind, sdk_span.kind.name\n )\n\n sdk_resource_instrumentation_library_spans[\n sdk_span.resource\n ].spans.append(CollectorSpan(**self._collector_span_kwargs))\n\n resource_spans = []\n\n for (\n sdk_resource,\n instrumentation_library_spans,\n ) in sdk_resource_instrumentation_library_spans.items():\n\n collector_resource = Resource()\n\n for key, value in sdk_resource.labels.items():\n\n try:\n collector_resource.attributes.append(\n _translate_key_values(key, value)\n )\n except Exception as error: # pylint: disable=broad-except\n logger.exception(error)\n\n resource_spans.append(\n ResourceSpans(\n resource=collector_resource,\n instrumentation_library_spans=[\n instrumentation_library_spans\n ],\n )\n )\n\n return ExportTraceServiceRequest(resource_spans=resource_spans)\n\n def export(self, spans: TypingSequence[SDKSpan]) -> SpanExportResult:\n # expo returns a generator that yields delay values which grow\n # exponentially. Once delay is greater than max_value, the yielded\n # value will remain constant.\n # max_value is set to 900 (900 seconds is 15 minutes) to use the same\n # value as used in the Go implementation.\n\n max_value = 900\n\n for delay in expo(max_value=max_value):\n\n if delay == max_value:\n return SpanExportResult.FAILURE\n\n try:\n self._client.Export(\n request=self._translate_spans(spans),\n metadata=self._metadata,\n )\n\n return SpanExportResult.SUCCESS\n\n except RpcError as error:\n\n if error.code() in [\n StatusCode.CANCELLED,\n StatusCode.DEADLINE_EXCEEDED,\n StatusCode.PERMISSION_DENIED,\n StatusCode.UNAUTHENTICATED,\n StatusCode.RESOURCE_EXHAUSTED,\n StatusCode.ABORTED,\n StatusCode.OUT_OF_RANGE,\n StatusCode.UNAVAILABLE,\n StatusCode.DATA_LOSS,\n ]:\n\n retry_info_bin = dict(error.trailing_metadata()).get(\n \"google.rpc.retryinfo-bin\"\n )\n if retry_info_bin is not None:\n retry_info = RetryInfo()\n retry_info.ParseFromString(retry_info_bin)\n delay = (\n retry_info.retry_delay.seconds\n + retry_info.retry_delay.nanos / 1.0e9\n )\n\n logger.debug(\"Waiting %ss before retrying export of span\")\n sleep(delay)\n continue\n\n if error.code() == StatusCode.OK:\n return SpanExportResult.SUCCESS\n\n return SpanExportResult.FAILURE\n\n return SpanExportResult.FAILURE\n\n def shutdown(self):\n pass\n", "path": "exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlp/trace_exporter/__init__.py"}]} | 4,057 | 382 |
gh_patches_debug_4165 | rasdani/github-patches | git_diff | ivy-llc__ivy-14979 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
extract
--- END ISSUE ---
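For reference, `numpy.extract(cond, arr)` returns a flat array of the elements of `arr` at the positions where `cond` is non-zero; NumPy documents it as equivalent to `np.compress(np.ravel(cond), np.ravel(arr))`. A minimal sketch of that reference behaviour in plain NumPy (illustrative values only), which the requested frontend function would be expected to mirror:

```python
import numpy as np

arr = np.arange(6).reshape(2, 3)

# Boolean condition: behaves like boolean indexing on the flattened array.
cond = np.array([[True, False, True], [False, True, False]])
print(np.extract(cond, arr))        # [0 2 4]

# Non-boolean condition: every non-zero entry counts as True.
print(np.extract(arr % 2, arr))     # [1 3 5]
```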
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/numpy/sorting_searching_counting/searching.py`
Content:
```
1 # local
2
3 import ivy
4
5 from ivy.functional.frontends.numpy import promote_types_of_numpy_inputs
6
7 from ivy.functional.frontends.numpy.func_wrapper import (
8 to_ivy_arrays_and_back,
9 from_zero_dim_arrays_to_scalar,
10 handle_numpy_out,
11 )
12
13
14 @to_ivy_arrays_and_back
15 def where(cond, x1=None, x2=None, /):
16 if x1 is None and x2 is None:
17 # numpy where behaves as np.asarray(condition).nonzero() when x and y
18 # not included
19 return ivy.asarray(cond).nonzero()
20 elif x1 is not None and x2 is not None:
21 x1, x2 = promote_types_of_numpy_inputs(x1, x2)
22 return ivy.where(cond, x1, x2)
23 else:
24 raise ivy.utils.exceptions.IvyException("where takes either 1 or 3 arguments")
25
26
27 @to_ivy_arrays_and_back
28 def nonzero(a):
29 return ivy.nonzero(a)
30
31
32 @handle_numpy_out
33 @to_ivy_arrays_and_back
34 @from_zero_dim_arrays_to_scalar
35 def argmin(a, /, *, axis=None, keepdims=False, out=None):
36 return ivy.argmin(a, axis=axis, out=out, keepdims=keepdims)
37
38
39 @handle_numpy_out
40 @to_ivy_arrays_and_back
41 @from_zero_dim_arrays_to_scalar
42 def argmax(
43 a,
44 /,
45 *,
46 axis=None,
47 out=None,
48 keepdims=False,
49 ):
50 return ivy.argmax(a, axis=axis, out=out, keepdims=keepdims)
51
52
53 @to_ivy_arrays_and_back
54 def flatnonzero(a):
55 return ivy.nonzero(ivy.reshape(a, (-1,)))
56
57
58 @to_ivy_arrays_and_back
59 def searchsorted(a, v, side="left", sorter=None):
60 return ivy.searchsorted(a, v, side=side, sorter=sorter)
61
62
63 @to_ivy_arrays_and_back
64 def argwhere(a):
65 return ivy.argwhere(a)
66
67
68 # nanargmin and nanargmax composition helper
69 def _nanargminmax(a, axis=None):
70 # check nans
71 nans = ivy.isnan(a).astype(ivy.bool)
72 # replace nans with inf
73 a = ivy.where(nans, ivy.inf, a)
74 if nans is not None:
75 nans = ivy.all(nans, axis=axis)
76 if ivy.any(nans):
77 raise ivy.utils.exceptions.IvyError("All-NaN slice encountered")
78 return a
79
80
81 @handle_numpy_out
82 @to_ivy_arrays_and_back
83 @from_zero_dim_arrays_to_scalar
84 def nanargmax(a, /, *, axis=None, out=None, keepdims=False):
85 a = _nanargminmax(a, axis=axis)
86 return ivy.argmax(a, axis=axis, keepdims=keepdims, out=out)
87
88
89 @handle_numpy_out
90 @to_ivy_arrays_and_back
91 @from_zero_dim_arrays_to_scalar
92 def nanargmin(a, /, *, axis=None, out=None, keepdims=False):
93 a = _nanargminmax(a, axis=axis)
94 return ivy.argmin(a, axis=axis, keepdims=keepdims, out=out)
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/numpy/sorting_searching_counting/searching.py b/ivy/functional/frontends/numpy/sorting_searching_counting/searching.py
--- a/ivy/functional/frontends/numpy/sorting_searching_counting/searching.py
+++ b/ivy/functional/frontends/numpy/sorting_searching_counting/searching.py
@@ -92,3 +92,11 @@
def nanargmin(a, /, *, axis=None, out=None, keepdims=False):
a = _nanargminmax(a, axis=axis)
return ivy.argmin(a, axis=axis, keepdims=keepdims, out=out)
+
+
+@to_ivy_arrays_and_back
+def extract(cond, arr, /):
+ if cond.dtype == 'bool':
+ return arr[cond]
+ else:
+ return arr[cond !=0]
\ No newline at end of file
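The patch branches on `cond.dtype`: a boolean mask indexes `arr` directly, anything else is first compared against zero. A quick sanity-check sketch, using plain NumPy as a rough stand-in for the arrays the `to_ivy_arrays_and_back` wrapper supplies, showing that both branches agree with the reference `numpy.extract`:

```python
import numpy as np

def extract_like_patch(cond, arr):
    # Same branch logic as the patched frontend function.
    if cond.dtype == 'bool':
        return arr[cond]
    return arr[cond != 0]

arr = np.arange(6)
bool_cond = arr % 2 == 0          # boolean dtype -> first branch
int_cond = arr % 2                # integer dtype -> second branch

assert np.array_equal(extract_like_patch(bool_cond, arr), np.extract(bool_cond, arr))
assert np.array_equal(extract_like_patch(int_cond, arr), np.extract(int_cond, arr))
print("both branches match numpy.extract")
```

For multi-dimensional inputs, boolean indexing already returns a flattened result, which is why the two-line body matches `np.extract`'s ravel-then-compress behaviour.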
| {"golden_diff": "diff --git a/ivy/functional/frontends/numpy/sorting_searching_counting/searching.py b/ivy/functional/frontends/numpy/sorting_searching_counting/searching.py\n--- a/ivy/functional/frontends/numpy/sorting_searching_counting/searching.py\n+++ b/ivy/functional/frontends/numpy/sorting_searching_counting/searching.py\n@@ -92,3 +92,11 @@\n def nanargmin(a, /, *, axis=None, out=None, keepdims=False):\n a = _nanargminmax(a, axis=axis)\n return ivy.argmin(a, axis=axis, keepdims=keepdims, out=out)\n+\n+\n+@to_ivy_arrays_and_back\n+def extract(cond, arr, /):\n+ if cond.dtype == 'bool':\n+ return arr[cond]\n+ else:\n+ return arr[cond !=0]\n\\ No newline at end of file\n", "issue": "extract\n\n", "before_files": [{"content": "# local\n\nimport ivy\n\nfrom ivy.functional.frontends.numpy import promote_types_of_numpy_inputs\n\nfrom ivy.functional.frontends.numpy.func_wrapper import (\n to_ivy_arrays_and_back,\n from_zero_dim_arrays_to_scalar,\n handle_numpy_out,\n)\n\n\n@to_ivy_arrays_and_back\ndef where(cond, x1=None, x2=None, /):\n if x1 is None and x2 is None:\n # numpy where behaves as np.asarray(condition).nonzero() when x and y\n # not included\n return ivy.asarray(cond).nonzero()\n elif x1 is not None and x2 is not None:\n x1, x2 = promote_types_of_numpy_inputs(x1, x2)\n return ivy.where(cond, x1, x2)\n else:\n raise ivy.utils.exceptions.IvyException(\"where takes either 1 or 3 arguments\")\n\n\n@to_ivy_arrays_and_back\ndef nonzero(a):\n return ivy.nonzero(a)\n\n\n@handle_numpy_out\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef argmin(a, /, *, axis=None, keepdims=False, out=None):\n return ivy.argmin(a, axis=axis, out=out, keepdims=keepdims)\n\n\n@handle_numpy_out\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef argmax(\n a,\n /,\n *,\n axis=None,\n out=None,\n keepdims=False,\n):\n return ivy.argmax(a, axis=axis, out=out, keepdims=keepdims)\n\n\n@to_ivy_arrays_and_back\ndef flatnonzero(a):\n return ivy.nonzero(ivy.reshape(a, (-1,)))\n\n\n@to_ivy_arrays_and_back\ndef searchsorted(a, v, side=\"left\", sorter=None):\n return ivy.searchsorted(a, v, side=side, sorter=sorter)\n\n\n@to_ivy_arrays_and_back\ndef argwhere(a):\n return ivy.argwhere(a)\n\n\n# nanargmin and nanargmax composition helper\ndef _nanargminmax(a, axis=None):\n # check nans\n nans = ivy.isnan(a).astype(ivy.bool)\n # replace nans with inf\n a = ivy.where(nans, ivy.inf, a)\n if nans is not None:\n nans = ivy.all(nans, axis=axis)\n if ivy.any(nans):\n raise ivy.utils.exceptions.IvyError(\"All-NaN slice encountered\")\n return a\n\n\n@handle_numpy_out\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef nanargmax(a, /, *, axis=None, out=None, keepdims=False):\n a = _nanargminmax(a, axis=axis)\n return ivy.argmax(a, axis=axis, keepdims=keepdims, out=out)\n\n\n@handle_numpy_out\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef nanargmin(a, /, *, axis=None, out=None, keepdims=False):\n a = _nanargminmax(a, axis=axis)\n return ivy.argmin(a, axis=axis, keepdims=keepdims, out=out)\n", "path": "ivy/functional/frontends/numpy/sorting_searching_counting/searching.py"}], "after_files": [{"content": "# local\n\nimport ivy\n\nfrom ivy.functional.frontends.numpy import promote_types_of_numpy_inputs\n\nfrom ivy.functional.frontends.numpy.func_wrapper import (\n to_ivy_arrays_and_back,\n from_zero_dim_arrays_to_scalar,\n handle_numpy_out,\n)\n\n\n@to_ivy_arrays_and_back\ndef where(cond, x1=None, x2=None, /):\n if x1 is None and x2 is None:\n # numpy where behaves as 
np.asarray(condition).nonzero() when x and y\n # not included\n return ivy.asarray(cond).nonzero()\n elif x1 is not None and x2 is not None:\n x1, x2 = promote_types_of_numpy_inputs(x1, x2)\n return ivy.where(cond, x1, x2)\n else:\n raise ivy.utils.exceptions.IvyException(\"where takes either 1 or 3 arguments\")\n\n\n@to_ivy_arrays_and_back\ndef nonzero(a):\n return ivy.nonzero(a)\n\n\n@handle_numpy_out\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef argmin(a, /, *, axis=None, keepdims=False, out=None):\n return ivy.argmin(a, axis=axis, out=out, keepdims=keepdims)\n\n\n@handle_numpy_out\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef argmax(\n a,\n /,\n *,\n axis=None,\n out=None,\n keepdims=False,\n):\n return ivy.argmax(a, axis=axis, out=out, keepdims=keepdims)\n\n\n@to_ivy_arrays_and_back\ndef flatnonzero(a):\n return ivy.nonzero(ivy.reshape(a, (-1,)))\n\n\n@to_ivy_arrays_and_back\ndef searchsorted(a, v, side=\"left\", sorter=None):\n return ivy.searchsorted(a, v, side=side, sorter=sorter)\n\n\n@to_ivy_arrays_and_back\ndef argwhere(a):\n return ivy.argwhere(a)\n\n\n# nanargmin and nanargmax composition helper\ndef _nanargminmax(a, axis=None):\n # check nans\n nans = ivy.isnan(a).astype(ivy.bool)\n # replace nans with inf\n a = ivy.where(nans, ivy.inf, a)\n if nans is not None:\n nans = ivy.all(nans, axis=axis)\n if ivy.any(nans):\n raise ivy.utils.exceptions.IvyError(\"All-NaN slice encountered\")\n return a\n\n\n@handle_numpy_out\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef nanargmax(a, /, *, axis=None, out=None, keepdims=False):\n a = _nanargminmax(a, axis=axis)\n return ivy.argmax(a, axis=axis, keepdims=keepdims, out=out)\n\n\n@handle_numpy_out\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef nanargmin(a, /, *, axis=None, out=None, keepdims=False):\n a = _nanargminmax(a, axis=axis)\n return ivy.argmin(a, axis=axis, keepdims=keepdims, out=out)\n\n\n@to_ivy_arrays_and_back\ndef extract(cond, arr, /):\n if cond.dtype == 'bool':\n return arr[cond]\n else:\n return arr[cond !=0]", "path": "ivy/functional/frontends/numpy/sorting_searching_counting/searching.py"}]} | 1,167 | 204 |
gh_patches_debug_23574 | rasdani/github-patches | git_diff | tensorflow__tensor2tensor-514 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Problem computing the "real" BLEU
It seems to have some small problems.
Traceback (most recent call last):
File "/usr/local/bin/t2t-bleu", line 6, in
exec(compile(open(file).read(), file, 'exec'))
File "/workspace/tf_project/tensor2tensor/tensor2tensor/bin/t2t-bleu", line 137, in
tf.app.run()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/platform/app.py", line 48, in run
_sys.exit(main(_sys.argv[:1] + flags_passthrough))
File "/workspace/tf_project/tensor2tensor/tensor2tensor/bin/t2t-bleu", line 80, in main
bleu = 100 * bleu_hook.bleu_wrapper(FLAGS.reference, FLAGS.translation, case_sensitive=False)
File "/workspace/tf_project/tensor2tensor/tensor2tensor/utils/bleu_hook.py", line 202, in bleu_wrapper
ref_tokens = [bleu_tokenize(x) for x in ref_lines]
File "/workspace/tf_project/tensor2tensor/tensor2tensor/utils/bleu_hook.py", line 187, in bleu_tokenize
string = UnicodeRegex.nondigit_punct_re.sub(r'\1 \2 ', string)
AttributeError: type object 'UnicodeRegex' has no attribute 'nondigit_punct_re'
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tensor2tensor/utils/bleu_hook.py`
Content:
```
1 # coding=utf-8
2 # Copyright 2017 The Tensor2Tensor Authors.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 """BLEU metric util used during eval for MT."""
17 from __future__ import absolute_import
18 from __future__ import division
19 from __future__ import print_function
20
21 import collections
22 import math
23 import os
24 import re
25 import sys
26 import time
27 import unicodedata
28 from collections import namedtuple
29
30 # Dependency imports
31
32 import numpy as np
33 import six
34 # pylint: disable=redefined-builtin
35 from six.moves import xrange
36 from six.moves import zip
37 # pylint: enable=redefined-builtin
38
39 import tensorflow as tf
40
41
42 def _get_ngrams(segment, max_order):
43 """Extracts all n-grams upto a given maximum order from an input segment.
44
45 Args:
46 segment: text segment from which n-grams will be extracted.
47 max_order: maximum length in tokens of the n-grams returned by this
48 methods.
49
50 Returns:
51 The Counter containing all n-grams upto max_order in segment
52 with a count of how many times each n-gram occurred.
53 """
54 ngram_counts = collections.Counter()
55 for order in xrange(1, max_order + 1):
56 for i in xrange(0, len(segment) - order + 1):
57 ngram = tuple(segment[i:i + order])
58 ngram_counts[ngram] += 1
59 return ngram_counts
60
61
62 def compute_bleu(reference_corpus,
63 translation_corpus,
64 max_order=4,
65 use_bp=True):
66 """Computes BLEU score of translated segments against one or more references.
67
68 Args:
69 reference_corpus: list of references for each translation. Each
70 reference should be tokenized into a list of tokens.
71 translation_corpus: list of translations to score. Each translation
72 should be tokenized into a list of tokens.
73 max_order: Maximum n-gram order to use when computing BLEU score.
74 use_bp: boolean, whether to apply brevity penalty.
75
76 Returns:
77 BLEU score.
78 """
79 reference_length = 0
80 translation_length = 0
81 bp = 1.0
82 geo_mean = 0
83
84 matches_by_order = [0] * max_order
85 possible_matches_by_order = [0] * max_order
86 precisions = []
87
88 for (references, translations) in zip(reference_corpus, translation_corpus):
89 reference_length += len(references)
90 translation_length += len(translations)
91 ref_ngram_counts = _get_ngrams(references, max_order)
92 translation_ngram_counts = _get_ngrams(translations, max_order)
93
94 overlap = dict((ngram,
95 min(count, translation_ngram_counts[ngram]))
96 for ngram, count in ref_ngram_counts.items())
97
98 for ngram in overlap:
99 matches_by_order[len(ngram) - 1] += overlap[ngram]
100 for ngram in translation_ngram_counts:
101 possible_matches_by_order[len(ngram)-1] += translation_ngram_counts[ngram]
102 precisions = [0] * max_order
103 smooth = 1.0
104 for i in xrange(0, max_order):
105 if possible_matches_by_order[i] > 0:
106 precisions[i] = matches_by_order[i] / possible_matches_by_order[i]
107 if matches_by_order[i] > 0:
108 precisions[i] = matches_by_order[i] / possible_matches_by_order[i]
109 else:
110 smooth *= 2
111 precisions[i] = 1.0 / (smooth * possible_matches_by_order[i])
112 else:
113 precisions[i] = 0.0
114
115 if max(precisions) > 0:
116 p_log_sum = sum(math.log(p) for p in precisions if p)
117 geo_mean = math.exp(p_log_sum/max_order)
118
119 if use_bp:
120 ratio = translation_length / reference_length
121 bp = math.exp(1 - 1. / ratio) if ratio < 1.0 else 1.0
122 bleu = geo_mean * bp
123 return np.float32(bleu)
124
125
126 def bleu_score(predictions, labels, **unused_kwargs):
127 """BLEU score computation between labels and predictions.
128
129 An approximate BLEU scoring method since we do not glue word pieces or
130 decode the ids and tokenize the output. By default, we use ngram order of 4
131 and use brevity penalty. Also, this does not have beam search.
132
133 Args:
134 predictions: tensor, model predicitons
135 labels: tensor, gold output.
136
137 Returns:
138 bleu: int, approx bleu score
139 """
140 outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
141 # Convert the outputs and labels to a [batch_size, input_length] tensor.
142 outputs = tf.squeeze(outputs, axis=[-1, -2])
143 labels = tf.squeeze(labels, axis=[-1, -2])
144
145 bleu = tf.py_func(compute_bleu, (labels, outputs), tf.float32)
146 return bleu, tf.constant(1.0)
147
148
149 class UnicodeRegex(object):
150 """Ad-hoc hack to recognize all punctuation and symbols."""
151
152 def __init__(self):
153 def _property_chars(prefix):
154 return ''.join(six.unichr(x) for x in range(sys.maxunicode)
155 if unicodedata.category(six.unichr(x)).startswith(prefix))
156 punctuation = self._property_chars('P')
157 self.nondigit_punct_re = re.compile(r'([^\d])([' + punctuation + r'])')
158 self.punct_nondigit_re = re.compile(r'([' + punctuation + r'])([^\d])')
159 self.symbol_re = re.compile('([' + _property_chars('S') + '])')
160
161
162 def bleu_tokenize(string):
163 r"""Tokenize a string following the official BLEU implementation.
164
165 See https://github.com/moses-smt/mosesdecoder/"
166 "blob/master/scripts/generic/mteval-v14.pl#L954-L983
167 In our case, the input string is expected to be just one line
168 and no HTML entities de-escaping is needed.
169 So we just tokenize on punctuation and symbols,
170 except when a punctuation is preceded and followed by a digit
171 (e.g. a comma/dot as a thousand/decimal separator).
172
173 Note that a numer (e.g. a year) followed by a dot at the end of sentence
174 is NOT tokenized,
175 i.e. the dot stays with the number because `s/(\p{P})(\P{N})/ $1 $2/g`
176 does not match this case (unless we add a space after each sentence).
177 However, this error is already in the original mteval-v14.pl
178 and we want to be consistent with it.
179
180 Args:
181 string: the input string
182
183 Returns:
184 a list of tokens
185 """
186 string = UnicodeRegex.nondigit_punct_re.sub(r'\1 \2 ', string)
187 string = UnicodeRegex.punct_nondigit_re.sub(r' \1 \2', string)
188 string = UnicodeRegex.symbol_re.sub(r' \1 ', string)
189 return string.split()
190
191
192 def bleu_wrapper(ref_filename, hyp_filename, case_sensitive=False):
193 """Compute BLEU for two files (reference and hypothesis translation)."""
194 ref_lines = open(ref_filename).read().splitlines()
195 hyp_lines = open(hyp_filename).read().splitlines()
196 assert len(ref_lines) == len(hyp_lines)
197 if not case_sensitive:
198 ref_lines = [x.lower() for x in ref_lines]
199 hyp_lines = [x.lower() for x in hyp_lines]
200 ref_tokens = [bleu_tokenize(x) for x in ref_lines]
201 hyp_tokens = [bleu_tokenize(x) for x in hyp_lines]
202 return compute_bleu(ref_tokens, hyp_tokens)
203
204
205 StepFile = namedtuple('StepFile', 'filename mtime ctime steps')
206
207
208 def _read_stepfiles_list(path_prefix, path_suffix='.index', min_steps=0):
209 stepfiles = []
210 for filename in tf.gfile.Glob(path_prefix + '*-[0-9]*' + path_suffix):
211 basename = filename[:-len(path_suffix)] if len(path_suffix) else filename
212 try:
213 steps = int(basename.rsplit('-')[-1])
214 except ValueError: # The -[0-9]* part is not an integer.
215 continue
216 if steps < min_steps:
217 continue
218 if not os.path.exists(filename):
219 tf.logging.info(filename + " was deleted, so skipping it")
220 continue
221 stepfiles.append(StepFile(basename, os.path.getmtime(filename),
222 os.path.getctime(filename), steps))
223 return sorted(stepfiles, key=lambda x: -x.steps)
224
225
226 def stepfiles_iterator(path_prefix, wait_minutes=0, min_steps=0,
227 path_suffix='.index', sleep_sec=10):
228 """Continuously yield new files with steps in filename as they appear.
229
230 This is useful for checkpoint files or other files whose names differ just in an interger
231 marking the number of steps and match the wildcard path_prefix + '*-[0-9]*' + path_suffix.
232 Unlike `tf.contrib.training.checkpoints_iterator`, this
233 implementation always starts from the oldest files
234 (and it cannot miss any file). Note that the oldest checkpoint
235 may be deleted anytime by Tensorflow (if set up so). It is up to the user
236 to check that the files returned by this generator actually exist.
237 Args:
238 path_prefix: The directory + possible common filename prefix to the files.
239 path_suffix: Common filename suffix (after steps), including possible extension dot.
240 wait_minutes: The maximum amount of minutes to wait between files.
241 min_steps: Skip files with lower global step.
242 sleep_sec: How often to check for new files.
243 Yields:
244 named tuples (filename, mtime, ctime, steps) of the files as they arrive.
245 """
246 # Wildcard D*-[0-9]* does not match D/x-1, so if D is a directory let path_prefix='D/'.
247 if not path_prefix.endswith(os.sep) and os.path.isdir(path_prefix):
248 path_prefix += os.sep
249 stepfiles = _read_stepfiles_list(path_prefix, path_suffix, min_steps)
250 tf.logging.info("Found %d files with steps: %s"
251 % (len(stepfiles), ", ".join(str(x.steps) for x in reversed(stepfiles))))
252 exit_time = time.time() + wait_minutes * 60
253 while True:
254 if not stepfiles and wait_minutes:
255 tf.logging.info('Waiting till %s if a new file matching %s*-[0-9]*%s appears'
256 % (time.asctime(time.localtime(exit_time)), path_prefix, path_suffix))
257 while True:
258 stepfiles = _read_stepfiles_list(path_prefix, path_suffix, min_steps)
259 if stepfiles or time.time() > exit_time:
260 break
261 time.sleep(sleep_sec)
262 if not stepfiles:
263 return
264
265 stepfile = stepfiles.pop()
266 exit_time, min_steps = stepfile.ctime + wait_minutes * 60, stepfile.steps + 1
267 yield stepfile
268
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tensor2tensor/utils/bleu_hook.py b/tensor2tensor/utils/bleu_hook.py
--- a/tensor2tensor/utils/bleu_hook.py
+++ b/tensor2tensor/utils/bleu_hook.py
@@ -153,7 +153,7 @@
def _property_chars(prefix):
return ''.join(six.unichr(x) for x in range(sys.maxunicode)
if unicodedata.category(six.unichr(x)).startswith(prefix))
- punctuation = self._property_chars('P')
+ punctuation = _property_chars('P')
self.nondigit_punct_re = re.compile(r'([^\d])([' + punctuation + r'])')
self.punct_nondigit_re = re.compile(r'([' + punctuation + r'])([^\d])')
self.symbol_re = re.compile('([' + _property_chars('S') + '])')
@@ -183,9 +183,10 @@
Returns:
a list of tokens
"""
- string = UnicodeRegex.nondigit_punct_re.sub(r'\1 \2 ', string)
- string = UnicodeRegex.punct_nondigit_re.sub(r' \1 \2', string)
- string = UnicodeRegex.symbol_re.sub(r' \1 ', string)
+ uregex = UnicodeRegex()
+ string = uregex.nondigit_punct_re.sub(r'\1 \2 ', string)
+ string = uregex.punct_nondigit_re.sub(r' \1 \2', string)
+ string = uregex.symbol_re.sub(r' \1 ', string)
return string.split()
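The committed fix above constructs `UnicodeRegex()` inside every `bleu_tokenize` call, which repeats the two full scans over `range(sys.maxunicode)` for each line tokenized. A possible alternative, sketched here only as an illustration and not part of the patch, is to build the patterns once at import time and reuse them; `MiniUnicodeRegex` and `UREGEX` are illustrative names that mirror the class in the diff, written for Python 3 (`chr` in place of `six.unichr`):

```python
import re
import sys
import unicodedata


def _property_chars(prefix):
    return ''.join(chr(x) for x in range(sys.maxunicode)
                   if unicodedata.category(chr(x)).startswith(prefix))


class MiniUnicodeRegex(object):
    def __init__(self):
        punctuation = _property_chars('P')
        self.nondigit_punct_re = re.compile(r'([^\d])([' + punctuation + r'])')
        self.punct_nondigit_re = re.compile(r'([' + punctuation + r'])([^\d])')
        self.symbol_re = re.compile('([' + _property_chars('S') + '])')


UREGEX = MiniUnicodeRegex()  # compiled once, shared by every call below


def bleu_tokenize(string):
    string = UREGEX.nondigit_punct_re.sub(r'\1 \2 ', string)
    string = UREGEX.punct_nondigit_re.sub(r' \1 \2', string)
    string = UREGEX.symbol_re.sub(r' \1 ', string)
    return string.split()


print(bleu_tokenize('Hello, world! It costs 1,000.50 dollars.'))
```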
| {"golden_diff": "diff --git a/tensor2tensor/utils/bleu_hook.py b/tensor2tensor/utils/bleu_hook.py\n--- a/tensor2tensor/utils/bleu_hook.py\n+++ b/tensor2tensor/utils/bleu_hook.py\n@@ -153,7 +153,7 @@\n def _property_chars(prefix):\n return ''.join(six.unichr(x) for x in range(sys.maxunicode)\n if unicodedata.category(six.unichr(x)).startswith(prefix))\n- punctuation = self._property_chars('P')\n+ punctuation = _property_chars('P')\n self.nondigit_punct_re = re.compile(r'([^\\d])([' + punctuation + r'])')\n self.punct_nondigit_re = re.compile(r'([' + punctuation + r'])([^\\d])')\n self.symbol_re = re.compile('([' + _property_chars('S') + '])')\n@@ -183,9 +183,10 @@\n Returns:\n a list of tokens\n \"\"\"\n- string = UnicodeRegex.nondigit_punct_re.sub(r'\\1 \\2 ', string)\n- string = UnicodeRegex.punct_nondigit_re.sub(r' \\1 \\2', string)\n- string = UnicodeRegex.symbol_re.sub(r' \\1 ', string)\n+ uregex = UnicodeRegex()\n+ string = uregex.nondigit_punct_re.sub(r'\\1 \\2 ', string)\n+ string = uregex.punct_nondigit_re.sub(r' \\1 \\2', string)\n+ string = uregex.symbol_re.sub(r' \\1 ', string)\n return string.split()\n", "issue": "problem for compute the \"real\" BLEU \nit seems has some small problems.\r\n\r\nTraceback (most recent call last):\r\nFile \"/usr/local/bin/t2t-bleu\", line 6, in\r\nexec(compile(open(file).read(), file, 'exec'))\r\nFile \"/workspace/tf_project/tensor2tensor/tensor2tensor/bin/t2t-bleu\", line 137, in\r\ntf.app.run()\r\nFile \"/usr/local/lib/python2.7/dist-packages/tensorflow/python/platform/app.py\", line 48, in run\r\n_sys.exit(main(_sys.argv[:1] + flags_passthrough))\r\nFile \"/workspace/tf_project/tensor2tensor/tensor2tensor/bin/t2t-bleu\", line 80, in main\r\nbleu = 100 * bleu_hook.bleu_wrapper(FLAGS.reference, FLAGS.translation, case_sensitive=False)\r\nFile \"/workspace/tf_project/tensor2tensor/tensor2tensor/utils/bleu_hook.py\", line 202, in bleu_wrapper\r\nref_tokens = [bleu_tokenize(x) for x in ref_lines]\r\nFile \"/workspace/tf_project/tensor2tensor/tensor2tensor/utils/bleu_hook.py\", line 187, in bleu_tokenize\r\nstring = UnicodeRegex.nondigit_punct_re.sub(r'\\1 \\2 ', string)\r\nAttributeError: type object 'UnicodeRegex' has no attribute 'nondigit_punct_re'\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2017 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"BLEU metric util used during eval for MT.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport math\nimport os\nimport re\nimport sys\nimport time\nimport unicodedata\nfrom collections import namedtuple\n\n# Dependency imports\n\nimport numpy as np\nimport six\n# pylint: disable=redefined-builtin\nfrom six.moves import xrange\nfrom six.moves import zip\n# pylint: enable=redefined-builtin\n\nimport tensorflow as tf\n\n\ndef _get_ngrams(segment, max_order):\n \"\"\"Extracts all n-grams upto a given maximum order from an input segment.\n\n Args:\n segment: text 
segment from which n-grams will be extracted.\n max_order: maximum length in tokens of the n-grams returned by this\n methods.\n\n Returns:\n The Counter containing all n-grams upto max_order in segment\n with a count of how many times each n-gram occurred.\n \"\"\"\n ngram_counts = collections.Counter()\n for order in xrange(1, max_order + 1):\n for i in xrange(0, len(segment) - order + 1):\n ngram = tuple(segment[i:i + order])\n ngram_counts[ngram] += 1\n return ngram_counts\n\n\ndef compute_bleu(reference_corpus,\n translation_corpus,\n max_order=4,\n use_bp=True):\n \"\"\"Computes BLEU score of translated segments against one or more references.\n\n Args:\n reference_corpus: list of references for each translation. Each\n reference should be tokenized into a list of tokens.\n translation_corpus: list of translations to score. Each translation\n should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n use_bp: boolean, whether to apply brevity penalty.\n\n Returns:\n BLEU score.\n \"\"\"\n reference_length = 0\n translation_length = 0\n bp = 1.0\n geo_mean = 0\n\n matches_by_order = [0] * max_order\n possible_matches_by_order = [0] * max_order\n precisions = []\n\n for (references, translations) in zip(reference_corpus, translation_corpus):\n reference_length += len(references)\n translation_length += len(translations)\n ref_ngram_counts = _get_ngrams(references, max_order)\n translation_ngram_counts = _get_ngrams(translations, max_order)\n\n overlap = dict((ngram,\n min(count, translation_ngram_counts[ngram]))\n for ngram, count in ref_ngram_counts.items())\n\n for ngram in overlap:\n matches_by_order[len(ngram) - 1] += overlap[ngram]\n for ngram in translation_ngram_counts:\n possible_matches_by_order[len(ngram)-1] += translation_ngram_counts[ngram]\n precisions = [0] * max_order\n smooth = 1.0\n for i in xrange(0, max_order):\n if possible_matches_by_order[i] > 0:\n precisions[i] = matches_by_order[i] / possible_matches_by_order[i]\n if matches_by_order[i] > 0:\n precisions[i] = matches_by_order[i] / possible_matches_by_order[i]\n else:\n smooth *= 2\n precisions[i] = 1.0 / (smooth * possible_matches_by_order[i])\n else:\n precisions[i] = 0.0\n\n if max(precisions) > 0:\n p_log_sum = sum(math.log(p) for p in precisions if p)\n geo_mean = math.exp(p_log_sum/max_order)\n\n if use_bp:\n ratio = translation_length / reference_length\n bp = math.exp(1 - 1. / ratio) if ratio < 1.0 else 1.0\n bleu = geo_mean * bp\n return np.float32(bleu)\n\n\ndef bleu_score(predictions, labels, **unused_kwargs):\n \"\"\"BLEU score computation between labels and predictions.\n\n An approximate BLEU scoring method since we do not glue word pieces or\n decode the ids and tokenize the output. By default, we use ngram order of 4\n and use brevity penalty. 
Also, this does not have beam search.\n\n Args:\n predictions: tensor, model predicitons\n labels: tensor, gold output.\n\n Returns:\n bleu: int, approx bleu score\n \"\"\"\n outputs = tf.to_int32(tf.argmax(predictions, axis=-1))\n # Convert the outputs and labels to a [batch_size, input_length] tensor.\n outputs = tf.squeeze(outputs, axis=[-1, -2])\n labels = tf.squeeze(labels, axis=[-1, -2])\n\n bleu = tf.py_func(compute_bleu, (labels, outputs), tf.float32)\n return bleu, tf.constant(1.0)\n\n\nclass UnicodeRegex(object):\n \"\"\"Ad-hoc hack to recognize all punctuation and symbols.\"\"\"\n\n def __init__(self):\n def _property_chars(prefix):\n return ''.join(six.unichr(x) for x in range(sys.maxunicode)\n if unicodedata.category(six.unichr(x)).startswith(prefix))\n punctuation = self._property_chars('P')\n self.nondigit_punct_re = re.compile(r'([^\\d])([' + punctuation + r'])')\n self.punct_nondigit_re = re.compile(r'([' + punctuation + r'])([^\\d])')\n self.symbol_re = re.compile('([' + _property_chars('S') + '])')\n\n\ndef bleu_tokenize(string):\n r\"\"\"Tokenize a string following the official BLEU implementation.\n\n See https://github.com/moses-smt/mosesdecoder/\"\n \"blob/master/scripts/generic/mteval-v14.pl#L954-L983\n In our case, the input string is expected to be just one line\n and no HTML entities de-escaping is needed.\n So we just tokenize on punctuation and symbols,\n except when a punctuation is preceded and followed by a digit\n (e.g. a comma/dot as a thousand/decimal separator).\n\n Note that a numer (e.g. a year) followed by a dot at the end of sentence\n is NOT tokenized,\n i.e. the dot stays with the number because `s/(\\p{P})(\\P{N})/ $1 $2/g`\n does not match this case (unless we add a space after each sentence).\n However, this error is already in the original mteval-v14.pl\n and we want to be consistent with it.\n\n Args:\n string: the input string\n\n Returns:\n a list of tokens\n \"\"\"\n string = UnicodeRegex.nondigit_punct_re.sub(r'\\1 \\2 ', string)\n string = UnicodeRegex.punct_nondigit_re.sub(r' \\1 \\2', string)\n string = UnicodeRegex.symbol_re.sub(r' \\1 ', string)\n return string.split()\n\n\ndef bleu_wrapper(ref_filename, hyp_filename, case_sensitive=False):\n \"\"\"Compute BLEU for two files (reference and hypothesis translation).\"\"\"\n ref_lines = open(ref_filename).read().splitlines()\n hyp_lines = open(hyp_filename).read().splitlines()\n assert len(ref_lines) == len(hyp_lines)\n if not case_sensitive:\n ref_lines = [x.lower() for x in ref_lines]\n hyp_lines = [x.lower() for x in hyp_lines]\n ref_tokens = [bleu_tokenize(x) for x in ref_lines]\n hyp_tokens = [bleu_tokenize(x) for x in hyp_lines]\n return compute_bleu(ref_tokens, hyp_tokens)\n\n\nStepFile = namedtuple('StepFile', 'filename mtime ctime steps')\n\n\ndef _read_stepfiles_list(path_prefix, path_suffix='.index', min_steps=0):\n stepfiles = []\n for filename in tf.gfile.Glob(path_prefix + '*-[0-9]*' + path_suffix):\n basename = filename[:-len(path_suffix)] if len(path_suffix) else filename\n try:\n steps = int(basename.rsplit('-')[-1])\n except ValueError: # The -[0-9]* part is not an integer.\n continue\n if steps < min_steps:\n continue\n if not os.path.exists(filename):\n tf.logging.info(filename + \" was deleted, so skipping it\")\n continue\n stepfiles.append(StepFile(basename, os.path.getmtime(filename),\n os.path.getctime(filename), steps))\n return sorted(stepfiles, key=lambda x: -x.steps)\n\n\ndef stepfiles_iterator(path_prefix, wait_minutes=0, min_steps=0,\n path_suffix='.index', 
sleep_sec=10):\n \"\"\"Continuously yield new files with steps in filename as they appear.\n\n This is useful for checkpoint files or other files whose names differ just in an interger\n marking the number of steps and match the wildcard path_prefix + '*-[0-9]*' + path_suffix.\n Unlike `tf.contrib.training.checkpoints_iterator`, this\n implementation always starts from the oldest files\n (and it cannot miss any file). Note that the oldest checkpoint\n may be deleted anytime by Tensorflow (if set up so). It is up to the user\n to check that the files returned by this generator actually exist.\n Args:\n path_prefix: The directory + possible common filename prefix to the files.\n path_suffix: Common filename suffix (after steps), including possible extension dot.\n wait_minutes: The maximum amount of minutes to wait between files.\n min_steps: Skip files with lower global step.\n sleep_sec: How often to check for new files.\n Yields:\n named tuples (filename, mtime, ctime, steps) of the files as they arrive.\n \"\"\"\n # Wildcard D*-[0-9]* does not match D/x-1, so if D is a directory let path_prefix='D/'.\n if not path_prefix.endswith(os.sep) and os.path.isdir(path_prefix):\n path_prefix += os.sep\n stepfiles = _read_stepfiles_list(path_prefix, path_suffix, min_steps)\n tf.logging.info(\"Found %d files with steps: %s\"\n % (len(stepfiles), \", \".join(str(x.steps) for x in reversed(stepfiles))))\n exit_time = time.time() + wait_minutes * 60\n while True:\n if not stepfiles and wait_minutes:\n tf.logging.info('Waiting till %s if a new file matching %s*-[0-9]*%s appears'\n % (time.asctime(time.localtime(exit_time)), path_prefix, path_suffix))\n while True:\n stepfiles = _read_stepfiles_list(path_prefix, path_suffix, min_steps)\n if stepfiles or time.time() > exit_time:\n break\n time.sleep(sleep_sec)\n if not stepfiles:\n return\n\n stepfile = stepfiles.pop()\n exit_time, min_steps = stepfile.ctime + wait_minutes * 60, stepfile.steps + 1\n yield stepfile\n", "path": "tensor2tensor/utils/bleu_hook.py"}], "after_files": [{"content": "# coding=utf-8\n# Copyright 2017 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"BLEU metric util used during eval for MT.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport math\nimport os\nimport re\nimport sys\nimport time\nimport unicodedata\nfrom collections import namedtuple\n\n# Dependency imports\n\nimport numpy as np\nimport six\n# pylint: disable=redefined-builtin\nfrom six.moves import xrange\nfrom six.moves import zip\n# pylint: enable=redefined-builtin\n\nimport tensorflow as tf\n\n\ndef _get_ngrams(segment, max_order):\n \"\"\"Extracts all n-grams upto a given maximum order from an input segment.\n\n Args:\n segment: text segment from which n-grams will be extracted.\n max_order: maximum length in tokens of the n-grams returned by this\n methods.\n\n Returns:\n The Counter containing all n-grams upto max_order in segment\n 
with a count of how many times each n-gram occurred.\n \"\"\"\n ngram_counts = collections.Counter()\n for order in xrange(1, max_order + 1):\n for i in xrange(0, len(segment) - order + 1):\n ngram = tuple(segment[i:i + order])\n ngram_counts[ngram] += 1\n return ngram_counts\n\n\ndef compute_bleu(reference_corpus,\n translation_corpus,\n max_order=4,\n use_bp=True):\n \"\"\"Computes BLEU score of translated segments against one or more references.\n\n Args:\n reference_corpus: list of references for each translation. Each\n reference should be tokenized into a list of tokens.\n translation_corpus: list of translations to score. Each translation\n should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n use_bp: boolean, whether to apply brevity penalty.\n\n Returns:\n BLEU score.\n \"\"\"\n reference_length = 0\n translation_length = 0\n bp = 1.0\n geo_mean = 0\n\n matches_by_order = [0] * max_order\n possible_matches_by_order = [0] * max_order\n precisions = []\n\n for (references, translations) in zip(reference_corpus, translation_corpus):\n reference_length += len(references)\n translation_length += len(translations)\n ref_ngram_counts = _get_ngrams(references, max_order)\n translation_ngram_counts = _get_ngrams(translations, max_order)\n\n overlap = dict((ngram,\n min(count, translation_ngram_counts[ngram]))\n for ngram, count in ref_ngram_counts.items())\n\n for ngram in overlap:\n matches_by_order[len(ngram) - 1] += overlap[ngram]\n for ngram in translation_ngram_counts:\n possible_matches_by_order[len(ngram)-1] += translation_ngram_counts[ngram]\n precisions = [0] * max_order\n smooth = 1.0\n for i in xrange(0, max_order):\n if possible_matches_by_order[i] > 0:\n precisions[i] = matches_by_order[i] / possible_matches_by_order[i]\n if matches_by_order[i] > 0:\n precisions[i] = matches_by_order[i] / possible_matches_by_order[i]\n else:\n smooth *= 2\n precisions[i] = 1.0 / (smooth * possible_matches_by_order[i])\n else:\n precisions[i] = 0.0\n\n if max(precisions) > 0:\n p_log_sum = sum(math.log(p) for p in precisions if p)\n geo_mean = math.exp(p_log_sum/max_order)\n\n if use_bp:\n ratio = translation_length / reference_length\n bp = math.exp(1 - 1. / ratio) if ratio < 1.0 else 1.0\n bleu = geo_mean * bp\n return np.float32(bleu)\n\n\ndef bleu_score(predictions, labels, **unused_kwargs):\n \"\"\"BLEU score computation between labels and predictions.\n\n An approximate BLEU scoring method since we do not glue word pieces or\n decode the ids and tokenize the output. By default, we use ngram order of 4\n and use brevity penalty. 
Also, this does not have beam search.\n\n Args:\n predictions: tensor, model predicitons\n labels: tensor, gold output.\n\n Returns:\n bleu: int, approx bleu score\n \"\"\"\n outputs = tf.to_int32(tf.argmax(predictions, axis=-1))\n # Convert the outputs and labels to a [batch_size, input_length] tensor.\n outputs = tf.squeeze(outputs, axis=[-1, -2])\n labels = tf.squeeze(labels, axis=[-1, -2])\n\n bleu = tf.py_func(compute_bleu, (labels, outputs), tf.float32)\n return bleu, tf.constant(1.0)\n\n\nclass UnicodeRegex(object):\n \"\"\"Ad-hoc hack to recognize all punctuation and symbols.\"\"\"\n\n def __init__(self):\n def _property_chars(prefix):\n return ''.join(six.unichr(x) for x in range(sys.maxunicode)\n if unicodedata.category(six.unichr(x)).startswith(prefix))\n punctuation = _property_chars('P')\n self.nondigit_punct_re = re.compile(r'([^\\d])([' + punctuation + r'])')\n self.punct_nondigit_re = re.compile(r'([' + punctuation + r'])([^\\d])')\n self.symbol_re = re.compile('([' + _property_chars('S') + '])')\n\n\ndef bleu_tokenize(string):\n r\"\"\"Tokenize a string following the official BLEU implementation.\n\n See https://github.com/moses-smt/mosesdecoder/\"\n \"blob/master/scripts/generic/mteval-v14.pl#L954-L983\n In our case, the input string is expected to be just one line\n and no HTML entities de-escaping is needed.\n So we just tokenize on punctuation and symbols,\n except when a punctuation is preceded and followed by a digit\n (e.g. a comma/dot as a thousand/decimal separator).\n\n Note that a numer (e.g. a year) followed by a dot at the end of sentence\n is NOT tokenized,\n i.e. the dot stays with the number because `s/(\\p{P})(\\P{N})/ $1 $2/g`\n does not match this case (unless we add a space after each sentence).\n However, this error is already in the original mteval-v14.pl\n and we want to be consistent with it.\n\n Args:\n string: the input string\n\n Returns:\n a list of tokens\n \"\"\"\n uregex = UnicodeRegex()\n string = uregex.nondigit_punct_re.sub(r'\\1 \\2 ', string)\n string = uregex.punct_nondigit_re.sub(r' \\1 \\2', string)\n string = uregex.symbol_re.sub(r' \\1 ', string)\n return string.split()\n\n\ndef bleu_wrapper(ref_filename, hyp_filename, case_sensitive=False):\n \"\"\"Compute BLEU for two files (reference and hypothesis translation).\"\"\"\n ref_lines = open(ref_filename).read().splitlines()\n hyp_lines = open(hyp_filename).read().splitlines()\n assert len(ref_lines) == len(hyp_lines)\n if not case_sensitive:\n ref_lines = [x.lower() for x in ref_lines]\n hyp_lines = [x.lower() for x in hyp_lines]\n ref_tokens = [bleu_tokenize(x) for x in ref_lines]\n hyp_tokens = [bleu_tokenize(x) for x in hyp_lines]\n return compute_bleu(ref_tokens, hyp_tokens)\n\n\nStepFile = namedtuple('StepFile', 'filename mtime ctime steps')\n\n\ndef _read_stepfiles_list(path_prefix, path_suffix='.index', min_steps=0):\n stepfiles = []\n for filename in tf.gfile.Glob(path_prefix + '*-[0-9]*' + path_suffix):\n basename = filename[:-len(path_suffix)] if len(path_suffix) else filename\n try:\n steps = int(basename.rsplit('-')[-1])\n except ValueError: # The -[0-9]* part is not an integer.\n continue\n if steps < min_steps:\n continue\n if not os.path.exists(filename):\n tf.logging.info(filename + \" was deleted, so skipping it\")\n continue\n stepfiles.append(StepFile(basename, os.path.getmtime(filename),\n os.path.getctime(filename), steps))\n return sorted(stepfiles, key=lambda x: -x.steps)\n\n\ndef stepfiles_iterator(path_prefix, wait_minutes=0, min_steps=0,\n 
path_suffix='.index', sleep_sec=10):\n \"\"\"Continuously yield new files with steps in filename as they appear.\n\n This is useful for checkpoint files or other files whose names differ just in an interger\n marking the number of steps and match the wildcard path_prefix + '*-[0-9]*' + path_suffix.\n Unlike `tf.contrib.training.checkpoints_iterator`, this\n implementation always starts from the oldest files\n (and it cannot miss any file). Note that the oldest checkpoint\n may be deleted anytime by Tensorflow (if set up so). It is up to the user\n to check that the files returned by this generator actually exist.\n Args:\n path_prefix: The directory + possible common filename prefix to the files.\n path_suffix: Common filename suffix (after steps), including possible extension dot.\n wait_minutes: The maximum amount of minutes to wait between files.\n min_steps: Skip files with lower global step.\n sleep_sec: How often to check for new files.\n Yields:\n named tuples (filename, mtime, ctime, steps) of the files as they arrive.\n \"\"\"\n # Wildcard D*-[0-9]* does not match D/x-1, so if D is a directory let path_prefix='D/'.\n if not path_prefix.endswith(os.sep) and os.path.isdir(path_prefix):\n path_prefix += os.sep\n stepfiles = _read_stepfiles_list(path_prefix, path_suffix, min_steps)\n tf.logging.info(\"Found %d files with steps: %s\"\n % (len(stepfiles), \", \".join(str(x.steps) for x in reversed(stepfiles))))\n exit_time = time.time() + wait_minutes * 60\n while True:\n if not stepfiles and wait_minutes:\n tf.logging.info('Waiting till %s if a new file matching %s*-[0-9]*%s appears'\n % (time.asctime(time.localtime(exit_time)), path_prefix, path_suffix))\n while True:\n stepfiles = _read_stepfiles_list(path_prefix, path_suffix, min_steps)\n if stepfiles or time.time() > exit_time:\n break\n time.sleep(sleep_sec)\n if not stepfiles:\n return\n\n stepfile = stepfiles.pop()\n exit_time, min_steps = stepfile.ctime + wait_minutes * 60, stepfile.steps + 1\n yield stepfile\n", "path": "tensor2tensor/utils/bleu_hook.py"}]} | 3,822 | 359 |
gh_patches_debug_35590 | rasdani/github-patches | git_diff | biolab__orange3-text-165 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
GeoMap counting doesn't eliminate duplicates inside documents
With the `Locations` attribute for NY Times you sometimes get a result like `Ljubljana (Slovenia), Slovenia, Europe (Slovenia)`, which would count Slovenia 3 times instead of once. For a specific country, a given document should not increment the count by more than one.
--- END ISSUE ---
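In other words the count has to be per document, not per mention: repeated occurrences of the same country inside one document should collapse to a single increment. A minimal sketch of that counting rule (plain Python, illustrative data only):

```python
from collections import Counter

docs = [
    ['Slovenia', 'Slovenia', 'Slovenia'],  # "Ljubljana (Slovenia), Slovenia, Europe (Slovenia)"
    ['Slovenia', 'France'],
]

naive = Counter(country for doc in docs for country in doc)
per_document = Counter(country for doc in docs for country in set(doc))

print(naive['Slovenia'])         # 4 -- counts every mention
print(per_document['Slovenia'])  # 2 -- at most one per document
```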
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `orangecontrib/text/widgets/owgeomap.py`
Content:
```
1 # coding: utf-8
2 import os
3 import re
4 from collections import defaultdict, Counter
5 from itertools import chain
6 from urllib.parse import urljoin
7 from urllib.request import pathname2url
8
9 import numpy as np
10 from AnyQt.QtCore import Qt, QTimer, pyqtSlot, QUrl
11 from AnyQt.QtWidgets import QApplication, QSizePolicy
12
13 from Orange.data import Table
14 from Orange.widgets import widget, gui, settings
15 from Orange.widgets.utils.itemmodels import VariableListModel
16 from orangecontrib.text.corpus import Corpus
17 from orangecontrib.text.country_codes import \
18 CC_EUROPE, INV_CC_EUROPE, SET_CC_EUROPE, \
19 CC_WORLD, INV_CC_WORLD, \
20 CC_USA, INV_CC_USA, SET_CC_USA
21
22 CC_NAMES = re.compile('[\w\s\.\-]+')
23
24
25 class Map:
26 WORLD = 'world_mill_en'
27 EUROPE = 'europe_mill_en'
28 USA = 'us_aea_en'
29 all = (('World', WORLD),
30 ('Europe', EUROPE),
31 ('USA', USA))
32
33
34 class OWGeoMap(widget.OWWidget):
35 name = "GeoMap"
36 priority = 20000
37 icon = "icons/GeoMap.svg"
38 inputs = [("Data", Table, "on_data")]
39 outputs = [('Corpus', Corpus)]
40
41 want_main_area = False
42
43 selected_attr = settings.Setting('')
44 selected_map = settings.Setting(0)
45 regions = settings.Setting([])
46
47 def __init__(self):
48 super().__init__()
49 self.data = None
50 self._create_layout()
51
52 @pyqtSlot(str)
53 def region_selected(self, regions):
54 """Called from JavaScript"""
55 if not regions:
56 self.regions = []
57 if not regions or self.data is None:
58 return self.send('Corpus', None)
59 self.regions = regions.split(',')
60 attr = self.data.domain[self.selected_attr]
61 if attr.is_discrete: return # TODO, FIXME: make this work for discrete attrs also
62 from Orange.data.filter import FilterRegex
63 filter = FilterRegex(attr, r'\b{}\b'.format(r'\b|\b'.join(self.regions)), re.IGNORECASE)
64 self.send('Corpus', self.data._filter_values(filter))
65
66 def _create_layout(self):
67 box = gui.widgetBox(self.controlArea,
68 orientation='horizontal')
69 self.varmodel = VariableListModel(parent=self)
70 self.attr_combo = gui.comboBox(box, self, 'selected_attr',
71 orientation=Qt.Horizontal,
72 label='Region attribute:',
73 callback=self.on_attr_change,
74 sendSelectedValue=True)
75 self.attr_combo.setModel(self.varmodel)
76 self.map_combo = gui.comboBox(box, self, 'selected_map',
77 orientation=Qt.Horizontal,
78 label='Map type:',
79 callback=self.on_map_change,
80 items=Map.all)
81 hexpand = QSizePolicy(QSizePolicy.Expanding,
82 QSizePolicy.Fixed)
83 self.attr_combo.setSizePolicy(hexpand)
84 self.map_combo.setSizePolicy(hexpand)
85
86 url = urljoin('file:',
87 pathname2url(os.path.join(
88 os.path.dirname(__file__),
89 'resources',
90 'owgeomap.html')))
91 self.webview = gui.WebviewWidget(self.controlArea, self, url=QUrl(url))
92 self.controlArea.layout().addWidget(self.webview)
93
94 QTimer.singleShot(
95 0, lambda: self.webview.evalJS('REGIONS = {};'.format({Map.WORLD: CC_WORLD,
96 Map.EUROPE: CC_EUROPE,
97 Map.USA: CC_USA})))
98
99 def _repopulate_attr_combo(self, data):
100 vars = [a for a in chain(data.domain.metas,
101 data.domain.attributes,
102 data.domain.class_vars)
103 if a.is_string] if data else []
104 self.varmodel.wrap(vars)
105 # Select default attribute
106 self.selected_attr = next((var.name
107 for var in vars
108 if var.name.lower().startswith(('country', 'location', 'region'))),
109 vars[0].name if vars else '')
110
111 def on_data(self, data):
112 if data and not isinstance(data, Corpus):
113 data = Corpus.from_table(data.domain, data)
114 self.data = data
115 self._repopulate_attr_combo(data)
116 if not data:
117 self.region_selected('')
118 QTimer.singleShot(0, lambda: self.webview.evalJS('DATA = {}; renderMap();'))
119 else:
120 QTimer.singleShot(0, self.on_attr_change)
121
122 def on_map_change(self, map_code=''):
123 if map_code:
124 self.map_combo.setCurrentIndex(self.map_combo.findData(map_code))
125 else:
126 map_code = self.map_combo.itemData(self.selected_map)
127
128 inv_cc_map, cc_map = {Map.USA: (INV_CC_USA, CC_USA),
129 Map.WORLD: (INV_CC_WORLD, CC_WORLD),
130 Map.EUROPE: (INV_CC_EUROPE, CC_EUROPE)} [map_code]
131 # Set country counts in JS
132 data = defaultdict(int)
133 for cc in getattr(self, 'cc_counts', ()):
134 key = inv_cc_map.get(cc, cc)
135 if key in cc_map:
136 data[key] += self.cc_counts[cc]
137 # Draw the new map
138 self.webview.evalJS('DATA = {};'
139 'MAP_CODE = "{}";'
140 'SELECTED_REGIONS = {};'
141 'renderMap();'.format(dict(data),
142 map_code,
143 self.regions))
144
145 def on_attr_change(self):
146 if not self.selected_attr:
147 return
148 attr = self.data.domain[self.selected_attr]
149 self.cc_counts = Counter(chain.from_iterable(
150 set(name.strip() for name in CC_NAMES.findall(i.lower())) if len(i) > 3 else (i,)
151 for i in self.data.get_column_view(self.data.domain.index(attr))[0]))
152 # Auto-select region map
153 values = set(self.cc_counts)
154 if 0 == len(values - SET_CC_USA):
155 map_code = Map.USA
156 elif 0 == len(values - SET_CC_EUROPE):
157 map_code = Map.EUROPE
158 else:
159 map_code = Map.WORLD
160 self.on_map_change(map_code)
161
162
163 def main():
164 from Orange.data import Table, Domain, StringVariable
165
166 words = np.column_stack([
167 'Slovenia Slovenia SVN USA Iraq Iraq Iraq Iraq France FR'.split(),
168 'Slovenia Slovenia SVN France FR Austria NL GB GB GB'.split(),
169 'Alabama AL Texas TX TX TX MS Montana US-MT MT'.split(),
170 ])
171 metas = [
172 StringVariable('World'),
173 StringVariable('Europe'),
174 StringVariable('USA'),
175 ]
176 domain = Domain([], metas=metas)
177 table = Table.from_numpy(domain,
178 X=np.zeros((len(words), 0)),
179 metas=words)
180 app = QApplication([''])
181 w = OWGeoMap()
182 w.on_data(table)
183 w.show()
184 app.exec()
185
186
187 if __name__ == "__main__":
188 main()
189
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/orangecontrib/text/widgets/owgeomap.py b/orangecontrib/text/widgets/owgeomap.py
--- a/orangecontrib/text/widgets/owgeomap.py
+++ b/orangecontrib/text/widgets/owgeomap.py
@@ -127,13 +127,14 @@
inv_cc_map, cc_map = {Map.USA: (INV_CC_USA, CC_USA),
Map.WORLD: (INV_CC_WORLD, CC_WORLD),
- Map.EUROPE: (INV_CC_EUROPE, CC_EUROPE)} [map_code]
- # Set country counts in JS
+ Map.EUROPE: (INV_CC_EUROPE, CC_EUROPE)}[map_code]
+ # Set country counts for JS
data = defaultdict(int)
- for cc in getattr(self, 'cc_counts', ()):
- key = inv_cc_map.get(cc, cc)
- if key in cc_map:
- data[key] += self.cc_counts[cc]
+ for locations in self._iter_locations():
+ keys = set(inv_cc_map.get(loc, loc) for loc in locations)
+ for key in keys:
+ if key in cc_map:
+ data[key] += 1
# Draw the new map
self.webview.evalJS('DATA = {};'
'MAP_CODE = "{}";'
@@ -145,12 +146,8 @@
def on_attr_change(self):
if not self.selected_attr:
return
- attr = self.data.domain[self.selected_attr]
- self.cc_counts = Counter(chain.from_iterable(
- set(name.strip() for name in CC_NAMES.findall(i.lower())) if len(i) > 3 else (i,)
- for i in self.data.get_column_view(self.data.domain.index(attr))[0]))
+ values = set(chain.from_iterable(self._iter_locations()))
# Auto-select region map
- values = set(self.cc_counts)
if 0 == len(values - SET_CC_USA):
map_code = Map.USA
elif 0 == len(values - SET_CC_EUROPE):
@@ -159,6 +156,16 @@
map_code = Map.WORLD
self.on_map_change(map_code)
+ def _iter_locations(self):
+ """ Iterator that yields an iterable per documents with all its's
+ locations. """
+ attr = self.data.domain[self.selected_attr]
+ for i in self.data.get_column_view(self.data.domain.index(attr))[0]:
+ if len(i) > 3:
+ yield map(lambda x: x.strip(), CC_NAMES.findall(i.lower()))
+ else:
+ yield (i, )
+
def main():
from Orange.data import Table, Domain, StringVariable
| {"golden_diff": "diff --git a/orangecontrib/text/widgets/owgeomap.py b/orangecontrib/text/widgets/owgeomap.py\n--- a/orangecontrib/text/widgets/owgeomap.py\n+++ b/orangecontrib/text/widgets/owgeomap.py\n@@ -127,13 +127,14 @@\n \n inv_cc_map, cc_map = {Map.USA: (INV_CC_USA, CC_USA),\n Map.WORLD: (INV_CC_WORLD, CC_WORLD),\n- Map.EUROPE: (INV_CC_EUROPE, CC_EUROPE)} [map_code]\n- # Set country counts in JS\n+ Map.EUROPE: (INV_CC_EUROPE, CC_EUROPE)}[map_code]\n+ # Set country counts for JS\n data = defaultdict(int)\n- for cc in getattr(self, 'cc_counts', ()):\n- key = inv_cc_map.get(cc, cc)\n- if key in cc_map:\n- data[key] += self.cc_counts[cc]\n+ for locations in self._iter_locations():\n+ keys = set(inv_cc_map.get(loc, loc) for loc in locations)\n+ for key in keys:\n+ if key in cc_map:\n+ data[key] += 1\n # Draw the new map\n self.webview.evalJS('DATA = {};'\n 'MAP_CODE = \"{}\";'\n@@ -145,12 +146,8 @@\n def on_attr_change(self):\n if not self.selected_attr:\n return\n- attr = self.data.domain[self.selected_attr]\n- self.cc_counts = Counter(chain.from_iterable(\n- set(name.strip() for name in CC_NAMES.findall(i.lower())) if len(i) > 3 else (i,)\n- for i in self.data.get_column_view(self.data.domain.index(attr))[0]))\n+ values = set(chain.from_iterable(self._iter_locations()))\n # Auto-select region map\n- values = set(self.cc_counts)\n if 0 == len(values - SET_CC_USA):\n map_code = Map.USA\n elif 0 == len(values - SET_CC_EUROPE):\n@@ -159,6 +156,16 @@\n map_code = Map.WORLD\n self.on_map_change(map_code)\n \n+ def _iter_locations(self):\n+ \"\"\" Iterator that yields an iterable per documents with all its's\n+ locations. \"\"\"\n+ attr = self.data.domain[self.selected_attr]\n+ for i in self.data.get_column_view(self.data.domain.index(attr))[0]:\n+ if len(i) > 3:\n+ yield map(lambda x: x.strip(), CC_NAMES.findall(i.lower()))\n+ else:\n+ yield (i, )\n+\n \n def main():\n from Orange.data import Table, Domain, StringVariable\n", "issue": "GeoMap counting doesn't eliminate duplicates inside documents\nWith `Locations` attribute for NY Times sometimes you get a result: `Ljubljana (Slovenia), Slovenia, Europe (Slovenia),` which would count Slovenia 3 times instead of once. 
For a specific county a given document should not increment the count for more than one.\n", "before_files": [{"content": "# coding: utf-8\nimport os\nimport re\nfrom collections import defaultdict, Counter\nfrom itertools import chain\nfrom urllib.parse import urljoin\nfrom urllib.request import pathname2url\n\nimport numpy as np\nfrom AnyQt.QtCore import Qt, QTimer, pyqtSlot, QUrl\nfrom AnyQt.QtWidgets import QApplication, QSizePolicy\n\nfrom Orange.data import Table\nfrom Orange.widgets import widget, gui, settings\nfrom Orange.widgets.utils.itemmodels import VariableListModel\nfrom orangecontrib.text.corpus import Corpus\nfrom orangecontrib.text.country_codes import \\\n CC_EUROPE, INV_CC_EUROPE, SET_CC_EUROPE, \\\n CC_WORLD, INV_CC_WORLD, \\\n CC_USA, INV_CC_USA, SET_CC_USA\n\nCC_NAMES = re.compile('[\\w\\s\\.\\-]+')\n\n\nclass Map:\n WORLD = 'world_mill_en'\n EUROPE = 'europe_mill_en'\n USA = 'us_aea_en'\n all = (('World', WORLD),\n ('Europe', EUROPE),\n ('USA', USA))\n\n\nclass OWGeoMap(widget.OWWidget):\n name = \"GeoMap\"\n priority = 20000\n icon = \"icons/GeoMap.svg\"\n inputs = [(\"Data\", Table, \"on_data\")]\n outputs = [('Corpus', Corpus)]\n\n want_main_area = False\n\n selected_attr = settings.Setting('')\n selected_map = settings.Setting(0)\n regions = settings.Setting([])\n\n def __init__(self):\n super().__init__()\n self.data = None\n self._create_layout()\n\n @pyqtSlot(str)\n def region_selected(self, regions):\n \"\"\"Called from JavaScript\"\"\"\n if not regions:\n self.regions = []\n if not regions or self.data is None:\n return self.send('Corpus', None)\n self.regions = regions.split(',')\n attr = self.data.domain[self.selected_attr]\n if attr.is_discrete: return # TODO, FIXME: make this work for discrete attrs also\n from Orange.data.filter import FilterRegex\n filter = FilterRegex(attr, r'\\b{}\\b'.format(r'\\b|\\b'.join(self.regions)), re.IGNORECASE)\n self.send('Corpus', self.data._filter_values(filter))\n\n def _create_layout(self):\n box = gui.widgetBox(self.controlArea,\n orientation='horizontal')\n self.varmodel = VariableListModel(parent=self)\n self.attr_combo = gui.comboBox(box, self, 'selected_attr',\n orientation=Qt.Horizontal,\n label='Region attribute:',\n callback=self.on_attr_change,\n sendSelectedValue=True)\n self.attr_combo.setModel(self.varmodel)\n self.map_combo = gui.comboBox(box, self, 'selected_map',\n orientation=Qt.Horizontal,\n label='Map type:',\n callback=self.on_map_change,\n items=Map.all)\n hexpand = QSizePolicy(QSizePolicy.Expanding,\n QSizePolicy.Fixed)\n self.attr_combo.setSizePolicy(hexpand)\n self.map_combo.setSizePolicy(hexpand)\n\n url = urljoin('file:',\n pathname2url(os.path.join(\n os.path.dirname(__file__),\n 'resources',\n 'owgeomap.html')))\n self.webview = gui.WebviewWidget(self.controlArea, self, url=QUrl(url))\n self.controlArea.layout().addWidget(self.webview)\n\n QTimer.singleShot(\n 0, lambda: self.webview.evalJS('REGIONS = {};'.format({Map.WORLD: CC_WORLD,\n Map.EUROPE: CC_EUROPE,\n Map.USA: CC_USA})))\n\n def _repopulate_attr_combo(self, data):\n vars = [a for a in chain(data.domain.metas,\n data.domain.attributes,\n data.domain.class_vars)\n if a.is_string] if data else []\n self.varmodel.wrap(vars)\n # Select default attribute\n self.selected_attr = next((var.name\n for var in vars\n if var.name.lower().startswith(('country', 'location', 'region'))),\n vars[0].name if vars else '')\n\n def on_data(self, data):\n if data and not isinstance(data, Corpus):\n data = Corpus.from_table(data.domain, data)\n self.data 
= data\n self._repopulate_attr_combo(data)\n if not data:\n self.region_selected('')\n QTimer.singleShot(0, lambda: self.webview.evalJS('DATA = {}; renderMap();'))\n else:\n QTimer.singleShot(0, self.on_attr_change)\n\n def on_map_change(self, map_code=''):\n if map_code:\n self.map_combo.setCurrentIndex(self.map_combo.findData(map_code))\n else:\n map_code = self.map_combo.itemData(self.selected_map)\n\n inv_cc_map, cc_map = {Map.USA: (INV_CC_USA, CC_USA),\n Map.WORLD: (INV_CC_WORLD, CC_WORLD),\n Map.EUROPE: (INV_CC_EUROPE, CC_EUROPE)} [map_code]\n # Set country counts in JS\n data = defaultdict(int)\n for cc in getattr(self, 'cc_counts', ()):\n key = inv_cc_map.get(cc, cc)\n if key in cc_map:\n data[key] += self.cc_counts[cc]\n # Draw the new map\n self.webview.evalJS('DATA = {};'\n 'MAP_CODE = \"{}\";'\n 'SELECTED_REGIONS = {};'\n 'renderMap();'.format(dict(data),\n map_code,\n self.regions))\n\n def on_attr_change(self):\n if not self.selected_attr:\n return\n attr = self.data.domain[self.selected_attr]\n self.cc_counts = Counter(chain.from_iterable(\n set(name.strip() for name in CC_NAMES.findall(i.lower())) if len(i) > 3 else (i,)\n for i in self.data.get_column_view(self.data.domain.index(attr))[0]))\n # Auto-select region map\n values = set(self.cc_counts)\n if 0 == len(values - SET_CC_USA):\n map_code = Map.USA\n elif 0 == len(values - SET_CC_EUROPE):\n map_code = Map.EUROPE\n else:\n map_code = Map.WORLD\n self.on_map_change(map_code)\n\n\ndef main():\n from Orange.data import Table, Domain, StringVariable\n\n words = np.column_stack([\n 'Slovenia Slovenia SVN USA Iraq Iraq Iraq Iraq France FR'.split(),\n 'Slovenia Slovenia SVN France FR Austria NL GB GB GB'.split(),\n 'Alabama AL Texas TX TX TX MS Montana US-MT MT'.split(),\n ])\n metas = [\n StringVariable('World'),\n StringVariable('Europe'),\n StringVariable('USA'),\n ]\n domain = Domain([], metas=metas)\n table = Table.from_numpy(domain,\n X=np.zeros((len(words), 0)),\n metas=words)\n app = QApplication([''])\n w = OWGeoMap()\n w.on_data(table)\n w.show()\n app.exec()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "orangecontrib/text/widgets/owgeomap.py"}], "after_files": [{"content": "# coding: utf-8\nimport os\nimport re\nfrom collections import defaultdict, Counter\nfrom itertools import chain\nfrom urllib.parse import urljoin\nfrom urllib.request import pathname2url\n\nimport numpy as np\nfrom AnyQt.QtCore import Qt, QTimer, pyqtSlot, QUrl\nfrom AnyQt.QtWidgets import QApplication, QSizePolicy\n\nfrom Orange.data import Table\nfrom Orange.widgets import widget, gui, settings\nfrom Orange.widgets.utils.itemmodels import VariableListModel\nfrom orangecontrib.text.corpus import Corpus\nfrom orangecontrib.text.country_codes import \\\n CC_EUROPE, INV_CC_EUROPE, SET_CC_EUROPE, \\\n CC_WORLD, INV_CC_WORLD, \\\n CC_USA, INV_CC_USA, SET_CC_USA\n\nCC_NAMES = re.compile('[\\w\\s\\.\\-]+')\n\n\nclass Map:\n WORLD = 'world_mill_en'\n EUROPE = 'europe_mill_en'\n USA = 'us_aea_en'\n all = (('World', WORLD),\n ('Europe', EUROPE),\n ('USA', USA))\n\n\nclass OWGeoMap(widget.OWWidget):\n name = \"GeoMap\"\n priority = 20000\n icon = \"icons/GeoMap.svg\"\n inputs = [(\"Data\", Table, \"on_data\")]\n outputs = [('Corpus', Corpus)]\n\n want_main_area = False\n\n selected_attr = settings.Setting('')\n selected_map = settings.Setting(0)\n regions = settings.Setting([])\n\n def __init__(self):\n super().__init__()\n self.data = None\n self._create_layout()\n\n @pyqtSlot(str)\n def region_selected(self, regions):\n \"\"\"Called from 
JavaScript\"\"\"\n if not regions:\n self.regions = []\n if not regions or self.data is None:\n return self.send('Corpus', None)\n self.regions = regions.split(',')\n attr = self.data.domain[self.selected_attr]\n if attr.is_discrete: return # TODO, FIXME: make this work for discrete attrs also\n from Orange.data.filter import FilterRegex\n filter = FilterRegex(attr, r'\\b{}\\b'.format(r'\\b|\\b'.join(self.regions)), re.IGNORECASE)\n self.send('Corpus', self.data._filter_values(filter))\n\n def _create_layout(self):\n box = gui.widgetBox(self.controlArea,\n orientation='horizontal')\n self.varmodel = VariableListModel(parent=self)\n self.attr_combo = gui.comboBox(box, self, 'selected_attr',\n orientation=Qt.Horizontal,\n label='Region attribute:',\n callback=self.on_attr_change,\n sendSelectedValue=True)\n self.attr_combo.setModel(self.varmodel)\n self.map_combo = gui.comboBox(box, self, 'selected_map',\n orientation=Qt.Horizontal,\n label='Map type:',\n callback=self.on_map_change,\n items=Map.all)\n hexpand = QSizePolicy(QSizePolicy.Expanding,\n QSizePolicy.Fixed)\n self.attr_combo.setSizePolicy(hexpand)\n self.map_combo.setSizePolicy(hexpand)\n\n url = urljoin('file:',\n pathname2url(os.path.join(\n os.path.dirname(__file__),\n 'resources',\n 'owgeomap.html')))\n self.webview = gui.WebviewWidget(self.controlArea, self, url=QUrl(url))\n self.controlArea.layout().addWidget(self.webview)\n\n QTimer.singleShot(\n 0, lambda: self.webview.evalJS('REGIONS = {};'.format({Map.WORLD: CC_WORLD,\n Map.EUROPE: CC_EUROPE,\n Map.USA: CC_USA})))\n\n def _repopulate_attr_combo(self, data):\n vars = [a for a in chain(data.domain.metas,\n data.domain.attributes,\n data.domain.class_vars)\n if a.is_string] if data else []\n self.varmodel.wrap(vars)\n # Select default attribute\n self.selected_attr = next((var.name\n for var in vars\n if var.name.lower().startswith(('country', 'location', 'region'))),\n vars[0].name if vars else '')\n\n def on_data(self, data):\n if data and not isinstance(data, Corpus):\n data = Corpus.from_table(data.domain, data)\n self.data = data\n self._repopulate_attr_combo(data)\n if not data:\n self.region_selected('')\n QTimer.singleShot(0, lambda: self.webview.evalJS('DATA = {}; renderMap();'))\n else:\n QTimer.singleShot(0, self.on_attr_change)\n\n def on_map_change(self, map_code=''):\n if map_code:\n self.map_combo.setCurrentIndex(self.map_combo.findData(map_code))\n else:\n map_code = self.map_combo.itemData(self.selected_map)\n\n inv_cc_map, cc_map = {Map.USA: (INV_CC_USA, CC_USA),\n Map.WORLD: (INV_CC_WORLD, CC_WORLD),\n Map.EUROPE: (INV_CC_EUROPE, CC_EUROPE)}[map_code]\n # Set country counts for JS\n data = defaultdict(int)\n for locations in self._iter_locations():\n keys = set(inv_cc_map.get(loc, loc) for loc in locations)\n for key in keys:\n if key in cc_map:\n data[key] += 1\n # Draw the new map\n self.webview.evalJS('DATA = {};'\n 'MAP_CODE = \"{}\";'\n 'SELECTED_REGIONS = {};'\n 'renderMap();'.format(dict(data),\n map_code,\n self.regions))\n\n def on_attr_change(self):\n if not self.selected_attr:\n return\n values = set(chain.from_iterable(self._iter_locations()))\n # Auto-select region map\n if 0 == len(values - SET_CC_USA):\n map_code = Map.USA\n elif 0 == len(values - SET_CC_EUROPE):\n map_code = Map.EUROPE\n else:\n map_code = Map.WORLD\n self.on_map_change(map_code)\n\n def _iter_locations(self):\n \"\"\" Iterator that yields an iterable per documents with all its's\n locations. 
\"\"\"\n attr = self.data.domain[self.selected_attr]\n for i in self.data.get_column_view(self.data.domain.index(attr))[0]:\n if len(i) > 3:\n yield map(lambda x: x.strip(), CC_NAMES.findall(i.lower()))\n else:\n yield (i, )\n\n\ndef main():\n from Orange.data import Table, Domain, StringVariable\n\n words = np.column_stack([\n 'Slovenia Slovenia SVN USA Iraq Iraq Iraq Iraq France FR'.split(),\n 'Slovenia Slovenia SVN France FR Austria NL GB GB GB'.split(),\n 'Alabama AL Texas TX TX TX MS Montana US-MT MT'.split(),\n ])\n metas = [\n StringVariable('World'),\n StringVariable('Europe'),\n StringVariable('USA'),\n ]\n domain = Domain([], metas=metas)\n table = Table.from_numpy(domain,\n X=np.zeros((len(words), 0)),\n metas=words)\n app = QApplication([''])\n w = OWGeoMap()\n w.on_data(table)\n w.show()\n app.exec()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "orangecontrib/text/widgets/owgeomap.py"}]} | 2,329 | 613 |
gh_patches_debug_31606 | rasdani/github-patches | git_diff | fossasia__open-event-server-3128 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Do not show deleted orders in organiser ui and do not auto delete expired orders
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/helpers/scheduled_jobs.py`
Content:
```
1 from datetime import datetime, timedelta
2
3 from dateutil.relativedelta import relativedelta
4 from flask import url_for
5 from sqlalchemy_continuum import transaction_class
6
7 from app.helpers.data import DataManager, delete_from_db, save_to_db
8 from app.helpers.data_getter import DataGetter
9 from app.helpers.helpers import send_after_event, monthdelta, send_followup_email_for_monthly_fee_payment
10 from app.helpers.helpers import send_email_for_expired_orders, send_email_for_monthly_fee_payment
11 from app.helpers.payment import get_fee
12 from app.helpers.ticketing import TicketingManager
13 from app.models.event import Event
14 from app.models.event_invoice import EventInvoice
15 from app.models.order import Order
16 from app.models.session import Session
17 from app.models.user import User
18
19
20 def empty_trash():
21 from app import current_app as app
22 with app.app_context():
23 events = Event.query.filter_by(in_trash=True)
24 users = User.query.filter_by(in_trash=True)
25 sessions = Session.query.filter_by(in_trash=True)
26 orders = Order.query.filter_by(status="deleted")
27 pending_orders = Order.query.filter_by(status="pending")
28 expired_orders = Order.query.filter_by(status="expired")
29 for event in events:
30 if datetime.now() - event.trash_date >= timedelta(days=30):
31 DataManager.delete_event(event.id)
32
33 for user in users:
34 if datetime.now() - user.trash_date >= timedelta(days=30):
35 transaction = transaction_class(Event)
36 transaction.query.filter_by(user_id=user.id).delete()
37 delete_from_db(user, "User deleted permanently")
38
39 for session_ in sessions:
40 if datetime.now() - session_.trash_date >= timedelta(days=30):
41 delete_from_db(session_, "Session deleted permanently")
42
43 for order in orders:
44 if datetime.now() - order.trashed_at >= timedelta(days=30):
45 delete_from_db(order, "Order deleted permanently")
46
47 for pending_order in pending_orders:
48 if datetime.now() - pending_order.created_at >= timedelta(days=3):
49 pending_order.status = "expired"
50 save_to_db(pending_order, "Pending order expired.")
51
52 for expired_order in expired_orders:
53 if datetime.now() - expired_order.created_at >= timedelta(days=6):
54 expired_order.status = "deleted"
55 expired_order.trashed_at = datetime.now()
56 save_to_db(expired_order, "Expired order deleted")
57
58
59 def send_after_event_mail():
60 from app import current_app as app
61 with app.app_context():
62 events = Event.query.all()
63 for event in events:
64 upcoming_events = DataGetter.get_upcoming_events()
65 organizers = DataGetter.get_user_event_roles_by_role_name(
66 event.id, 'organizer')
67 speakers = DataGetter.get_user_event_roles_by_role_name(event.id,
68 'speaker')
69 if datetime.now() > event.end_time:
70 for speaker in speakers:
71 send_after_event(speaker.user.email, event.id,
72 upcoming_events)
73 for organizer in organizers:
74 send_after_event(organizer.user.email, event.id,
75 upcoming_events)
76
77
78 def send_mail_to_expired_orders():
79 from app import current_app as app
80 with app.app_context():
81 orders = DataGetter.get_expired_orders()
82 for order in orders:
83 send_email_for_expired_orders(order.user.email, order.event.name, order.get_invoice_number(),
84 url_for('ticketing.view_order_after_payment',
85 order_identifier=order.identifier, _external=True))
86
87
88 def send_event_fee_notification():
89 from app import current_app as app
90 with app.app_context():
91 events = Event.query.all()
92 for event in events:
93 latest_invoice = EventInvoice.filter_by(event_id=event.id).order_by(EventInvoice.created_at.desc()).first()
94
95 if latest_invoice:
96 orders = Order.query \
97 .filter_by(event_id=event.id) \
98 .filter_by(status='completed') \
99 .filter(Order.completed_at > latest_invoice.created_at).all()
100 else:
101 orders = Order.query.filter_by(event_id=event.id).filter_by(status='completed').all()
102
103 fee_total = 0
104 for order in orders:
105 for order_ticket in order.tickets:
106 ticket = TicketingManager.get_ticket(order_ticket.ticket_id)
107 if order.paid_via != 'free' and order.amount > 0 and ticket.price > 0:
108 fee = ticket.price * (get_fee(order.event.payment_currency) / 100.0)
109 fee_total += fee
110
111 if fee_total > 0:
112 new_invoice = EventInvoice(amount=fee_total, event_id=event.id, user_id=event.creator_id)
113
114 if event.discount_code_id and event.discount_code:
115 r = relativedelta(datetime.utcnow(), event.created_at)
116 if r <= event.discount_code.max_quantity:
117 new_invoice.amount = fee_total - (fee_total * (event.discount_code.value / 100.0))
118 new_invoice.discount_code_id = event.discount_code_id
119
120 save_to_db(new_invoice)
121 prev_month = monthdelta(new_invoice.created_at, 1).strftime("%b %Y") # Displayed as Aug 2016
122 send_email_for_monthly_fee_payment(new_invoice.user.email,
123 event.name,
124 prev_month,
125 new_invoice.amount,
126 url_for('event_invoicing.view_invoice',
127 invoice_identifier=new_invoice.identifier, _external=True))
128
129
130 def send_event_fee_notification_followup():
131 from app import current_app as app
132 with app.app_context():
133 incomplete_invoices = EventInvoice.query.filter(EventInvoice.status != 'completed').all()
134 for incomplete_invoice in incomplete_invoices:
135 if incomplete_invoice.amount > 0:
136 prev_month = monthdelta(incomplete_invoice.created_at, 1).strftime("%b %Y") # Displayed as Aug 2016
137 send_followup_email_for_monthly_fee_payment(incomplete_invoice.user.email,
138 incomplete_invoice.event.name,
139 prev_month,
140 incomplete_invoice.amount,
141 url_for('event_invoicing.view_invoice',
142 invoice_identifier=incomplete_invoice.identifier,
143 _external=True))
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/helpers/scheduled_jobs.py b/app/helpers/scheduled_jobs.py
--- a/app/helpers/scheduled_jobs.py
+++ b/app/helpers/scheduled_jobs.py
@@ -23,9 +23,8 @@
events = Event.query.filter_by(in_trash=True)
users = User.query.filter_by(in_trash=True)
sessions = Session.query.filter_by(in_trash=True)
- orders = Order.query.filter_by(status="deleted")
pending_orders = Order.query.filter_by(status="pending")
- expired_orders = Order.query.filter_by(status="expired")
+
for event in events:
if datetime.now() - event.trash_date >= timedelta(days=30):
DataManager.delete_event(event.id)
@@ -40,21 +39,11 @@
if datetime.now() - session_.trash_date >= timedelta(days=30):
delete_from_db(session_, "Session deleted permanently")
- for order in orders:
- if datetime.now() - order.trashed_at >= timedelta(days=30):
- delete_from_db(order, "Order deleted permanently")
-
for pending_order in pending_orders:
if datetime.now() - pending_order.created_at >= timedelta(days=3):
pending_order.status = "expired"
save_to_db(pending_order, "Pending order expired.")
- for expired_order in expired_orders:
- if datetime.now() - expired_order.created_at >= timedelta(days=6):
- expired_order.status = "deleted"
- expired_order.trashed_at = datetime.now()
- save_to_db(expired_order, "Expired order deleted")
-
def send_after_event_mail():
from app import current_app as app
| {"golden_diff": "diff --git a/app/helpers/scheduled_jobs.py b/app/helpers/scheduled_jobs.py\n--- a/app/helpers/scheduled_jobs.py\n+++ b/app/helpers/scheduled_jobs.py\n@@ -23,9 +23,8 @@\n events = Event.query.filter_by(in_trash=True)\n users = User.query.filter_by(in_trash=True)\n sessions = Session.query.filter_by(in_trash=True)\n- orders = Order.query.filter_by(status=\"deleted\")\n pending_orders = Order.query.filter_by(status=\"pending\")\n- expired_orders = Order.query.filter_by(status=\"expired\")\n+\n for event in events:\n if datetime.now() - event.trash_date >= timedelta(days=30):\n DataManager.delete_event(event.id)\n@@ -40,21 +39,11 @@\n if datetime.now() - session_.trash_date >= timedelta(days=30):\n delete_from_db(session_, \"Session deleted permanently\")\n \n- for order in orders:\n- if datetime.now() - order.trashed_at >= timedelta(days=30):\n- delete_from_db(order, \"Order deleted permanently\")\n-\n for pending_order in pending_orders:\n if datetime.now() - pending_order.created_at >= timedelta(days=3):\n pending_order.status = \"expired\"\n save_to_db(pending_order, \"Pending order expired.\")\n \n- for expired_order in expired_orders:\n- if datetime.now() - expired_order.created_at >= timedelta(days=6):\n- expired_order.status = \"deleted\"\n- expired_order.trashed_at = datetime.now()\n- save_to_db(expired_order, \"Expired order deleted\")\n-\n \n def send_after_event_mail():\n from app import current_app as app\n", "issue": "Do not show deleted orders in organiser ui and do not auto delete expired orders\n\n", "before_files": [{"content": "from datetime import datetime, timedelta\n\nfrom dateutil.relativedelta import relativedelta\nfrom flask import url_for\nfrom sqlalchemy_continuum import transaction_class\n\nfrom app.helpers.data import DataManager, delete_from_db, save_to_db\nfrom app.helpers.data_getter import DataGetter\nfrom app.helpers.helpers import send_after_event, monthdelta, send_followup_email_for_monthly_fee_payment\nfrom app.helpers.helpers import send_email_for_expired_orders, send_email_for_monthly_fee_payment\nfrom app.helpers.payment import get_fee\nfrom app.helpers.ticketing import TicketingManager\nfrom app.models.event import Event\nfrom app.models.event_invoice import EventInvoice\nfrom app.models.order import Order\nfrom app.models.session import Session\nfrom app.models.user import User\n\n\ndef empty_trash():\n from app import current_app as app\n with app.app_context():\n events = Event.query.filter_by(in_trash=True)\n users = User.query.filter_by(in_trash=True)\n sessions = Session.query.filter_by(in_trash=True)\n orders = Order.query.filter_by(status=\"deleted\")\n pending_orders = Order.query.filter_by(status=\"pending\")\n expired_orders = Order.query.filter_by(status=\"expired\")\n for event in events:\n if datetime.now() - event.trash_date >= timedelta(days=30):\n DataManager.delete_event(event.id)\n\n for user in users:\n if datetime.now() - user.trash_date >= timedelta(days=30):\n transaction = transaction_class(Event)\n transaction.query.filter_by(user_id=user.id).delete()\n delete_from_db(user, \"User deleted permanently\")\n\n for session_ in sessions:\n if datetime.now() - session_.trash_date >= timedelta(days=30):\n delete_from_db(session_, \"Session deleted permanently\")\n\n for order in orders:\n if datetime.now() - order.trashed_at >= timedelta(days=30):\n delete_from_db(order, \"Order deleted permanently\")\n\n for pending_order in pending_orders:\n if datetime.now() - pending_order.created_at >= timedelta(days=3):\n 
pending_order.status = \"expired\"\n save_to_db(pending_order, \"Pending order expired.\")\n\n for expired_order in expired_orders:\n if datetime.now() - expired_order.created_at >= timedelta(days=6):\n expired_order.status = \"deleted\"\n expired_order.trashed_at = datetime.now()\n save_to_db(expired_order, \"Expired order deleted\")\n\n\ndef send_after_event_mail():\n from app import current_app as app\n with app.app_context():\n events = Event.query.all()\n for event in events:\n upcoming_events = DataGetter.get_upcoming_events()\n organizers = DataGetter.get_user_event_roles_by_role_name(\n event.id, 'organizer')\n speakers = DataGetter.get_user_event_roles_by_role_name(event.id,\n 'speaker')\n if datetime.now() > event.end_time:\n for speaker in speakers:\n send_after_event(speaker.user.email, event.id,\n upcoming_events)\n for organizer in organizers:\n send_after_event(organizer.user.email, event.id,\n upcoming_events)\n\n\ndef send_mail_to_expired_orders():\n from app import current_app as app\n with app.app_context():\n orders = DataGetter.get_expired_orders()\n for order in orders:\n send_email_for_expired_orders(order.user.email, order.event.name, order.get_invoice_number(),\n url_for('ticketing.view_order_after_payment',\n order_identifier=order.identifier, _external=True))\n\n\ndef send_event_fee_notification():\n from app import current_app as app\n with app.app_context():\n events = Event.query.all()\n for event in events:\n latest_invoice = EventInvoice.filter_by(event_id=event.id).order_by(EventInvoice.created_at.desc()).first()\n\n if latest_invoice:\n orders = Order.query \\\n .filter_by(event_id=event.id) \\\n .filter_by(status='completed') \\\n .filter(Order.completed_at > latest_invoice.created_at).all()\n else:\n orders = Order.query.filter_by(event_id=event.id).filter_by(status='completed').all()\n\n fee_total = 0\n for order in orders:\n for order_ticket in order.tickets:\n ticket = TicketingManager.get_ticket(order_ticket.ticket_id)\n if order.paid_via != 'free' and order.amount > 0 and ticket.price > 0:\n fee = ticket.price * (get_fee(order.event.payment_currency) / 100.0)\n fee_total += fee\n\n if fee_total > 0:\n new_invoice = EventInvoice(amount=fee_total, event_id=event.id, user_id=event.creator_id)\n\n if event.discount_code_id and event.discount_code:\n r = relativedelta(datetime.utcnow(), event.created_at)\n if r <= event.discount_code.max_quantity:\n new_invoice.amount = fee_total - (fee_total * (event.discount_code.value / 100.0))\n new_invoice.discount_code_id = event.discount_code_id\n\n save_to_db(new_invoice)\n prev_month = monthdelta(new_invoice.created_at, 1).strftime(\"%b %Y\") # Displayed as Aug 2016\n send_email_for_monthly_fee_payment(new_invoice.user.email,\n event.name,\n prev_month,\n new_invoice.amount,\n url_for('event_invoicing.view_invoice',\n invoice_identifier=new_invoice.identifier, _external=True))\n\n\ndef send_event_fee_notification_followup():\n from app import current_app as app\n with app.app_context():\n incomplete_invoices = EventInvoice.query.filter(EventInvoice.status != 'completed').all()\n for incomplete_invoice in incomplete_invoices:\n if incomplete_invoice.amount > 0:\n prev_month = monthdelta(incomplete_invoice.created_at, 1).strftime(\"%b %Y\") # Displayed as Aug 2016\n send_followup_email_for_monthly_fee_payment(incomplete_invoice.user.email,\n incomplete_invoice.event.name,\n prev_month,\n incomplete_invoice.amount,\n url_for('event_invoicing.view_invoice',\n invoice_identifier=incomplete_invoice.identifier,\n 
_external=True))\n", "path": "app/helpers/scheduled_jobs.py"}], "after_files": [{"content": "from datetime import datetime, timedelta\n\nfrom dateutil.relativedelta import relativedelta\nfrom flask import url_for\nfrom sqlalchemy_continuum import transaction_class\n\nfrom app.helpers.data import DataManager, delete_from_db, save_to_db\nfrom app.helpers.data_getter import DataGetter\nfrom app.helpers.helpers import send_after_event, monthdelta, send_followup_email_for_monthly_fee_payment\nfrom app.helpers.helpers import send_email_for_expired_orders, send_email_for_monthly_fee_payment\nfrom app.helpers.payment import get_fee\nfrom app.helpers.ticketing import TicketingManager\nfrom app.models.event import Event\nfrom app.models.event_invoice import EventInvoice\nfrom app.models.order import Order\nfrom app.models.session import Session\nfrom app.models.user import User\n\n\ndef empty_trash():\n from app import current_app as app\n with app.app_context():\n events = Event.query.filter_by(in_trash=True)\n users = User.query.filter_by(in_trash=True)\n sessions = Session.query.filter_by(in_trash=True)\n pending_orders = Order.query.filter_by(status=\"pending\")\n\n for event in events:\n if datetime.now() - event.trash_date >= timedelta(days=30):\n DataManager.delete_event(event.id)\n\n for user in users:\n if datetime.now() - user.trash_date >= timedelta(days=30):\n transaction = transaction_class(Event)\n transaction.query.filter_by(user_id=user.id).delete()\n delete_from_db(user, \"User deleted permanently\")\n\n for session_ in sessions:\n if datetime.now() - session_.trash_date >= timedelta(days=30):\n delete_from_db(session_, \"Session deleted permanently\")\n\n for pending_order in pending_orders:\n if datetime.now() - pending_order.created_at >= timedelta(days=3):\n pending_order.status = \"expired\"\n save_to_db(pending_order, \"Pending order expired.\")\n\n\ndef send_after_event_mail():\n from app import current_app as app\n with app.app_context():\n events = Event.query.all()\n for event in events:\n upcoming_events = DataGetter.get_upcoming_events()\n organizers = DataGetter.get_user_event_roles_by_role_name(\n event.id, 'organizer')\n speakers = DataGetter.get_user_event_roles_by_role_name(event.id,\n 'speaker')\n if datetime.now() > event.end_time:\n for speaker in speakers:\n send_after_event(speaker.user.email, event.id,\n upcoming_events)\n for organizer in organizers:\n send_after_event(organizer.user.email, event.id,\n upcoming_events)\n\n\ndef send_mail_to_expired_orders():\n from app import current_app as app\n with app.app_context():\n orders = DataGetter.get_expired_orders()\n for order in orders:\n send_email_for_expired_orders(order.user.email, order.event.name, order.get_invoice_number(),\n url_for('ticketing.view_order_after_payment',\n order_identifier=order.identifier, _external=True))\n\n\ndef send_event_fee_notification():\n from app import current_app as app\n with app.app_context():\n events = Event.query.all()\n for event in events:\n latest_invoice = EventInvoice.filter_by(event_id=event.id).order_by(EventInvoice.created_at.desc()).first()\n\n if latest_invoice:\n orders = Order.query \\\n .filter_by(event_id=event.id) \\\n .filter_by(status='completed') \\\n .filter(Order.completed_at > latest_invoice.created_at).all()\n else:\n orders = Order.query.filter_by(event_id=event.id).filter_by(status='completed').all()\n\n fee_total = 0\n for order in orders:\n for order_ticket in order.tickets:\n ticket = TicketingManager.get_ticket(order_ticket.ticket_id)\n if 
order.paid_via != 'free' and order.amount > 0 and ticket.price > 0:\n fee = ticket.price * (get_fee(order.event.payment_currency) / 100.0)\n fee_total += fee\n\n if fee_total > 0:\n new_invoice = EventInvoice(amount=fee_total, event_id=event.id, user_id=event.creator_id)\n\n if event.discount_code_id and event.discount_code:\n r = relativedelta(datetime.utcnow(), event.created_at)\n if r <= event.discount_code.max_quantity:\n new_invoice.amount = fee_total - (fee_total * (event.discount_code.value / 100.0))\n new_invoice.discount_code_id = event.discount_code_id\n\n save_to_db(new_invoice)\n prev_month = monthdelta(new_invoice.created_at, 1).strftime(\"%b %Y\") # Displayed as Aug 2016\n send_email_for_monthly_fee_payment(new_invoice.user.email,\n event.name,\n prev_month,\n new_invoice.amount,\n url_for('event_invoicing.view_invoice',\n invoice_identifier=new_invoice.identifier, _external=True))\n\n\ndef send_event_fee_notification_followup():\n from app import current_app as app\n with app.app_context():\n incomplete_invoices = EventInvoice.query.filter(EventInvoice.status != 'completed').all()\n for incomplete_invoice in incomplete_invoices:\n if incomplete_invoice.amount > 0:\n prev_month = monthdelta(incomplete_invoice.created_at, 1).strftime(\"%b %Y\") # Displayed as Aug 2016\n send_followup_email_for_monthly_fee_payment(incomplete_invoice.user.email,\n incomplete_invoice.event.name,\n prev_month,\n incomplete_invoice.amount,\n url_for('event_invoicing.view_invoice',\n invoice_identifier=incomplete_invoice.identifier,\n _external=True))\n", "path": "app/helpers/scheduled_jobs.py"}]} | 1,883 | 356 |
gh_patches_debug_21723 | rasdani/github-patches | git_diff | graspologic-org__graspologic-85 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix notebooks on netlify
Issues with setting jupyter notebook kernels prevent them from running on netlify
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `graspy/embed/lse.py`
Content:
```
1 # ase.py
2 # Created by Ben Pedigo on 2018-09-26.
3 # Email: [email protected]
4 import warnings
5
6 from .embed import BaseEmbed
7 from .svd import selectSVD
8 from ..utils import import_graph, to_laplace, get_lcc, is_fully_connected
9
10
11 class LaplacianSpectralEmbed(BaseEmbed):
12 r"""
13 Class for computing the laplacian spectral embedding of a graph
14
15 The laplacian spectral embedding (LSE) is a k-dimensional Euclidean representation of
16 the graph based on its Laplacian matrix [1]_. It relies on an SVD to reduce the dimensionality
17 to the specified k, or if k is unspecified, can find a number of dimensions automatically.
18
19 Parameters
20 ----------
21 n_components : int or None, default = None
22 Desired dimensionality of output data. If "full",
23 n_components must be <= min(X.shape). Otherwise, n_components must be
24 < min(X.shape). If None, then optimal dimensions will be chosen by
25 ``select_dimension`` using ``n_elbows`` argument.
26 n_elbows : int, optional, default: 2
27 If `n_compoents=None`, then compute the optimal embedding dimension using
28 `select_dimension`. Otherwise, ignored.
29 algorithm : {'full', 'truncated' (default), 'randomized'}, optional
30 SVD solver to use:
31
32 - 'full'
33 Computes full svd using ``scipy.linalg.svd``
34 - 'truncated'
35 Computes truncated svd using ``scipy.sparse.linalg.svd``
36 - 'randomized'
37 Computes randomized svd using
38 ``sklearn.utils.extmath.randomized_svd``
39 n_iter : int, optional (default = 5)
40 Number of iterations for randomized SVD solver. Not used by 'full' or
41 'truncated'. The default is larger than the default in randomized_svd
42 to handle sparse matrices that may have large slowly decaying spectrum.
43 lcc : bool, optional (default=True)
44 If True, computes the largest connected component for the input graph.
45
46 Attributes
47 ----------
48 latent_left_ : array, shape (n_samples, n_components)
49 Estimated left latent positions of the graph.
50 latent_right_ : array, shape (n_samples, n_components), or None
51 Only computed when the graph is directed, or adjacency matrix is assymetric.
52 Estimated right latent positions of the graph. Otherwise, None.
53 singular_values_ : array, shape (n_components)
54 Singular values associated with the latent position matrices.
55 indices_ : array, or None
56 If ``lcc`` is True, these are the indices of the vertices that were kept.
57
58 See Also
59 --------
60 graspy.embed.selectSVD
61 graspy.embed.select_dimension
62 graspy.utils.to_laplace
63
64 Notes
65 -----
66 The singular value decomposition:
67
68 .. math:: A = U \Sigma V^T
69
70 is used to find an orthonormal basis for a matrix, which in our case is the Laplacian
71 matrix of the graph. These basis vectors (in the matrices U or V) are ordered according
72 to the amount of variance they explain in the original matrix. By selecting a subset of these
73 basis vectors (through our choice of dimensionality reduction) we can find a lower dimensional
74 space in which to represent the graph
75
76 References
77 ----------
78 .. [1] Sussman, D.L., Tang, M., Fishkind, D.E., Priebe, C.E. "A
79 Consistent Adjacency Spectral Embedding for Stochastic Blockmodel Graphs,"
80 Journal of the American Statistical Association, Vol. 107(499), 2012
81 """
82
83 def __init__(
84 self,
85 form='DAD',
86 n_components=None,
87 n_elbows=2,
88 algorithm='randomized',
89 n_iter=5,
90 ):
91 super().__init__(
92 n_components=n_components,
93 n_elbows=n_elbows,
94 algorithm=algorithm,
95 n_iter=n_iter,
96 )
97 self.form = form
98
99 def fit(self, graph):
100 """
101 Fit LSE model to input graph
102
103 By default, uses the Laplacian normalization of the form:
104
105 .. math:: L = D^{-1/2} A D^{-1/2}
106
107 Parameters
108 ----------
109 graph : array_like or networkx.Graph
110 Input graph to embed. see graphstats.utils.import_graph
111
112 form : {'DAD' (default), 'I-DAD'}, optional
113 Specifies the type of Laplacian normalization to use.
114
115 Returns
116 -------
117 self : returns an instance of self.
118 """
119 A = import_graph(graph)
120
121 if not is_fully_connected(A):
122 msg = """Input graph is not fully connected. Results may not \
123 be optimal. You can compute the largest connected component by \
124 using ``graspy.utils.get_lcc``."""
125 warnings.warn(msg, UserWarning)
126
127 L_norm = to_laplace(A, form=self.form)
128 self._reduce_dim(L_norm)
129 return self
130
```
Path: `graspy/embed/ase.py`
Content:
```
1 # ase.py
2 # Created by Ben Pedigo on 2018-09-15.
3 # Email: [email protected]
4 import warnings
5
6 from .embed import BaseEmbed
7 from .svd import selectSVD
8 from ..utils import import_graph, get_lcc, is_fully_connected
9
10
11 class AdjacencySpectralEmbed(BaseEmbed):
12 r"""
13 Class for computing the adjacency spectral embedding of a graph
14
15 The adjacency spectral embedding (ASE) is a k-dimensional Euclidean representation of
16 the graph based on its adjacency matrix [1]_. It relies on an SVD to reduce the dimensionality
17 to the specified k, or if k is unspecified, can find a number of dimensions automatically
18 (see graphstats.embed.svd.selectSVD).
19
20 Parameters
21 ----------
22 n_components : int or None, default = None
23 Desired dimensionality of output data. If "full",
24 n_components must be <= min(X.shape). Otherwise, n_components must be
25 < min(X.shape). If None, then optimal dimensions will be chosen by
26 ``select_dimension`` using ``n_elbows`` argument.
27 n_elbows : int, optional, default: 2
28 If `n_compoents=None`, then compute the optimal embedding dimension using
29 `select_dimension`. Otherwise, ignored.
30 algorithm : {'full', 'truncated' (default), 'randomized'}, optional
31 SVD solver to use:
32
33 - 'full'
34 Computes full svd using ``scipy.linalg.svd``
35 - 'truncated'
36 Computes truncated svd using ``scipy.sparse.linalg.svd``
37 - 'randomized'
38 Computes randomized svd using
39 ``sklearn.utils.extmath.randomized_svd``
40 n_iter : int, optional (default = 5)
41 Number of iterations for randomized SVD solver. Not used by 'full' or
42 'truncated'. The default is larger than the default in randomized_svd
43 to handle sparse matrices that may have large slowly decaying spectrum.
44 lcc : bool, optional (default=True)
45 If True, computes the largest connected component for the input graph.
46
47 Attributes
48 ----------
49 latent_left_ : array, shape (n_samples, n_components)
50 Estimated left latent positions of the graph.
51 latent_right_ : array, shape (n_samples, n_components), or None
52 Only computed when the graph is directed, or adjacency matrix is assymetric.
53 Estimated right latent positions of the graph. Otherwise, None.
54 singular_values_ : array, shape (n_components)
55 Singular values associated with the latent position matrices.
56 indices_ : array, or None
57 If ``lcc`` is True, these are the indices of the vertices that were kept.
58
59 See Also
60 --------
61 graspy.embed.selectSVD
62 graspy.embed.select_dimension
63
64 Notes
65 -----
66 The singular value decomposition:
67
68 .. math:: A = U \Sigma V^T
69
70 is used to find an orthonormal basis for a matrix, which in our case is the adjacency
71 matrix of the graph. These basis vectors (in the matrices U or V) are ordered according
72 to the amount of variance they explain in the original matrix. By selecting a subset of these
73 basis vectors (through our choice of dimensionality reduction) we can find a lower dimensional
74 space in which to represent the graph
75
76 References
77 ----------
78 .. [1] Sussman, D.L., Tang, M., Fishkind, D.E., Priebe, C.E. "A
79 Consistent Adjacency Spectral Embedding for Stochastic Blockmodel Graphs,"
80 Journal of the American Statistical Association, Vol. 107(499), 2012
81 """
82
83 def __init__(
84 self,
85 n_components=None,
86 n_elbows=2,
87 algorithm='randomized',
88 n_iter=5,
89 ):
90 super().__init__(
91 n_components=n_components,
92 n_elbows=n_elbows,
93 algorithm=algorithm,
94 n_iter=n_iter,
95 )
96
97 def fit(self, graph):
98 """
99 Fit ASE model to input graph
100
101 Parameters
102 ----------
103 graph : array_like or networkx.Graph
104 input graph to embed. see graphstats.utils.import_graph
105
106 Returns
107 -------
108 self : returns an instance of self.
109 """
110 A = import_graph(graph)
111
112 if not is_fully_connected(A):
113 msg = """Input graph is not fully connected. Results may not \
114 be optimal. You can compute the largest connected component by \
115 using ``graspy.utils.get_lcc``."""
116 warnings.warn(msg, UserWarning)
117
118 self._reduce_dim(A)
119 return self
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/graspy/embed/ase.py b/graspy/embed/ase.py
--- a/graspy/embed/ase.py
+++ b/graspy/embed/ase.py
@@ -41,8 +41,6 @@
Number of iterations for randomized SVD solver. Not used by 'full' or
'truncated'. The default is larger than the default in randomized_svd
to handle sparse matrices that may have large slowly decaying spectrum.
- lcc : bool, optional (default=True)
- If True, computes the largest connected component for the input graph.
Attributes
----------
diff --git a/graspy/embed/lse.py b/graspy/embed/lse.py
--- a/graspy/embed/lse.py
+++ b/graspy/embed/lse.py
@@ -40,8 +40,6 @@
Number of iterations for randomized SVD solver. Not used by 'full' or
'truncated'. The default is larger than the default in randomized_svd
to handle sparse matrices that may have large slowly decaying spectrum.
- lcc : bool, optional (default=True)
- If True, computes the largest connected component for the input graph.
Attributes
----------
| {"golden_diff": "diff --git a/graspy/embed/ase.py b/graspy/embed/ase.py\n--- a/graspy/embed/ase.py\n+++ b/graspy/embed/ase.py\n@@ -41,8 +41,6 @@\n Number of iterations for randomized SVD solver. Not used by 'full' or \n 'truncated'. The default is larger than the default in randomized_svd \n to handle sparse matrices that may have large slowly decaying spectrum.\n- lcc : bool, optional (default=True)\n- If True, computes the largest connected component for the input graph.\n \n Attributes\n ----------\ndiff --git a/graspy/embed/lse.py b/graspy/embed/lse.py\n--- a/graspy/embed/lse.py\n+++ b/graspy/embed/lse.py\n@@ -40,8 +40,6 @@\n Number of iterations for randomized SVD solver. Not used by 'full' or \n 'truncated'. The default is larger than the default in randomized_svd \n to handle sparse matrices that may have large slowly decaying spectrum.\n- lcc : bool, optional (default=True)\n- If True, computes the largest connected component for the input graph.\n \n Attributes\n ----------\n", "issue": "Fix notebooks on netlify\nIssues with setting jupyter notebook kernels prevent them from running on netlify\n", "before_files": [{"content": "# ase.py\n# Created by Ben Pedigo on 2018-09-26.\n# Email: [email protected]\nimport warnings\n\nfrom .embed import BaseEmbed\nfrom .svd import selectSVD\nfrom ..utils import import_graph, to_laplace, get_lcc, is_fully_connected\n\n\nclass LaplacianSpectralEmbed(BaseEmbed):\n r\"\"\"\n Class for computing the laplacian spectral embedding of a graph \n \n The laplacian spectral embedding (LSE) is a k-dimensional Euclidean representation of \n the graph based on its Laplacian matrix [1]_. It relies on an SVD to reduce the dimensionality\n to the specified k, or if k is unspecified, can find a number of dimensions automatically.\n\n Parameters\n ----------\n n_components : int or None, default = None\n Desired dimensionality of output data. If \"full\", \n n_components must be <= min(X.shape). Otherwise, n_components must be\n < min(X.shape). If None, then optimal dimensions will be chosen by\n ``select_dimension`` using ``n_elbows`` argument.\n n_elbows : int, optional, default: 2\n If `n_compoents=None`, then compute the optimal embedding dimension using\n `select_dimension`. Otherwise, ignored.\n algorithm : {'full', 'truncated' (default), 'randomized'}, optional\n SVD solver to use:\n\n - 'full'\n Computes full svd using ``scipy.linalg.svd``\n - 'truncated'\n Computes truncated svd using ``scipy.sparse.linalg.svd``\n - 'randomized'\n Computes randomized svd using \n ``sklearn.utils.extmath.randomized_svd``\n n_iter : int, optional (default = 5)\n Number of iterations for randomized SVD solver. Not used by 'full' or \n 'truncated'. The default is larger than the default in randomized_svd \n to handle sparse matrices that may have large slowly decaying spectrum.\n lcc : bool, optional (default=True)\n If True, computes the largest connected component for the input graph.\n\n Attributes\n ----------\n latent_left_ : array, shape (n_samples, n_components)\n Estimated left latent positions of the graph.\n latent_right_ : array, shape (n_samples, n_components), or None\n Only computed when the graph is directed, or adjacency matrix is assymetric.\n Estimated right latent positions of the graph. 
Otherwise, None.\n singular_values_ : array, shape (n_components)\n Singular values associated with the latent position matrices.\n indices_ : array, or None\n If ``lcc`` is True, these are the indices of the vertices that were kept.\n\n See Also\n --------\n graspy.embed.selectSVD\n graspy.embed.select_dimension\n graspy.utils.to_laplace\n\n Notes\n -----\n The singular value decomposition: \n\n .. math:: A = U \\Sigma V^T\n\n is used to find an orthonormal basis for a matrix, which in our case is the Laplacian\n matrix of the graph. These basis vectors (in the matrices U or V) are ordered according \n to the amount of variance they explain in the original matrix. By selecting a subset of these\n basis vectors (through our choice of dimensionality reduction) we can find a lower dimensional \n space in which to represent the graph\n\n References\n ----------\n .. [1] Sussman, D.L., Tang, M., Fishkind, D.E., Priebe, C.E. \"A\n Consistent Adjacency Spectral Embedding for Stochastic Blockmodel Graphs,\"\n Journal of the American Statistical Association, Vol. 107(499), 2012\n \"\"\"\n\n def __init__(\n self,\n form='DAD',\n n_components=None,\n n_elbows=2,\n algorithm='randomized',\n n_iter=5,\n ):\n super().__init__(\n n_components=n_components,\n n_elbows=n_elbows,\n algorithm=algorithm,\n n_iter=n_iter,\n )\n self.form = form\n\n def fit(self, graph):\n \"\"\"\n Fit LSE model to input graph\n\n By default, uses the Laplacian normalization of the form:\n\n .. math:: L = D^{-1/2} A D^{-1/2}\n\n Parameters\n ----------\n graph : array_like or networkx.Graph\n Input graph to embed. see graphstats.utils.import_graph\n\n form : {'DAD' (default), 'I-DAD'}, optional\n Specifies the type of Laplacian normalization to use.\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n A = import_graph(graph)\n\n if not is_fully_connected(A):\n msg = \"\"\"Input graph is not fully connected. Results may not \\\n be optimal. You can compute the largest connected component by \\\n using ``graspy.utils.get_lcc``.\"\"\"\n warnings.warn(msg, UserWarning)\n\n L_norm = to_laplace(A, form=self.form)\n self._reduce_dim(L_norm)\n return self\n", "path": "graspy/embed/lse.py"}, {"content": "# ase.py\n# Created by Ben Pedigo on 2018-09-15.\n# Email: [email protected]\nimport warnings\n\nfrom .embed import BaseEmbed\nfrom .svd import selectSVD\nfrom ..utils import import_graph, get_lcc, is_fully_connected\n\n\nclass AdjacencySpectralEmbed(BaseEmbed):\n r\"\"\"\n Class for computing the adjacency spectral embedding of a graph \n \n The adjacency spectral embedding (ASE) is a k-dimensional Euclidean representation of \n the graph based on its adjacency matrix [1]_. It relies on an SVD to reduce the dimensionality\n to the specified k, or if k is unspecified, can find a number of dimensions automatically\n (see graphstats.embed.svd.selectSVD).\n\n Parameters\n ----------\n n_components : int or None, default = None\n Desired dimensionality of output data. If \"full\", \n n_components must be <= min(X.shape). Otherwise, n_components must be\n < min(X.shape). If None, then optimal dimensions will be chosen by\n ``select_dimension`` using ``n_elbows`` argument.\n n_elbows : int, optional, default: 2\n If `n_compoents=None`, then compute the optimal embedding dimension using\n `select_dimension`. 
Otherwise, ignored.\n algorithm : {'full', 'truncated' (default), 'randomized'}, optional\n SVD solver to use:\n\n - 'full'\n Computes full svd using ``scipy.linalg.svd``\n - 'truncated'\n Computes truncated svd using ``scipy.sparse.linalg.svd``\n - 'randomized'\n Computes randomized svd using \n ``sklearn.utils.extmath.randomized_svd``\n n_iter : int, optional (default = 5)\n Number of iterations for randomized SVD solver. Not used by 'full' or \n 'truncated'. The default is larger than the default in randomized_svd \n to handle sparse matrices that may have large slowly decaying spectrum.\n lcc : bool, optional (default=True)\n If True, computes the largest connected component for the input graph.\n\n Attributes\n ----------\n latent_left_ : array, shape (n_samples, n_components)\n Estimated left latent positions of the graph. \n latent_right_ : array, shape (n_samples, n_components), or None\n Only computed when the graph is directed, or adjacency matrix is assymetric.\n Estimated right latent positions of the graph. Otherwise, None.\n singular_values_ : array, shape (n_components)\n Singular values associated with the latent position matrices. \n indices_ : array, or None\n If ``lcc`` is True, these are the indices of the vertices that were kept.\n\n See Also\n --------\n graspy.embed.selectSVD\n graspy.embed.select_dimension\n\n Notes\n -----\n The singular value decomposition: \n\n .. math:: A = U \\Sigma V^T\n\n is used to find an orthonormal basis for a matrix, which in our case is the adjacency\n matrix of the graph. These basis vectors (in the matrices U or V) are ordered according \n to the amount of variance they explain in the original matrix. By selecting a subset of these\n basis vectors (through our choice of dimensionality reduction) we can find a lower dimensional \n space in which to represent the graph\n\n References\n ----------\n .. [1] Sussman, D.L., Tang, M., Fishkind, D.E., Priebe, C.E. \"A\n Consistent Adjacency Spectral Embedding for Stochastic Blockmodel Graphs,\"\n Journal of the American Statistical Association, Vol. 107(499), 2012\n \"\"\"\n\n def __init__(\n self,\n n_components=None,\n n_elbows=2,\n algorithm='randomized',\n n_iter=5,\n ):\n super().__init__(\n n_components=n_components,\n n_elbows=n_elbows,\n algorithm=algorithm,\n n_iter=n_iter,\n )\n\n def fit(self, graph):\n \"\"\"\n Fit ASE model to input graph\n\n Parameters\n ----------\n graph : array_like or networkx.Graph\n input graph to embed. see graphstats.utils.import_graph\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n A = import_graph(graph)\n\n if not is_fully_connected(A):\n msg = \"\"\"Input graph is not fully connected. Results may not \\\n be optimal. You can compute the largest connected component by \\\n using ``graspy.utils.get_lcc``.\"\"\"\n warnings.warn(msg, UserWarning)\n\n self._reduce_dim(A)\n return self", "path": "graspy/embed/ase.py"}], "after_files": [{"content": "# ase.py\n# Created by Ben Pedigo on 2018-09-26.\n# Email: [email protected]\nimport warnings\n\nfrom .embed import BaseEmbed\nfrom .svd import selectSVD\nfrom ..utils import import_graph, to_laplace, get_lcc, is_fully_connected\n\n\nclass LaplacianSpectralEmbed(BaseEmbed):\n r\"\"\"\n Class for computing the laplacian spectral embedding of a graph \n \n The laplacian spectral embedding (LSE) is a k-dimensional Euclidean representation of \n the graph based on its Laplacian matrix [1]_. 
It relies on an SVD to reduce the dimensionality\n to the specified k, or if k is unspecified, can find a number of dimensions automatically.\n\n Parameters\n ----------\n n_components : int or None, default = None\n Desired dimensionality of output data. If \"full\", \n n_components must be <= min(X.shape). Otherwise, n_components must be\n < min(X.shape). If None, then optimal dimensions will be chosen by\n ``select_dimension`` using ``n_elbows`` argument.\n n_elbows : int, optional, default: 2\n If `n_compoents=None`, then compute the optimal embedding dimension using\n `select_dimension`. Otherwise, ignored.\n algorithm : {'full', 'truncated' (default), 'randomized'}, optional\n SVD solver to use:\n\n - 'full'\n Computes full svd using ``scipy.linalg.svd``\n - 'truncated'\n Computes truncated svd using ``scipy.sparse.linalg.svd``\n - 'randomized'\n Computes randomized svd using \n ``sklearn.utils.extmath.randomized_svd``\n n_iter : int, optional (default = 5)\n Number of iterations for randomized SVD solver. Not used by 'full' or \n 'truncated'. The default is larger than the default in randomized_svd \n to handle sparse matrices that may have large slowly decaying spectrum.\n\n Attributes\n ----------\n latent_left_ : array, shape (n_samples, n_components)\n Estimated left latent positions of the graph.\n latent_right_ : array, shape (n_samples, n_components), or None\n Only computed when the graph is directed, or adjacency matrix is assymetric.\n Estimated right latent positions of the graph. Otherwise, None.\n singular_values_ : array, shape (n_components)\n Singular values associated with the latent position matrices.\n indices_ : array, or None\n If ``lcc`` is True, these are the indices of the vertices that were kept.\n\n See Also\n --------\n graspy.embed.selectSVD\n graspy.embed.select_dimension\n graspy.utils.to_laplace\n\n Notes\n -----\n The singular value decomposition: \n\n .. math:: A = U \\Sigma V^T\n\n is used to find an orthonormal basis for a matrix, which in our case is the Laplacian\n matrix of the graph. These basis vectors (in the matrices U or V) are ordered according \n to the amount of variance they explain in the original matrix. By selecting a subset of these\n basis vectors (through our choice of dimensionality reduction) we can find a lower dimensional \n space in which to represent the graph\n\n References\n ----------\n .. [1] Sussman, D.L., Tang, M., Fishkind, D.E., Priebe, C.E. \"A\n Consistent Adjacency Spectral Embedding for Stochastic Blockmodel Graphs,\"\n Journal of the American Statistical Association, Vol. 107(499), 2012\n \"\"\"\n\n def __init__(\n self,\n form='DAD',\n n_components=None,\n n_elbows=2,\n algorithm='randomized',\n n_iter=5,\n ):\n super().__init__(\n n_components=n_components,\n n_elbows=n_elbows,\n algorithm=algorithm,\n n_iter=n_iter,\n )\n self.form = form\n\n def fit(self, graph):\n \"\"\"\n Fit LSE model to input graph\n\n By default, uses the Laplacian normalization of the form:\n\n .. math:: L = D^{-1/2} A D^{-1/2}\n\n Parameters\n ----------\n graph : array_like or networkx.Graph\n Input graph to embed. see graphstats.utils.import_graph\n\n form : {'DAD' (default), 'I-DAD'}, optional\n Specifies the type of Laplacian normalization to use.\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n A = import_graph(graph)\n\n if not is_fully_connected(A):\n msg = \"\"\"Input graph is not fully connected. Results may not \\\n be optimal. 
You can compute the largest connected component by \\\n using ``graspy.utils.get_lcc``.\"\"\"\n warnings.warn(msg, UserWarning)\n\n L_norm = to_laplace(A, form=self.form)\n self._reduce_dim(L_norm)\n return self\n", "path": "graspy/embed/lse.py"}, {"content": "# ase.py\n# Created by Ben Pedigo on 2018-09-15.\n# Email: [email protected]\nimport warnings\n\nfrom .embed import BaseEmbed\nfrom .svd import selectSVD\nfrom ..utils import import_graph, get_lcc, is_fully_connected\n\n\nclass AdjacencySpectralEmbed(BaseEmbed):\n r\"\"\"\n Class for computing the adjacency spectral embedding of a graph \n \n The adjacency spectral embedding (ASE) is a k-dimensional Euclidean representation of \n the graph based on its adjacency matrix [1]_. It relies on an SVD to reduce the dimensionality\n to the specified k, or if k is unspecified, can find a number of dimensions automatically\n (see graphstats.embed.svd.selectSVD).\n\n Parameters\n ----------\n n_components : int or None, default = None\n Desired dimensionality of output data. If \"full\", \n n_components must be <= min(X.shape). Otherwise, n_components must be\n < min(X.shape). If None, then optimal dimensions will be chosen by\n ``select_dimension`` using ``n_elbows`` argument.\n n_elbows : int, optional, default: 2\n If `n_compoents=None`, then compute the optimal embedding dimension using\n `select_dimension`. Otherwise, ignored.\n algorithm : {'full', 'truncated' (default), 'randomized'}, optional\n SVD solver to use:\n\n - 'full'\n Computes full svd using ``scipy.linalg.svd``\n - 'truncated'\n Computes truncated svd using ``scipy.sparse.linalg.svd``\n - 'randomized'\n Computes randomized svd using \n ``sklearn.utils.extmath.randomized_svd``\n n_iter : int, optional (default = 5)\n Number of iterations for randomized SVD solver. Not used by 'full' or \n 'truncated'. The default is larger than the default in randomized_svd \n to handle sparse matrices that may have large slowly decaying spectrum.\n\n Attributes\n ----------\n latent_left_ : array, shape (n_samples, n_components)\n Estimated left latent positions of the graph. \n latent_right_ : array, shape (n_samples, n_components), or None\n Only computed when the graph is directed, or adjacency matrix is assymetric.\n Estimated right latent positions of the graph. Otherwise, None.\n singular_values_ : array, shape (n_components)\n Singular values associated with the latent position matrices. \n indices_ : array, or None\n If ``lcc`` is True, these are the indices of the vertices that were kept.\n\n See Also\n --------\n graspy.embed.selectSVD\n graspy.embed.select_dimension\n\n Notes\n -----\n The singular value decomposition: \n\n .. math:: A = U \\Sigma V^T\n\n is used to find an orthonormal basis for a matrix, which in our case is the adjacency\n matrix of the graph. These basis vectors (in the matrices U or V) are ordered according \n to the amount of variance they explain in the original matrix. By selecting a subset of these\n basis vectors (through our choice of dimensionality reduction) we can find a lower dimensional \n space in which to represent the graph\n\n References\n ----------\n .. [1] Sussman, D.L., Tang, M., Fishkind, D.E., Priebe, C.E. \"A\n Consistent Adjacency Spectral Embedding for Stochastic Blockmodel Graphs,\"\n Journal of the American Statistical Association, Vol. 
107(499), 2012\n \"\"\"\n\n def __init__(\n self,\n n_components=None,\n n_elbows=2,\n algorithm='randomized',\n n_iter=5,\n ):\n super().__init__(\n n_components=n_components,\n n_elbows=n_elbows,\n algorithm=algorithm,\n n_iter=n_iter,\n )\n\n def fit(self, graph):\n \"\"\"\n Fit ASE model to input graph\n\n Parameters\n ----------\n graph : array_like or networkx.Graph\n input graph to embed. see graphstats.utils.import_graph\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n A = import_graph(graph)\n\n if not is_fully_connected(A):\n msg = \"\"\"Input graph is not fully connected. Results may not \\\n be optimal. You can compute the largest connected component by \\\n using ``graspy.utils.get_lcc``.\"\"\"\n warnings.warn(msg, UserWarning)\n\n self._reduce_dim(A)\n return self", "path": "graspy/embed/ase.py"}]} | 2,991 | 268 |
gh_patches_debug_40331 | rasdani/github-patches | git_diff | searxng__searxng-3418 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Wikimedia Commons
**Working URL to the engine**
https://commons.wikimedia.org
**Why do you want to add this engine?**
Out of all of the Wikimedia projects, Wikimedia Commons is one of only two to not appear in any engine category in SearXNG, with the other being Wikispecies.
**Features of this engine**
It has a collection of [82,886,704](https://commons.wikimedia.org/wiki/Special:Statistics) [freely usable](https://commons.wikimedia.org/wiki/Commons:Reusing_content_outside_Wikimedia) media files.
**How can SearXNG fetch the information from this engine?**
`https://commons.wikimedia.org/w/index.php?search=%s` with `%s` being what you want to search.
**Applicable category of this engine**
General, files, images, music, videos.
--- END ISSUE ---
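Editorial aside on the issue above: the `index.php?search=%s` URL returns the HTML search page, while the engine file shown below talks to the MediaWiki API instead. A minimal sketch of that API request — the parameter names are taken from the engine code below; the helper name and the use of `requests` are illustrative assumptions, not part of SearXNG:

```python
# Hedged sketch: query the Commons MediaWiki API the same way the engine does.
import requests

def commons_search(query, filetype="bitmap|drawing", limit=5):
    params = {
        "action": "query",
        "format": "json",
        "generator": "search",
        "gsrnamespace": 6,          # the File: namespace
        "gsrsearch": f"filetype:{filetype} {query}",
        "gsrlimit": limit,
        "prop": "info|imageinfo",
        "iiprop": "url|size|mime",
        "iiurlheight": 180,
    }
    resp = requests.get("https://commons.wikimedia.org/w/api.php", params=params)
    return resp.json().get("query", {}).get("pages", {})

# for page in commons_search("sunflower").values():
#     print(page["title"], page["imageinfo"][0]["url"])
```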
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/engines/wikicommons.py`
Content:
```
1 # SPDX-License-Identifier: AGPL-3.0-or-later
2 """Wikimedia Commons (images)
3
4 """
5
6 from urllib.parse import urlencode
7
8 # about
9 about = {
10 "website": 'https://commons.wikimedia.org/',
11 "wikidata_id": 'Q565',
12 "official_api_documentation": 'https://commons.wikimedia.org/w/api.php',
13 "use_official_api": True,
14 "require_api_key": False,
15 "results": 'JSON',
16 }
17
18 base_url = "https://commons.wikimedia.org"
19 search_prefix = (
20 '?action=query'
21 '&format=json'
22 '&generator=search'
23 '&gsrnamespace=6'
24 '&gsrprop=snippet'
25 '&prop=info|imageinfo'
26 '&iiprop=url|size|mime'
27 '&iiurlheight=180' # needed for the thumb url
28 )
29 paging = True
30 number_of_results = 10
31
32
33 def request(query, params):
34 language = 'en'
35 if params['language'] != 'all':
36 language = params['language'].split('-')[0]
37
38 args = {
39 'uselang': language,
40 'gsrlimit': number_of_results,
41 'gsroffset': number_of_results * (params["pageno"] - 1),
42 'gsrsearch': "filetype:bitmap|drawing " + query,
43 }
44
45 params["url"] = f"{base_url}/w/api.php{search_prefix}&{urlencode(args, safe=':|')}"
46 return params
47
48
49 def response(resp):
50 results = []
51 json = resp.json()
52
53 if not json.get("query", {}).get("pages"):
54 return results
55
56 for item in json["query"]["pages"].values():
57 imageinfo = item["imageinfo"][0]
58 title = item["title"].replace("File:", "").rsplit('.', 1)[0]
59 result = {
60 'url': imageinfo["descriptionurl"],
61 'title': title,
62 'content': item["snippet"],
63 'img_src': imageinfo["url"],
64 'resolution': f'{imageinfo["width"]} x {imageinfo["height"]}',
65 'thumbnail_src': imageinfo["thumburl"],
66 'template': 'images.html',
67 }
68 results.append(result)
69
70 return results
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/searx/engines/wikicommons.py b/searx/engines/wikicommons.py
--- a/searx/engines/wikicommons.py
+++ b/searx/engines/wikicommons.py
@@ -3,6 +3,8 @@
"""
+import datetime
+
from urllib.parse import urlencode
# about
@@ -14,6 +16,8 @@
"require_api_key": False,
"results": 'JSON',
}
+categories = ['images']
+search_type = 'images'
base_url = "https://commons.wikimedia.org"
search_prefix = (
@@ -29,17 +33,29 @@
paging = True
number_of_results = 10
+search_types = {
+ 'images': 'bitmap|drawing',
+ 'videos': 'video',
+ 'audio': 'audio',
+ 'files': 'multimedia|office|archive|3d',
+}
+
def request(query, params):
language = 'en'
if params['language'] != 'all':
language = params['language'].split('-')[0]
+ if search_type not in search_types:
+ raise ValueError(f"Unsupported search type: {search_type}")
+
+ filetype = search_types[search_type]
+
args = {
'uselang': language,
'gsrlimit': number_of_results,
'gsroffset': number_of_results * (params["pageno"] - 1),
- 'gsrsearch': "filetype:bitmap|drawing " + query,
+ 'gsrsearch': f"filetype:{filetype} {query}",
}
params["url"] = f"{base_url}/w/api.php{search_prefix}&{urlencode(args, safe=':|')}"
@@ -52,7 +68,6 @@
if not json.get("query", {}).get("pages"):
return results
-
for item in json["query"]["pages"].values():
imageinfo = item["imageinfo"][0]
title = item["title"].replace("File:", "").rsplit('.', 1)[0]
@@ -60,11 +75,28 @@
'url': imageinfo["descriptionurl"],
'title': title,
'content': item["snippet"],
- 'img_src': imageinfo["url"],
- 'resolution': f'{imageinfo["width"]} x {imageinfo["height"]}',
- 'thumbnail_src': imageinfo["thumburl"],
- 'template': 'images.html',
}
+
+ if search_type == "images":
+ result['template'] = 'images.html'
+ result['img_src'] = imageinfo["url"]
+ result['thumbnail_src'] = imageinfo["thumburl"]
+ result['resolution'] = f'{imageinfo["width"]} x {imageinfo["height"]}'
+ else:
+ result['thumbnail'] = imageinfo["thumburl"]
+
+ if search_type == "videos":
+ result['template'] = 'videos.html'
+ if imageinfo.get('duration'):
+ result['length'] = datetime.timedelta(seconds=int(imageinfo['duration']))
+ result['iframe_src'] = imageinfo['url']
+ elif search_type == "files":
+ result['template'] = 'files.html'
+ result['metadata'] = imageinfo['mime']
+ result['size'] = imageinfo['size']
+ elif search_type == "audio":
+ result['iframe_src'] = imageinfo['url']
+
results.append(result)
return results
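A short usage note on the patch above: the new module-level `search_type` chooses both the `filetype:` clause sent to the API and the result template emitted, so the same module can back separate images/videos/audio/files engines. A minimal sketch of the query-building part, mirroring the patched `request()` (the standalone helper name is illustrative):

```python
search_types = {
    'images': 'bitmap|drawing',
    'videos': 'video',
    'audio': 'audio',
    'files': 'multimedia|office|archive|3d',
}

def build_gsrsearch(search_type, query):
    # Unknown types are rejected early, as in the patched request().
    if search_type not in search_types:
        raise ValueError(f"Unsupported search type: {search_type}")
    return f"filetype:{search_types[search_type]} {query}"

assert build_gsrsearch("videos", "sunflower") == "filetype:video sunflower"
```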
| {"golden_diff": "diff --git a/searx/engines/wikicommons.py b/searx/engines/wikicommons.py\n--- a/searx/engines/wikicommons.py\n+++ b/searx/engines/wikicommons.py\n@@ -3,6 +3,8 @@\n \n \"\"\"\n \n+import datetime\n+\n from urllib.parse import urlencode\n \n # about\n@@ -14,6 +16,8 @@\n \"require_api_key\": False,\n \"results\": 'JSON',\n }\n+categories = ['images']\n+search_type = 'images'\n \n base_url = \"https://commons.wikimedia.org\"\n search_prefix = (\n@@ -29,17 +33,29 @@\n paging = True\n number_of_results = 10\n \n+search_types = {\n+ 'images': 'bitmap|drawing',\n+ 'videos': 'video',\n+ 'audio': 'audio',\n+ 'files': 'multimedia|office|archive|3d',\n+}\n+\n \n def request(query, params):\n language = 'en'\n if params['language'] != 'all':\n language = params['language'].split('-')[0]\n \n+ if search_type not in search_types:\n+ raise ValueError(f\"Unsupported search type: {search_type}\")\n+\n+ filetype = search_types[search_type]\n+\n args = {\n 'uselang': language,\n 'gsrlimit': number_of_results,\n 'gsroffset': number_of_results * (params[\"pageno\"] - 1),\n- 'gsrsearch': \"filetype:bitmap|drawing \" + query,\n+ 'gsrsearch': f\"filetype:{filetype} {query}\",\n }\n \n params[\"url\"] = f\"{base_url}/w/api.php{search_prefix}&{urlencode(args, safe=':|')}\"\n@@ -52,7 +68,6 @@\n \n if not json.get(\"query\", {}).get(\"pages\"):\n return results\n-\n for item in json[\"query\"][\"pages\"].values():\n imageinfo = item[\"imageinfo\"][0]\n title = item[\"title\"].replace(\"File:\", \"\").rsplit('.', 1)[0]\n@@ -60,11 +75,28 @@\n 'url': imageinfo[\"descriptionurl\"],\n 'title': title,\n 'content': item[\"snippet\"],\n- 'img_src': imageinfo[\"url\"],\n- 'resolution': f'{imageinfo[\"width\"]} x {imageinfo[\"height\"]}',\n- 'thumbnail_src': imageinfo[\"thumburl\"],\n- 'template': 'images.html',\n }\n+\n+ if search_type == \"images\":\n+ result['template'] = 'images.html'\n+ result['img_src'] = imageinfo[\"url\"]\n+ result['thumbnail_src'] = imageinfo[\"thumburl\"]\n+ result['resolution'] = f'{imageinfo[\"width\"]} x {imageinfo[\"height\"]}'\n+ else:\n+ result['thumbnail'] = imageinfo[\"thumburl\"]\n+\n+ if search_type == \"videos\":\n+ result['template'] = 'videos.html'\n+ if imageinfo.get('duration'):\n+ result['length'] = datetime.timedelta(seconds=int(imageinfo['duration']))\n+ result['iframe_src'] = imageinfo['url']\n+ elif search_type == \"files\":\n+ result['template'] = 'files.html'\n+ result['metadata'] = imageinfo['mime']\n+ result['size'] = imageinfo['size']\n+ elif search_type == \"audio\":\n+ result['iframe_src'] = imageinfo['url']\n+\n results.append(result)\n \n return results\n", "issue": "Wikimedia Commons\n**Working URL to the engine**\r\nhttps://commons.wikimedia.org\r\n\r\n**Why do you want to add this engine?**\r\nOut of all of the Wikimedia projects, Wikimedia Commons is one of only two to not appear in any engine category in SearXNG, with the other being Wikispecies.\r\n\r\n**Features of this engine**\r\nIt has a collection of [82,886,704](https://commons.wikimedia.org/wiki/Special:Statistics) [freely usable](https://commons.wikimedia.org/wiki/Commons:Reusing_content_outside_Wikimedia) media files.\r\n\r\n**How can SearXNG fetch the information from this engine?**\r\n`https://commons.wikimedia.org/w/index.php?search=%s` with `%s` being what you want to search.\r\n\r\n**Applicable category of this engine**\r\nGeneral, files, images, music, videos.\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n\"\"\"Wikimedia Commons 
(images)\n\n\"\"\"\n\nfrom urllib.parse import urlencode\n\n# about\nabout = {\n \"website\": 'https://commons.wikimedia.org/',\n \"wikidata_id\": 'Q565',\n \"official_api_documentation\": 'https://commons.wikimedia.org/w/api.php',\n \"use_official_api\": True,\n \"require_api_key\": False,\n \"results\": 'JSON',\n}\n\nbase_url = \"https://commons.wikimedia.org\"\nsearch_prefix = (\n '?action=query'\n '&format=json'\n '&generator=search'\n '&gsrnamespace=6'\n '&gsrprop=snippet'\n '&prop=info|imageinfo'\n '&iiprop=url|size|mime'\n '&iiurlheight=180' # needed for the thumb url\n)\npaging = True\nnumber_of_results = 10\n\n\ndef request(query, params):\n language = 'en'\n if params['language'] != 'all':\n language = params['language'].split('-')[0]\n\n args = {\n 'uselang': language,\n 'gsrlimit': number_of_results,\n 'gsroffset': number_of_results * (params[\"pageno\"] - 1),\n 'gsrsearch': \"filetype:bitmap|drawing \" + query,\n }\n\n params[\"url\"] = f\"{base_url}/w/api.php{search_prefix}&{urlencode(args, safe=':|')}\"\n return params\n\n\ndef response(resp):\n results = []\n json = resp.json()\n\n if not json.get(\"query\", {}).get(\"pages\"):\n return results\n\n for item in json[\"query\"][\"pages\"].values():\n imageinfo = item[\"imageinfo\"][0]\n title = item[\"title\"].replace(\"File:\", \"\").rsplit('.', 1)[0]\n result = {\n 'url': imageinfo[\"descriptionurl\"],\n 'title': title,\n 'content': item[\"snippet\"],\n 'img_src': imageinfo[\"url\"],\n 'resolution': f'{imageinfo[\"width\"]} x {imageinfo[\"height\"]}',\n 'thumbnail_src': imageinfo[\"thumburl\"],\n 'template': 'images.html',\n }\n results.append(result)\n\n return results\n", "path": "searx/engines/wikicommons.py"}], "after_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n\"\"\"Wikimedia Commons (images)\n\n\"\"\"\n\nimport datetime\n\nfrom urllib.parse import urlencode\n\n# about\nabout = {\n \"website\": 'https://commons.wikimedia.org/',\n \"wikidata_id\": 'Q565',\n \"official_api_documentation\": 'https://commons.wikimedia.org/w/api.php',\n \"use_official_api\": True,\n \"require_api_key\": False,\n \"results\": 'JSON',\n}\ncategories = ['images']\nsearch_type = 'images'\n\nbase_url = \"https://commons.wikimedia.org\"\nsearch_prefix = (\n '?action=query'\n '&format=json'\n '&generator=search'\n '&gsrnamespace=6'\n '&gsrprop=snippet'\n '&prop=info|imageinfo'\n '&iiprop=url|size|mime'\n '&iiurlheight=180' # needed for the thumb url\n)\npaging = True\nnumber_of_results = 10\n\nsearch_types = {\n 'images': 'bitmap|drawing',\n 'videos': 'video',\n 'audio': 'audio',\n 'files': 'multimedia|office|archive|3d',\n}\n\n\ndef request(query, params):\n language = 'en'\n if params['language'] != 'all':\n language = params['language'].split('-')[0]\n\n if search_type not in search_types:\n raise ValueError(f\"Unsupported search type: {search_type}\")\n\n filetype = search_types[search_type]\n\n args = {\n 'uselang': language,\n 'gsrlimit': number_of_results,\n 'gsroffset': number_of_results * (params[\"pageno\"] - 1),\n 'gsrsearch': f\"filetype:{filetype} {query}\",\n }\n\n params[\"url\"] = f\"{base_url}/w/api.php{search_prefix}&{urlencode(args, safe=':|')}\"\n return params\n\n\ndef response(resp):\n results = []\n json = resp.json()\n\n if not json.get(\"query\", {}).get(\"pages\"):\n return results\n for item in json[\"query\"][\"pages\"].values():\n imageinfo = item[\"imageinfo\"][0]\n title = item[\"title\"].replace(\"File:\", \"\").rsplit('.', 1)[0]\n result = {\n 'url': imageinfo[\"descriptionurl\"],\n 'title': 
title,\n 'content': item[\"snippet\"],\n }\n\n if search_type == \"images\":\n result['template'] = 'images.html'\n result['img_src'] = imageinfo[\"url\"]\n result['thumbnail_src'] = imageinfo[\"thumburl\"]\n result['resolution'] = f'{imageinfo[\"width\"]} x {imageinfo[\"height\"]}'\n else:\n result['thumbnail'] = imageinfo[\"thumburl\"]\n\n if search_type == \"videos\":\n result['template'] = 'videos.html'\n if imageinfo.get('duration'):\n result['length'] = datetime.timedelta(seconds=int(imageinfo['duration']))\n result['iframe_src'] = imageinfo['url']\n elif search_type == \"files\":\n result['template'] = 'files.html'\n result['metadata'] = imageinfo['mime']\n result['size'] = imageinfo['size']\n elif search_type == \"audio\":\n result['iframe_src'] = imageinfo['url']\n\n results.append(result)\n\n return results\n", "path": "searx/engines/wikicommons.py"}]} | 1,084 | 786 |
gh_patches_debug_60750 | rasdani/github-patches | git_diff | larq__larq-80 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add docs on how to define your own quantizer
--- END ISSUE ---
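Editorial aside: the issue asks only for documentation, but a minimal example of what such docs could show follows. It reuses the exact pattern of the built-in quantizers in the file below — an inner `tf.custom_gradient` function supplying the pseudo-gradient, plus registration as a Keras custom object. The specific quantizer and its 0.5 gradient scale are illustrative choices, not larq APIs:

```python
import tensorflow as tf
from larq import utils


@tf.custom_gradient
def _binarize_with_scaled_grad(x):
    def grad(dy):
        # custom pseudo-gradient for the backward pass (illustrative)
        return 0.5 * dy

    return tf.sign(tf.sign(x) + 0.1), grad


@utils.register_keras_custom_object
def half_ste_sign(x):
    """A user-defined quantizer following the same pattern as `ste_sign`."""
    x = tf.clip_by_value(x, -1, 1)
    return _binarize_with_scaled_grad(x)

# Usage (illustrative): pass it by name or reference to a quantized layer,
# e.g. larq.layers.QuantDense(32, kernel_quantizer="half_ste_sign").
```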
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `larq/quantizers.py`
Content:
```
1 """A Quantizer defines the way of transforming a full precision input to a
2 quantized output and the pseudo-gradient method used for the backwards pass."""
3
4 import tensorflow as tf
5 from larq import utils
6
7
8 def sign(x):
9 """A sign function that will never be zero"""
10 return tf.sign(tf.sign(x) + 0.1)
11
12
13 @tf.custom_gradient
14 def _binarize_with_identity_grad(x):
15 def grad(dy):
16 return dy
17
18 return sign(x), grad
19
20
21 @tf.custom_gradient
22 def _binarize_with_weighted_grad(x):
23 def grad(dy):
24 return (1 - tf.abs(x)) * 2 * dy
25
26 return sign(x), grad
27
28
29 @utils.register_keras_custom_object
30 def ste_sign(x):
31 r"""
32 Sign binarization function.
33 \\[
34 q(x) = \begin{cases}
35 -1 & x < 0 \\\
36 1 & x \geq 0
37 \end{cases}
38 \\]
39
40 The gradient is estimated using the Straight-Through Estimator
41 (essentially the binarization is replaced by a clipped identity on the
42 backward pass).
43 \\[\frac{\partial q(x)}{\partial x} = \begin{cases}
44 1 & \left|x\right| \leq 1 \\\
45 0 & \left|x\right| > 1
46 \end{cases}\\]
47
48 # Arguments
49 x: Input tensor.
50
51 # Returns
52 Binarized tensor.
53
54 # References
55 - [Binarized Neural Networks: Training Deep Neural Networks with Weights and
56 Activations Constrained to +1 or -1](http://arxiv.org/abs/1602.02830)
57 """
58
59 x = tf.clip_by_value(x, -1, 1)
60
61 return _binarize_with_identity_grad(x)
62
63
64 @utils.register_keras_custom_object
65 def magnitude_aware_sign(x):
66 r"""
67 Magnitude-aware sign for birealnet.
68
69
70 # Arguments
71 x: Input tensor
72
73 # Returns
74 Scaled binarized tensor (with values in $\{-a, a\}$, where $a$ is a float).
75
76 # References
77 - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved
78 Representational Capability and Advanced Training
79 Algorithm](https://arxiv.org/abs/1808.00278)
80
81 """
82 scale_factor = tf.stop_gradient(
83 tf.reduce_mean(tf.abs(x), axis=list(range(len(x.shape) - 1)))
84 )
85 return scale_factor * ste_sign(x)
86
87
88 @utils.register_keras_custom_object
89 def approx_sign(x):
90 r"""
91 Sign binarization function.
92 \\[
93 q(x) = \begin{cases}
94 -1 & x < 0 \\\
95 1 & x \geq 0
96 \end{cases}
97 \\]
98
99 The gradient is estimated using the ApproxSign method.
100 \\[\frac{\partial q(x)}{\partial x} = \begin{cases}
101 (2 - 2 \left|x\right|) & \left|x\right| \leq 1 \\\
102 0 & \left|x\right| > 1
103 \end{cases}
104 \\]
105
106 # Arguments
107 x: Input tensor.
108
109 # Returns
110 Binarized tensor.
111
112 # References
113 - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved
114 Representational Capability and Advanced
115 Training Algorithm](http://arxiv.org/abs/1808.00278)
116 """
117
118 x = tf.clip_by_value(x, -1, 1)
119
120 return _binarize_with_weighted_grad(x)
121
122
123 def serialize(initializer):
124 return tf.keras.utils.serialize_keras_object(initializer)
125
126
127 def deserialize(name, custom_objects=None):
128 return tf.keras.utils.deserialize_keras_object(
129 name,
130 module_objects=globals(),
131 custom_objects=custom_objects,
132 printable_module_name="quantization function",
133 )
134
135
136 def get(identifier):
137 if identifier is None:
138 return None
139 if isinstance(identifier, str):
140 return deserialize(str(identifier))
141 if callable(identifier):
142 return identifier
143 raise ValueError(
144 f"Could not interpret quantization function identifier: {identifier}"
145 )
146
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/larq/quantizers.py b/larq/quantizers.py
--- a/larq/quantizers.py
+++ b/larq/quantizers.py
@@ -64,7 +64,7 @@
@utils.register_keras_custom_object
def magnitude_aware_sign(x):
r"""
- Magnitude-aware sign for birealnet.
+ Magnitude-aware sign for Bi-Real Net.
# Arguments
| {"golden_diff": "diff --git a/larq/quantizers.py b/larq/quantizers.py\n--- a/larq/quantizers.py\n+++ b/larq/quantizers.py\n@@ -64,7 +64,7 @@\n @utils.register_keras_custom_object\n def magnitude_aware_sign(x):\n r\"\"\"\n- Magnitude-aware sign for birealnet.\n+ Magnitude-aware sign for Bi-Real Net.\n \n \n # Arguments\n", "issue": "Add docs on how to define your own quantizer\n\n", "before_files": [{"content": "\"\"\"A Quantizer defines the way of transforming a full precision input to a\nquantized output and the pseudo-gradient method used for the backwards pass.\"\"\"\n\nimport tensorflow as tf\nfrom larq import utils\n\n\ndef sign(x):\n \"\"\"A sign function that will never be zero\"\"\"\n return tf.sign(tf.sign(x) + 0.1)\n\n\[email protected]_gradient\ndef _binarize_with_identity_grad(x):\n def grad(dy):\n return dy\n\n return sign(x), grad\n\n\[email protected]_gradient\ndef _binarize_with_weighted_grad(x):\n def grad(dy):\n return (1 - tf.abs(x)) * 2 * dy\n\n return sign(x), grad\n\n\[email protected]_keras_custom_object\ndef ste_sign(x):\n r\"\"\"\n Sign binarization function.\n \\\\[\n q(x) = \\begin{cases}\n -1 & x < 0 \\\\\\\n 1 & x \\geq 0\n \\end{cases}\n \\\\]\n\n The gradient is estimated using the Straight-Through Estimator\n (essentially the binarization is replaced by a clipped identity on the\n backward pass).\n \\\\[\\frac{\\partial q(x)}{\\partial x} = \\begin{cases}\n 1 & \\left|x\\right| \\leq 1 \\\\\\\n 0 & \\left|x\\right| > 1\n \\end{cases}\\\\]\n\n # Arguments\n x: Input tensor.\n\n # Returns\n Binarized tensor.\n\n # References\n - [Binarized Neural Networks: Training Deep Neural Networks with Weights and\n Activations Constrained to +1 or -1](http://arxiv.org/abs/1602.02830)\n \"\"\"\n\n x = tf.clip_by_value(x, -1, 1)\n\n return _binarize_with_identity_grad(x)\n\n\[email protected]_keras_custom_object\ndef magnitude_aware_sign(x):\n r\"\"\"\n Magnitude-aware sign for birealnet.\n\n\n # Arguments\n x: Input tensor\n\n # Returns\n Scaled binarized tensor (with values in $\\{-a, a\\}$, where $a$ is a float).\n\n # References\n - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved\n Representational Capability and Advanced Training\n Algorithm](https://arxiv.org/abs/1808.00278)\n\n \"\"\"\n scale_factor = tf.stop_gradient(\n tf.reduce_mean(tf.abs(x), axis=list(range(len(x.shape) - 1)))\n )\n return scale_factor * ste_sign(x)\n\n\[email protected]_keras_custom_object\ndef approx_sign(x):\n r\"\"\"\n Sign binarization function.\n \\\\[\n q(x) = \\begin{cases}\n -1 & x < 0 \\\\\\\n 1 & x \\geq 0\n \\end{cases}\n \\\\]\n\n The gradient is estimated using the ApproxSign method.\n \\\\[\\frac{\\partial q(x)}{\\partial x} = \\begin{cases}\n (2 - 2 \\left|x\\right|) & \\left|x\\right| \\leq 1 \\\\\\\n 0 & \\left|x\\right| > 1\n \\end{cases}\n \\\\]\n\n # Arguments\n x: Input tensor.\n\n # Returns\n Binarized tensor.\n\n # References\n - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved\n Representational Capability and Advanced\n Training Algorithm](http://arxiv.org/abs/1808.00278)\n \"\"\"\n\n x = tf.clip_by_value(x, -1, 1)\n\n return _binarize_with_weighted_grad(x)\n\n\ndef serialize(initializer):\n return tf.keras.utils.serialize_keras_object(initializer)\n\n\ndef deserialize(name, custom_objects=None):\n return tf.keras.utils.deserialize_keras_object(\n name,\n module_objects=globals(),\n custom_objects=custom_objects,\n printable_module_name=\"quantization function\",\n )\n\n\ndef get(identifier):\n if identifier is None:\n return 
None\n if isinstance(identifier, str):\n return deserialize(str(identifier))\n if callable(identifier):\n return identifier\n raise ValueError(\n f\"Could not interpret quantization function identifier: {identifier}\"\n )\n", "path": "larq/quantizers.py"}], "after_files": [{"content": "\"\"\"A Quantizer defines the way of transforming a full precision input to a\nquantized output and the pseudo-gradient method used for the backwards pass.\"\"\"\n\nimport tensorflow as tf\nfrom larq import utils\n\n\ndef sign(x):\n \"\"\"A sign function that will never be zero\"\"\"\n return tf.sign(tf.sign(x) + 0.1)\n\n\[email protected]_gradient\ndef _binarize_with_identity_grad(x):\n def grad(dy):\n return dy\n\n return sign(x), grad\n\n\[email protected]_gradient\ndef _binarize_with_weighted_grad(x):\n def grad(dy):\n return (1 - tf.abs(x)) * 2 * dy\n\n return sign(x), grad\n\n\[email protected]_keras_custom_object\ndef ste_sign(x):\n r\"\"\"\n Sign binarization function.\n \\\\[\n q(x) = \\begin{cases}\n -1 & x < 0 \\\\\\\n 1 & x \\geq 0\n \\end{cases}\n \\\\]\n\n The gradient is estimated using the Straight-Through Estimator\n (essentially the binarization is replaced by a clipped identity on the\n backward pass).\n \\\\[\\frac{\\partial q(x)}{\\partial x} = \\begin{cases}\n 1 & \\left|x\\right| \\leq 1 \\\\\\\n 0 & \\left|x\\right| > 1\n \\end{cases}\\\\]\n\n # Arguments\n x: Input tensor.\n\n # Returns\n Binarized tensor.\n\n # References\n - [Binarized Neural Networks: Training Deep Neural Networks with Weights and\n Activations Constrained to +1 or -1](http://arxiv.org/abs/1602.02830)\n \"\"\"\n\n x = tf.clip_by_value(x, -1, 1)\n\n return _binarize_with_identity_grad(x)\n\n\[email protected]_keras_custom_object\ndef magnitude_aware_sign(x):\n r\"\"\"\n Magnitude-aware sign for Bi-Real Net.\n\n\n # Arguments\n x: Input tensor\n\n # Returns\n Scaled binarized tensor (with values in $\\{-a, a\\}$, where $a$ is a float).\n\n # References\n - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved\n Representational Capability and Advanced Training\n Algorithm](https://arxiv.org/abs/1808.00278)\n\n \"\"\"\n scale_factor = tf.stop_gradient(\n tf.reduce_mean(tf.abs(x), axis=list(range(len(x.shape) - 1)))\n )\n return scale_factor * ste_sign(x)\n\n\[email protected]_keras_custom_object\ndef approx_sign(x):\n r\"\"\"\n Sign binarization function.\n \\\\[\n q(x) = \\begin{cases}\n -1 & x < 0 \\\\\\\n 1 & x \\geq 0\n \\end{cases}\n \\\\]\n\n The gradient is estimated using the ApproxSign method.\n \\\\[\\frac{\\partial q(x)}{\\partial x} = \\begin{cases}\n (2 - 2 \\left|x\\right|) & \\left|x\\right| \\leq 1 \\\\\\\n 0 & \\left|x\\right| > 1\n \\end{cases}\n \\\\]\n\n # Arguments\n x: Input tensor.\n\n # Returns\n Binarized tensor.\n\n # References\n - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved\n Representational Capability and Advanced\n Training Algorithm](http://arxiv.org/abs/1808.00278)\n \"\"\"\n\n x = tf.clip_by_value(x, -1, 1)\n\n return _binarize_with_weighted_grad(x)\n\n\ndef serialize(initializer):\n return tf.keras.utils.serialize_keras_object(initializer)\n\n\ndef deserialize(name, custom_objects=None):\n return tf.keras.utils.deserialize_keras_object(\n name,\n module_objects=globals(),\n custom_objects=custom_objects,\n printable_module_name=\"quantization function\",\n )\n\n\ndef get(identifier):\n if identifier is None:\n return None\n if isinstance(identifier, str):\n return deserialize(str(identifier))\n if callable(identifier):\n return identifier\n 
raise ValueError(\n f\"Could not interpret quantization function identifier: {identifier}\"\n )\n", "path": "larq/quantizers.py"}]} | 1,578 | 99 |
gh_patches_debug_6197 | rasdani/github-patches | git_diff | gratipay__gratipay.com-2491 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
can't save any profile info
Reported by @chrisamaphone on [Twitter](https://twitter.com/chrisamaphone/status/476775868778704896).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gittip/security/csrf.py`
Content:
```
1 """Cross Site Request Forgery middleware, borrowed from Django.
2
3 See also:
4
5 https://github.com/django/django/blob/master/django/middleware/csrf.py
6 https://docs.djangoproject.com/en/dev/ref/contrib/csrf/
7 https://github.com/gittip/www.gittip.com/issues/88
8
9 """
10
11 from datetime import timedelta
12 import re
13 import urlparse
14 from aspen import log_dammit
15
16
17 #from django.utils.cache import patch_vary_headers
18 cc_delim_re = re.compile(r'\s*,\s*')
19 def patch_vary_headers(response, newheaders):
20 """
21 Adds (or updates) the "Vary" header in the given HttpResponse object.
22 newheaders is a list of header names that should be in "Vary". Existing
23 headers in "Vary" aren't removed.
24 """
25 # Note that we need to keep the original order intact, because cache
26 # implementations may rely on the order of the Vary contents in, say,
27 # computing an MD5 hash.
28 if 'Vary' in response.headers:
29 vary_headers = cc_delim_re.split(response.headers['Vary'])
30 else:
31 vary_headers = []
32 # Use .lower() here so we treat headers as case-insensitive.
33 existing_headers = set([header.lower() for header in vary_headers])
34 additional_headers = [newheader for newheader in newheaders
35 if newheader.lower() not in existing_headers]
36 response.headers['Vary'] = ', '.join(vary_headers + additional_headers)
37
38
39 #from django.utils.http import same_origin
40 def same_origin(url1, url2):
41 """
42 Checks if two URLs are 'same-origin'
43 """
44 p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2)
45 return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)
46
47
48 from aspen import Response
49 from crypto import constant_time_compare, get_random_string
50
51 REASON_NO_REFERER = "Referer checking failed - no Referer."
52 REASON_BAD_REFERER = "Referer checking failed - %s does not match %s."
53 REASON_NO_CSRF_COOKIE = "CSRF cookie not set."
54 REASON_BAD_TOKEN = "CSRF token missing or incorrect."
55
56 TOKEN_LENGTH = 32
57 CSRF_TIMEOUT = timedelta(days=7)
58
59
60 def _get_new_csrf_key():
61 return get_random_string(TOKEN_LENGTH)
62
63
64 def _sanitize_token(token):
65 # Allow only alphanum, and ensure we return a 'str' for the sake
66 # of the post processing middleware.
67 if len(token) > TOKEN_LENGTH:
68 return _get_new_csrf_key()
69 token = re.sub('[^a-zA-Z0-9]+', '', str(token.decode('ascii', 'ignore')))
70 if token == "":
71 # In case the cookie has been truncated to nothing at some point.
72 return _get_new_csrf_key()
73 return token
74
75 def _is_secure(request):
76 import gittip
77 return gittip.canonical_scheme == 'https'
78
79 def _get_host(request):
80 """Returns the HTTP host using the request headers.
81 """
82 return request.headers.get('X-Forwarded-Host', request.headers['Host'])
83
84
85
86 def inbound(request):
87 """Given a Request object, reject it if it's a forgery.
88 """
89 if request.line.uri.startswith('/assets/'): return
90
91 try:
92 csrf_token = request.headers.cookie.get('csrf_token')
93 csrf_token = '' if csrf_token is None else csrf_token.value
94 csrf_token = _sanitize_token(csrf_token)
95 except KeyError:
96 csrf_token = _get_new_csrf_key()
97
98 request.context['csrf_token'] = csrf_token
99
100 # Assume that anything not defined as 'safe' by RC2616 needs protection
101 if request.line.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
102
103 if _is_secure(request):
104 # Suppose user visits http://example.com/
105 # An active network attacker (man-in-the-middle, MITM) sends a
106 # POST form that targets https://example.com/detonate-bomb/ and
107 # submits it via JavaScript.
108 #
109 # The attacker will need to provide a CSRF cookie and token, but
110 # that's no problem for a MITM and the session-independent
111 # nonce we're using. So the MITM can circumvent the CSRF
112 # protection. This is true for any HTTP connection, but anyone
113 # using HTTPS expects better! For this reason, for
114 # https://example.com/ we need additional protection that treats
115 # http://example.com/ as completely untrusted. Under HTTPS,
116 # Barth et al. found that the Referer header is missing for
117 # same-domain requests in only about 0.2% of cases or less, so
118 # we can use strict Referer checking.
119 referer = request.headers.get('Referer')
120 if referer is None:
121 raise Response(403, REASON_NO_REFERER)
122
123 good_referer = 'https://%s/' % _get_host(request)
124 if not same_origin(referer, good_referer):
125 reason = REASON_BAD_REFERER % (referer, good_referer)
126 log_dammit(reason)
127 raise Response(403, reason)
128
129 if csrf_token is None:
130 raise Response(403, REASON_NO_CSRF_COOKIE)
131
132 # Check non-cookie token for match.
133 request_csrf_token = ""
134 if request.line.method == "POST":
135 request_csrf_token = request.body.get('csrf_token', '')
136
137 if request_csrf_token == "":
138 # Fall back to X-CSRF-TOKEN, to make things easier for AJAX,
139 # and possible for PUT/DELETE.
140 request_csrf_token = request.headers.get('X-CSRF-TOKEN', '')
141
142 if not constant_time_compare(request_csrf_token, csrf_token):
143 raise Response(403, REASON_BAD_TOKEN)
144
145
146 def outbound(request, response):
147 """Store the latest CSRF token as a cookie.
148 """
149 csrf_token = request.context.get('csrf_token')
150 if csrf_token:
151 response.set_cookie('csrf_token', csrf_token, expires=CSRF_TIMEOUT)
152
153 # Content varies with the CSRF cookie, so set the Vary header.
154 patch_vary_headers(response, ('Cookie',))
155
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gittip/security/csrf.py b/gittip/security/csrf.py
--- a/gittip/security/csrf.py
+++ b/gittip/security/csrf.py
@@ -148,7 +148,7 @@
"""
csrf_token = request.context.get('csrf_token')
if csrf_token:
- response.set_cookie('csrf_token', csrf_token, expires=CSRF_TIMEOUT)
+ response.set_cookie('csrf_token', csrf_token, expires=CSRF_TIMEOUT, httponly=False)
# Content varies with the CSRF cookie, so set the Vary header.
patch_vary_headers(response, ('Cookie',))
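A brief editorial note on why this one-line change fixes profile saves: the inbound hook shown earlier accepts the token either as a `csrf_token` form field or as an `X-CSRF-TOKEN` header, both of which the site's JavaScript can only supply if it can read the `csrf_token` cookie; an HttpOnly cookie is hidden from `document.cookie`, so AJAX saves fail the token check with a 403. A hypothetical regression check — `FakeResponse` is a stand-in for illustration, not Aspen's real response API:

```python
class FakeResponse:
    def __init__(self):
        self.cookies = {}

    def set_cookie(self, name, value, expires=None, httponly=True):
        self.cookies[name] = {"value": value, "httponly": httponly}


def test_csrf_cookie_is_readable_by_javascript():
    response = FakeResponse()
    # Mirrors the patched outbound(): httponly must be passed explicitly as False.
    response.set_cookie("csrf_token", "abc123", expires=None, httponly=False)
    assert response.cookies["csrf_token"]["httponly"] is False
```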
| {"golden_diff": "diff --git a/gittip/security/csrf.py b/gittip/security/csrf.py\n--- a/gittip/security/csrf.py\n+++ b/gittip/security/csrf.py\n@@ -148,7 +148,7 @@\n \"\"\"\n csrf_token = request.context.get('csrf_token')\n if csrf_token:\n- response.set_cookie('csrf_token', csrf_token, expires=CSRF_TIMEOUT)\n+ response.set_cookie('csrf_token', csrf_token, expires=CSRF_TIMEOUT, httponly=False)\n \n # Content varies with the CSRF cookie, so set the Vary header.\n patch_vary_headers(response, ('Cookie',))\n", "issue": "can't save any profile info\nReported by @chrisamaphone on [Twitter](https://twitter.com/chrisamaphone/status/476775868778704896).\n\n", "before_files": [{"content": "\"\"\"Cross Site Request Forgery middleware, borrowed from Django.\n\nSee also:\n\n https://github.com/django/django/blob/master/django/middleware/csrf.py\n https://docs.djangoproject.com/en/dev/ref/contrib/csrf/\n https://github.com/gittip/www.gittip.com/issues/88\n\n\"\"\"\n\nfrom datetime import timedelta\nimport re\nimport urlparse\nfrom aspen import log_dammit\n\n\n#from django.utils.cache import patch_vary_headers\ncc_delim_re = re.compile(r'\\s*,\\s*')\ndef patch_vary_headers(response, newheaders):\n \"\"\"\n Adds (or updates) the \"Vary\" header in the given HttpResponse object.\n newheaders is a list of header names that should be in \"Vary\". Existing\n headers in \"Vary\" aren't removed.\n \"\"\"\n # Note that we need to keep the original order intact, because cache\n # implementations may rely on the order of the Vary contents in, say,\n # computing an MD5 hash.\n if 'Vary' in response.headers:\n vary_headers = cc_delim_re.split(response.headers['Vary'])\n else:\n vary_headers = []\n # Use .lower() here so we treat headers as case-insensitive.\n existing_headers = set([header.lower() for header in vary_headers])\n additional_headers = [newheader for newheader in newheaders\n if newheader.lower() not in existing_headers]\n response.headers['Vary'] = ', '.join(vary_headers + additional_headers)\n\n\n#from django.utils.http import same_origin\ndef same_origin(url1, url2):\n \"\"\"\n Checks if two URLs are 'same-origin'\n \"\"\"\n p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2)\n return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)\n\n\nfrom aspen import Response\nfrom crypto import constant_time_compare, get_random_string\n\nREASON_NO_REFERER = \"Referer checking failed - no Referer.\"\nREASON_BAD_REFERER = \"Referer checking failed - %s does not match %s.\"\nREASON_NO_CSRF_COOKIE = \"CSRF cookie not set.\"\nREASON_BAD_TOKEN = \"CSRF token missing or incorrect.\"\n\nTOKEN_LENGTH = 32\nCSRF_TIMEOUT = timedelta(days=7)\n\n\ndef _get_new_csrf_key():\n return get_random_string(TOKEN_LENGTH)\n\n\ndef _sanitize_token(token):\n # Allow only alphanum, and ensure we return a 'str' for the sake\n # of the post processing middleware.\n if len(token) > TOKEN_LENGTH:\n return _get_new_csrf_key()\n token = re.sub('[^a-zA-Z0-9]+', '', str(token.decode('ascii', 'ignore')))\n if token == \"\":\n # In case the cookie has been truncated to nothing at some point.\n return _get_new_csrf_key()\n return token\n\ndef _is_secure(request):\n import gittip\n return gittip.canonical_scheme == 'https'\n\ndef _get_host(request):\n \"\"\"Returns the HTTP host using the request headers.\n \"\"\"\n return request.headers.get('X-Forwarded-Host', request.headers['Host'])\n\n\n\ndef inbound(request):\n \"\"\"Given a Request object, reject it if it's a forgery.\n \"\"\"\n if 
request.line.uri.startswith('/assets/'): return\n\n try:\n csrf_token = request.headers.cookie.get('csrf_token')\n csrf_token = '' if csrf_token is None else csrf_token.value\n csrf_token = _sanitize_token(csrf_token)\n except KeyError:\n csrf_token = _get_new_csrf_key()\n\n request.context['csrf_token'] = csrf_token\n\n # Assume that anything not defined as 'safe' by RC2616 needs protection\n if request.line.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):\n\n if _is_secure(request):\n # Suppose user visits http://example.com/\n # An active network attacker (man-in-the-middle, MITM) sends a\n # POST form that targets https://example.com/detonate-bomb/ and\n # submits it via JavaScript.\n #\n # The attacker will need to provide a CSRF cookie and token, but\n # that's no problem for a MITM and the session-independent\n # nonce we're using. So the MITM can circumvent the CSRF\n # protection. This is true for any HTTP connection, but anyone\n # using HTTPS expects better! For this reason, for\n # https://example.com/ we need additional protection that treats\n # http://example.com/ as completely untrusted. Under HTTPS,\n # Barth et al. found that the Referer header is missing for\n # same-domain requests in only about 0.2% of cases or less, so\n # we can use strict Referer checking.\n referer = request.headers.get('Referer')\n if referer is None:\n raise Response(403, REASON_NO_REFERER)\n\n good_referer = 'https://%s/' % _get_host(request)\n if not same_origin(referer, good_referer):\n reason = REASON_BAD_REFERER % (referer, good_referer)\n log_dammit(reason)\n raise Response(403, reason)\n\n if csrf_token is None:\n raise Response(403, REASON_NO_CSRF_COOKIE)\n\n # Check non-cookie token for match.\n request_csrf_token = \"\"\n if request.line.method == \"POST\":\n request_csrf_token = request.body.get('csrf_token', '')\n\n if request_csrf_token == \"\":\n # Fall back to X-CSRF-TOKEN, to make things easier for AJAX,\n # and possible for PUT/DELETE.\n request_csrf_token = request.headers.get('X-CSRF-TOKEN', '')\n\n if not constant_time_compare(request_csrf_token, csrf_token):\n raise Response(403, REASON_BAD_TOKEN)\n\n\ndef outbound(request, response):\n \"\"\"Store the latest CSRF token as a cookie.\n \"\"\"\n csrf_token = request.context.get('csrf_token')\n if csrf_token:\n response.set_cookie('csrf_token', csrf_token, expires=CSRF_TIMEOUT)\n\n # Content varies with the CSRF cookie, so set the Vary header.\n patch_vary_headers(response, ('Cookie',))\n", "path": "gittip/security/csrf.py"}], "after_files": [{"content": "\"\"\"Cross Site Request Forgery middleware, borrowed from Django.\n\nSee also:\n\n https://github.com/django/django/blob/master/django/middleware/csrf.py\n https://docs.djangoproject.com/en/dev/ref/contrib/csrf/\n https://github.com/gittip/www.gittip.com/issues/88\n\n\"\"\"\n\nfrom datetime import timedelta\nimport re\nimport urlparse\nfrom aspen import log_dammit\n\n\n#from django.utils.cache import patch_vary_headers\ncc_delim_re = re.compile(r'\\s*,\\s*')\ndef patch_vary_headers(response, newheaders):\n \"\"\"\n Adds (or updates) the \"Vary\" header in the given HttpResponse object.\n newheaders is a list of header names that should be in \"Vary\". 
Existing\n headers in \"Vary\" aren't removed.\n \"\"\"\n # Note that we need to keep the original order intact, because cache\n # implementations may rely on the order of the Vary contents in, say,\n # computing an MD5 hash.\n if 'Vary' in response.headers:\n vary_headers = cc_delim_re.split(response.headers['Vary'])\n else:\n vary_headers = []\n # Use .lower() here so we treat headers as case-insensitive.\n existing_headers = set([header.lower() for header in vary_headers])\n additional_headers = [newheader for newheader in newheaders\n if newheader.lower() not in existing_headers]\n response.headers['Vary'] = ', '.join(vary_headers + additional_headers)\n\n\n#from django.utils.http import same_origin\ndef same_origin(url1, url2):\n \"\"\"\n Checks if two URLs are 'same-origin'\n \"\"\"\n p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2)\n return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)\n\n\nfrom aspen import Response\nfrom crypto import constant_time_compare, get_random_string\n\nREASON_NO_REFERER = \"Referer checking failed - no Referer.\"\nREASON_BAD_REFERER = \"Referer checking failed - %s does not match %s.\"\nREASON_NO_CSRF_COOKIE = \"CSRF cookie not set.\"\nREASON_BAD_TOKEN = \"CSRF token missing or incorrect.\"\n\nTOKEN_LENGTH = 32\nCSRF_TIMEOUT = timedelta(days=7)\n\n\ndef _get_new_csrf_key():\n return get_random_string(TOKEN_LENGTH)\n\n\ndef _sanitize_token(token):\n # Allow only alphanum, and ensure we return a 'str' for the sake\n # of the post processing middleware.\n if len(token) > TOKEN_LENGTH:\n return _get_new_csrf_key()\n token = re.sub('[^a-zA-Z0-9]+', '', str(token.decode('ascii', 'ignore')))\n if token == \"\":\n # In case the cookie has been truncated to nothing at some point.\n return _get_new_csrf_key()\n return token\n\ndef _is_secure(request):\n import gittip\n return gittip.canonical_scheme == 'https'\n\ndef _get_host(request):\n \"\"\"Returns the HTTP host using the request headers.\n \"\"\"\n return request.headers.get('X-Forwarded-Host', request.headers['Host'])\n\n\n\ndef inbound(request):\n \"\"\"Given a Request object, reject it if it's a forgery.\n \"\"\"\n if request.line.uri.startswith('/assets/'): return\n\n try:\n csrf_token = request.headers.cookie.get('csrf_token')\n csrf_token = '' if csrf_token is None else csrf_token.value\n csrf_token = _sanitize_token(csrf_token)\n except KeyError:\n csrf_token = _get_new_csrf_key()\n\n request.context['csrf_token'] = csrf_token\n\n # Assume that anything not defined as 'safe' by RC2616 needs protection\n if request.line.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):\n\n if _is_secure(request):\n # Suppose user visits http://example.com/\n # An active network attacker (man-in-the-middle, MITM) sends a\n # POST form that targets https://example.com/detonate-bomb/ and\n # submits it via JavaScript.\n #\n # The attacker will need to provide a CSRF cookie and token, but\n # that's no problem for a MITM and the session-independent\n # nonce we're using. So the MITM can circumvent the CSRF\n # protection. This is true for any HTTP connection, but anyone\n # using HTTPS expects better! For this reason, for\n # https://example.com/ we need additional protection that treats\n # http://example.com/ as completely untrusted. Under HTTPS,\n # Barth et al. 
found that the Referer header is missing for\n # same-domain requests in only about 0.2% of cases or less, so\n # we can use strict Referer checking.\n referer = request.headers.get('Referer')\n if referer is None:\n raise Response(403, REASON_NO_REFERER)\n\n good_referer = 'https://%s/' % _get_host(request)\n if not same_origin(referer, good_referer):\n reason = REASON_BAD_REFERER % (referer, good_referer)\n log_dammit(reason)\n raise Response(403, reason)\n\n if csrf_token is None:\n raise Response(403, REASON_NO_CSRF_COOKIE)\n\n # Check non-cookie token for match.\n request_csrf_token = \"\"\n if request.line.method == \"POST\":\n request_csrf_token = request.body.get('csrf_token', '')\n\n if request_csrf_token == \"\":\n # Fall back to X-CSRF-TOKEN, to make things easier for AJAX,\n # and possible for PUT/DELETE.\n request_csrf_token = request.headers.get('X-CSRF-TOKEN', '')\n\n if not constant_time_compare(request_csrf_token, csrf_token):\n raise Response(403, REASON_BAD_TOKEN)\n\n\ndef outbound(request, response):\n \"\"\"Store the latest CSRF token as a cookie.\n \"\"\"\n csrf_token = request.context.get('csrf_token')\n if csrf_token:\n response.set_cookie('csrf_token', csrf_token, expires=CSRF_TIMEOUT, httponly=False)\n\n # Content varies with the CSRF cookie, so set the Vary header.\n patch_vary_headers(response, ('Cookie',))\n", "path": "gittip/security/csrf.py"}]} | 2,049 | 141 |
gh_patches_debug_16585 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-3111 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Marking EMR cluster for termination throws exception
Marking an EMR cluster for termination throws an exception in version c7n-0.8.31.2, even though I can see the tag created on the cluster. The policy:
`````
policies:
- name: emr-mark-clusters-for-termination
resource: emr
filters:
- type: value
key: "Id"
op: in
value:
- 'abcdefghij'
actions:
- type: mark-for-op
tag: 'custodian-emr-terminate'
op: terminate
days: 4
`````
This policy throws the following exception:
2018-09-27 19:20:30,262: custodian.actions:INFO Tagging 1 resources for terminate on 2018/10/01
2018-09-27 19:20:31,720: custodian.actions:ERROR Exception with tags: [{u'Value': u'Resource does not meet policy: terminate@2018/10/01', u'Key': 'custodian-emr-terminate'}] on resources: abcdefghij
'dict' object is not callable
Though the EMR cluster is marked with the tag 'custodian-emr-terminate', filtering on type: marked-for-op returns 0 resources.
--- END ISSUE ---
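Editorial note on the traceback above: `'dict' object is not callable` is the classic symptom of passing the *result* of a boto3 call to a retry helper that expects a callable — `client.add_tags(...)` returns a response dict, and a wrapper built by something like `c7n.utils.get_retry` will then try to call that dict. A minimal reproduction of the pattern, with `retry` and `add_tags` as simplified stand-ins:

```python
def retry(func, *args, **kwargs):
    # Simplified stand-in for a retry wrapper: it expects a callable.
    return func(*args, **kwargs)


def add_tags(ResourceId, Tags):
    # Stand-in for the boto3 EMR client call, which returns a response dict.
    return {"ResponseMetadata": {"HTTPStatusCode": 200}}


try:
    retry(add_tags(ResourceId="abcdefghij", Tags=[]))   # passes the dict result
except TypeError as exc:
    print(exc)                                          # 'dict' object is not callable

retry(add_tags, ResourceId="abcdefghij", Tags=[])       # passes the callable itself
```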
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `c7n/resources/emr.py`
Content:
```
1 # Copyright 2016-2017 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from __future__ import absolute_import, division, print_function, unicode_literals
15
16 import logging
17 import time
18
19 import six
20
21 from c7n.actions import ActionRegistry, BaseAction
22 from c7n.exceptions import PolicyValidationError
23 from c7n.filters import FilterRegistry, MetricsFilter
24 from c7n.manager import resources
25 from c7n.query import QueryResourceManager
26 from c7n.utils import (
27 local_session, type_schema, get_retry)
28 from c7n.tags import (
29 TagDelayedAction, RemoveTag, TagActionFilter, Tag)
30
31 filters = FilterRegistry('emr.filters')
32 actions = ActionRegistry('emr.actions')
33 log = logging.getLogger('custodian.emr')
34
35 filters.register('marked-for-op', TagActionFilter)
36
37
38 @resources.register('emr')
39 class EMRCluster(QueryResourceManager):
40 """Resource manager for Elastic MapReduce clusters
41 """
42
43 class resource_type(object):
44 service = 'emr'
45 type = 'emr'
46 cluster_states = ['WAITING', 'BOOTSTRAPPING', 'RUNNING', 'STARTING']
47 enum_spec = ('list_clusters', 'Clusters', {'ClusterStates': cluster_states})
48 name = 'Name'
49 id = 'Id'
50 date = "Status.Timeline.CreationDateTime"
51 filter_name = None
52 dimension = None
53
54 action_registry = actions
55 filter_registry = filters
56 retry = staticmethod(get_retry(('ThrottlingException',)))
57
58 def __init__(self, ctx, data):
59 super(EMRCluster, self).__init__(ctx, data)
60 self.queries = QueryFilter.parse(
61 self.data.get('query', [
62 {'ClusterStates': [
63 'running', 'bootstrapping', 'waiting']}]))
64
65 @classmethod
66 def get_permissions(cls):
67 return ("elasticmapreduce:ListClusters",
68 "elasticmapreduce:DescribeCluster")
69
70 def get_resources(self, ids):
71 # no filtering by id set supported at the api
72 client = local_session(self.session_factory).client('emr')
73 results = []
74 for jid in ids:
75 results.append(
76 client.describe_cluster(ClusterId=jid)['Cluster'])
77 return results
78
79 def resources(self, query=None):
80 q = self.consolidate_query_filter()
81 if q is not None:
82 query = query or {}
83 for i in range(0, len(q)):
84 query[q[i]['Name']] = q[i]['Values']
85 return super(EMRCluster, self).resources(query=query)
86
87 def consolidate_query_filter(self):
88 result = []
89 names = set()
90 # allow same name to be specified multiple times and append the queries
91 # under the same name
92 for q in self.queries:
93 query_filter = q.query()
94 if query_filter['Name'] in names:
95 for filt in result:
96 if query_filter['Name'] == filt['Name']:
97 filt['Values'].extend(query_filter['Values'])
98 else:
99 names.add(query_filter['Name'])
100 result.append(query_filter)
101 if 'ClusterStates' not in names:
102 # include default query
103 result.append(
104 {
105 'Name': 'ClusterStates',
106 'Values': ['WAITING', 'RUNNING', 'BOOTSTRAPPING'],
107 }
108 )
109 return result
110
111 def augment(self, resources):
112 client = local_session(
113 self.get_resource_manager('emr').session_factory).client('emr')
114 result = []
115 # remap for cwmetrics
116 for r in resources:
117 cluster = self.retry(
118 client.describe_cluster, ClusterId=r['Id'])['Cluster']
119 result.append(cluster)
120 return result
121
122
123 @EMRCluster.filter_registry.register('metrics')
124 class EMRMetrics(MetricsFilter):
125
126 def get_dimensions(self, resource):
127 # Job flow id is legacy name for cluster id
128 return [{'Name': 'JobFlowId', 'Value': resource['Id']}]
129
130
131 @actions.register('mark-for-op')
132 class TagDelayedAction(TagDelayedAction):
133 """Action to specify an action to occur at a later date
134
135 :example:
136
137 .. code-block:: yaml
138
139 policies:
140 - name: emr-mark-for-op
141 resource: emr
142 filters:
143 - "tag:Name": absent
144 actions:
145 - type: mark-for-op
146 tag: custodian_cleanup
147 op: terminate
148 days: 4
149 msg: "Cluster does not have required tags"
150 """
151
152 permission = ('elasticmapreduce:AddTags',)
153 batch_size = 1
154 retry = staticmethod(get_retry(('ThrottlingException',)))
155
156 def process_resource_set(self, resources, tags):
157 client = local_session(
158 self.manager.session_factory).client('emr')
159 for r in resources:
160 self.retry(client.add_tags(ResourceId=r['Id'], Tags=tags))
161
162
163 @actions.register('tag')
164 class TagTable(Tag):
165 """Action to create tag(s) on a resource
166
167 :example:
168
169 .. code-block:: yaml
170
171 policies:
172 - name: emr-tag-table
173 resource: emr
174 filters:
175 - "tag:target-tag": absent
176 actions:
177 - type: tag
178 key: target-tag
179 value: target-tag-value
180 """
181
182 permissions = ('elasticmapreduce:AddTags',)
183 batch_size = 1
184 retry = staticmethod(get_retry(('ThrottlingException',)))
185
186 def process_resource_set(self, resources, tags):
187 client = local_session(self.manager.session_factory).client('emr')
188 for r in resources:
189 self.retry(client.add_tags(ResourceId=r['Id'], Tags=tags))
190
191
192 @actions.register('remove-tag')
193 class UntagTable(RemoveTag):
194 """Action to remove tag(s) on a resource
195
196 :example:
197
198 .. code-block:: yaml
199
200 policies:
201 - name: emr-remove-tag
202 resource: emr
203 filters:
204 - "tag:target-tag": present
205 actions:
206 - type: remove-tag
207 tags: ["target-tag"]
208 """
209
210 concurrency = 2
211 batch_size = 5
212 permissions = ('elasticmapreduce:RemoveTags',)
213
214 def process_resource_set(self, resources, tag_keys):
215 client = local_session(
216 self.manager.session_factory).client('emr')
217 for r in resources:
218 client.remove_tags(
219 ResourceId=r['Id'], TagKeys=tag_keys)
220
221
222 @actions.register('terminate')
223 class Terminate(BaseAction):
224 """Action to terminate EMR cluster(s)
225
226 It is recommended to apply a filter to the terminate action to avoid
227 termination of all EMR clusters
228
229 :example:
230
231 .. code-block:: yaml
232
233 policies:
234 - name: emr-terminate
235 resource: emr
236 query:
237 - ClusterStates: [STARTING, BOOTSTRAPPING, RUNNING, WAITING]
238 actions:
239 - terminate
240 """
241
242 schema = type_schema('terminate', force={'type': 'boolean'})
243 permissions = ("elasticmapreduce:TerminateJobFlows",)
244 delay = 5
245
246 def process(self, emrs):
247 client = local_session(self.manager.session_factory).client('emr')
248 cluster_ids = [emr['Id'] for emr in emrs]
249 if self.data.get('force'):
250 client.set_termination_protection(
251 JobFlowIds=cluster_ids, TerminationProtected=False)
252 time.sleep(self.delay)
253 client.terminate_job_flows(JobFlowIds=cluster_ids)
254 self.log.info("Deleted emrs: %s", cluster_ids)
255 return emrs
256
257
258 # Valid EMR Query Filters
259 EMR_VALID_FILTERS = set(('CreatedAfter', 'CreatedBefore', 'ClusterStates'))
260
261
262 class QueryFilter(object):
263
264 @classmethod
265 def parse(cls, data):
266 results = []
267 for d in data:
268 if not isinstance(d, dict):
269 raise PolicyValidationError(
270 "EMR Query Filter Invalid structure %s" % d)
271 results.append(cls(d).validate())
272 return results
273
274 def __init__(self, data):
275 self.data = data
276 self.key = None
277 self.value = None
278
279 def validate(self):
280 if not len(list(self.data.keys())) == 1:
281 raise PolicyValidationError(
282 "EMR Query Filter Invalid %s" % self.data)
283 self.key = list(self.data.keys())[0]
284 self.value = list(self.data.values())[0]
285
286 if self.key not in EMR_VALID_FILTERS and not self.key.startswith(
287 'tag:'):
288 raise PolicyValidationError(
289 "EMR Query Filter invalid filter name %s" % (self.data))
290
291 if self.value is None:
292 raise PolicyValidationError(
293 "EMR Query Filters must have a value, use tag-key"
294 " w/ tag name as value for tag present checks"
295 " %s" % self.data)
296 return self
297
298 def query(self):
299 value = self.value
300 if isinstance(self.value, six.string_types):
301 value = [self.value]
302
303 return {'Name': self.key, 'Values': value}
304
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/c7n/resources/emr.py b/c7n/resources/emr.py
--- a/c7n/resources/emr.py
+++ b/c7n/resources/emr.py
@@ -157,7 +157,7 @@
client = local_session(
self.manager.session_factory).client('emr')
for r in resources:
- self.retry(client.add_tags(ResourceId=r['Id'], Tags=tags))
+ self.retry(client.add_tags, ResourceId=r['Id'], Tags=tags)
@actions.register('tag')
@@ -186,7 +186,7 @@
def process_resource_set(self, resources, tags):
client = local_session(self.manager.session_factory).client('emr')
for r in resources:
- self.retry(client.add_tags(ResourceId=r['Id'], Tags=tags))
+ self.retry(client.add_tags, ResourceId=r['Id'], Tags=tags)
@actions.register('remove-tag')
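
The fix above works because the `retry` helper produced by `get_retry` expects a callable followed by its keyword arguments (as the `describe_cluster` call earlier in the same file already does); the buggy form invoked `client.add_tags(...)` immediately and handed the resulting response dict to `retry`, which then tried to call it — hence `'dict' object is not callable`. A minimal sketch of the distinction; this is an illustrative retry helper and a stand-in client method, not c7n's actual implementation:

```python
import time
from typing import Any, Callable


def retry(func: Callable[..., Any], *args: Any, attempts: int = 3, **kwargs: Any) -> Any:
    """Illustrative retry wrapper: func must be a callable, not a call result."""
    for attempt in range(1, attempts + 1):
        try:
            return func(*args, **kwargs)
        except Exception:
            if attempt == attempts:
                raise
            time.sleep(1)


def add_tags(ResourceId: str, Tags: list) -> dict:
    """Stand-in for the boto3 EMR client's add_tags; returns a response dict."""
    return {"ResponseMetadata": {"HTTPStatusCode": 200}}


# Correct: pass the callable and its kwargs separately, as the patch does.
retry(add_tags, ResourceId="j-ABCDEFGHIJ",
      Tags=[{"Key": "custodian-emr-terminate", "Value": "terminate@2018/10/01"}])

# Buggy pattern from the issue: add_tags(...) runs first and retry() receives
# the returned dict, raising TypeError: 'dict' object is not callable.
# retry(add_tags(ResourceId="j-ABCDEFGHIJ", Tags=[...]))
```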
| {"golden_diff": "diff --git a/c7n/resources/emr.py b/c7n/resources/emr.py\n--- a/c7n/resources/emr.py\n+++ b/c7n/resources/emr.py\n@@ -157,7 +157,7 @@\n client = local_session(\n self.manager.session_factory).client('emr')\n for r in resources:\n- self.retry(client.add_tags(ResourceId=r['Id'], Tags=tags))\n+ self.retry(client.add_tags, ResourceId=r['Id'], Tags=tags)\n \n \n @actions.register('tag')\n@@ -186,7 +186,7 @@\n def process_resource_set(self, resources, tags):\n client = local_session(self.manager.session_factory).client('emr')\n for r in resources:\n- self.retry(client.add_tags(ResourceId=r['Id'], Tags=tags))\n+ self.retry(client.add_tags, ResourceId=r['Id'], Tags=tags)\n \n \n @actions.register('remove-tag')\n", "issue": "Marking emr cluster for termination throws exception\nWhen marking EMR cluster for termination throws exception in version c7n-0.8.31.2. I see the tag created in cluster\r\n`````\r\npolicies:\r\n- name: emr-mark-clusters-for-termination\r\n resource: emr\r\n filters:\r\n - type: value\r\n key: \"Id\"\r\n op: in\r\n value:\r\n - 'abcdefghij'\r\n actions:\r\n - type: mark-for-op\r\n tag: 'custodian-emr-terminate'\r\n op: terminate\r\n days: 4\r\n`````\r\n\r\nthis policy throws exception \r\n\r\n2018-09-27 19:20:30,262: custodian.actions:INFO Tagging 1 resources for terminate on 2018/10/01\r\n2018-09-27 19:20:31,720: custodian.actions:ERROR Exception with tags: [{u'Value': u'Resource does not meet policy: terminate@2018/10/01', u'Key': 'custodian-emr-terminate'}] on resources: abcdefghij\r\n 'dict' object is not callable\r\n`\r\nThough the EMR is marked with tag ''custodian-emr-terminate', filtering on type: marked-for-op, returns 0 resources.\n", "before_files": [{"content": "# Copyright 2016-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport time\n\nimport six\n\nfrom c7n.actions import ActionRegistry, BaseAction\nfrom c7n.exceptions import PolicyValidationError\nfrom c7n.filters import FilterRegistry, MetricsFilter\nfrom c7n.manager import resources\nfrom c7n.query import QueryResourceManager\nfrom c7n.utils import (\n local_session, type_schema, get_retry)\nfrom c7n.tags import (\n TagDelayedAction, RemoveTag, TagActionFilter, Tag)\n\nfilters = FilterRegistry('emr.filters')\nactions = ActionRegistry('emr.actions')\nlog = logging.getLogger('custodian.emr')\n\nfilters.register('marked-for-op', TagActionFilter)\n\n\[email protected]('emr')\nclass EMRCluster(QueryResourceManager):\n \"\"\"Resource manager for Elastic MapReduce clusters\n \"\"\"\n\n class resource_type(object):\n service = 'emr'\n type = 'emr'\n cluster_states = ['WAITING', 'BOOTSTRAPPING', 'RUNNING', 'STARTING']\n enum_spec = ('list_clusters', 'Clusters', {'ClusterStates': cluster_states})\n name = 'Name'\n id = 'Id'\n date = \"Status.Timeline.CreationDateTime\"\n filter_name = None\n dimension = None\n\n action_registry = actions\n filter_registry = filters\n 
retry = staticmethod(get_retry(('ThrottlingException',)))\n\n def __init__(self, ctx, data):\n super(EMRCluster, self).__init__(ctx, data)\n self.queries = QueryFilter.parse(\n self.data.get('query', [\n {'ClusterStates': [\n 'running', 'bootstrapping', 'waiting']}]))\n\n @classmethod\n def get_permissions(cls):\n return (\"elasticmapreduce:ListClusters\",\n \"elasticmapreduce:DescribeCluster\")\n\n def get_resources(self, ids):\n # no filtering by id set supported at the api\n client = local_session(self.session_factory).client('emr')\n results = []\n for jid in ids:\n results.append(\n client.describe_cluster(ClusterId=jid)['Cluster'])\n return results\n\n def resources(self, query=None):\n q = self.consolidate_query_filter()\n if q is not None:\n query = query or {}\n for i in range(0, len(q)):\n query[q[i]['Name']] = q[i]['Values']\n return super(EMRCluster, self).resources(query=query)\n\n def consolidate_query_filter(self):\n result = []\n names = set()\n # allow same name to be specified multiple times and append the queries\n # under the same name\n for q in self.queries:\n query_filter = q.query()\n if query_filter['Name'] in names:\n for filt in result:\n if query_filter['Name'] == filt['Name']:\n filt['Values'].extend(query_filter['Values'])\n else:\n names.add(query_filter['Name'])\n result.append(query_filter)\n if 'ClusterStates' not in names:\n # include default query\n result.append(\n {\n 'Name': 'ClusterStates',\n 'Values': ['WAITING', 'RUNNING', 'BOOTSTRAPPING'],\n }\n )\n return result\n\n def augment(self, resources):\n client = local_session(\n self.get_resource_manager('emr').session_factory).client('emr')\n result = []\n # remap for cwmetrics\n for r in resources:\n cluster = self.retry(\n client.describe_cluster, ClusterId=r['Id'])['Cluster']\n result.append(cluster)\n return result\n\n\[email protected]_registry.register('metrics')\nclass EMRMetrics(MetricsFilter):\n\n def get_dimensions(self, resource):\n # Job flow id is legacy name for cluster id\n return [{'Name': 'JobFlowId', 'Value': resource['Id']}]\n\n\[email protected]('mark-for-op')\nclass TagDelayedAction(TagDelayedAction):\n \"\"\"Action to specify an action to occur at a later date\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: emr-mark-for-op\n resource: emr\n filters:\n - \"tag:Name\": absent\n actions:\n - type: mark-for-op\n tag: custodian_cleanup\n op: terminate\n days: 4\n msg: \"Cluster does not have required tags\"\n \"\"\"\n\n permission = ('elasticmapreduce:AddTags',)\n batch_size = 1\n retry = staticmethod(get_retry(('ThrottlingException',)))\n\n def process_resource_set(self, resources, tags):\n client = local_session(\n self.manager.session_factory).client('emr')\n for r in resources:\n self.retry(client.add_tags(ResourceId=r['Id'], Tags=tags))\n\n\[email protected]('tag')\nclass TagTable(Tag):\n \"\"\"Action to create tag(s) on a resource\n\n :example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: emr-tag-table\n resource: emr\n filters:\n - \"tag:target-tag\": absent\n actions:\n - type: tag\n key: target-tag\n value: target-tag-value\n \"\"\"\n\n permissions = ('elasticmapreduce:AddTags',)\n batch_size = 1\n retry = staticmethod(get_retry(('ThrottlingException',)))\n\n def process_resource_set(self, resources, tags):\n client = local_session(self.manager.session_factory).client('emr')\n for r in resources:\n self.retry(client.add_tags(ResourceId=r['Id'], Tags=tags))\n\n\[email protected]('remove-tag')\nclass UntagTable(RemoveTag):\n \"\"\"Action to remove tag(s) on a resource\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: emr-remove-tag\n resource: emr\n filters:\n - \"tag:target-tag\": present\n actions:\n - type: remove-tag\n tags: [\"target-tag\"]\n \"\"\"\n\n concurrency = 2\n batch_size = 5\n permissions = ('elasticmapreduce:RemoveTags',)\n\n def process_resource_set(self, resources, tag_keys):\n client = local_session(\n self.manager.session_factory).client('emr')\n for r in resources:\n client.remove_tags(\n ResourceId=r['Id'], TagKeys=tag_keys)\n\n\[email protected]('terminate')\nclass Terminate(BaseAction):\n \"\"\"Action to terminate EMR cluster(s)\n\n It is recommended to apply a filter to the terminate action to avoid\n termination of all EMR clusters\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: emr-terminate\n resource: emr\n query:\n - ClusterStates: [STARTING, BOOTSTRAPPING, RUNNING, WAITING]\n actions:\n - terminate\n \"\"\"\n\n schema = type_schema('terminate', force={'type': 'boolean'})\n permissions = (\"elasticmapreduce:TerminateJobFlows\",)\n delay = 5\n\n def process(self, emrs):\n client = local_session(self.manager.session_factory).client('emr')\n cluster_ids = [emr['Id'] for emr in emrs]\n if self.data.get('force'):\n client.set_termination_protection(\n JobFlowIds=cluster_ids, TerminationProtected=False)\n time.sleep(self.delay)\n client.terminate_job_flows(JobFlowIds=cluster_ids)\n self.log.info(\"Deleted emrs: %s\", cluster_ids)\n return emrs\n\n\n# Valid EMR Query Filters\nEMR_VALID_FILTERS = set(('CreatedAfter', 'CreatedBefore', 'ClusterStates'))\n\n\nclass QueryFilter(object):\n\n @classmethod\n def parse(cls, data):\n results = []\n for d in data:\n if not isinstance(d, dict):\n raise PolicyValidationError(\n \"EMR Query Filter Invalid structure %s\" % d)\n results.append(cls(d).validate())\n return results\n\n def __init__(self, data):\n self.data = data\n self.key = None\n self.value = None\n\n def validate(self):\n if not len(list(self.data.keys())) == 1:\n raise PolicyValidationError(\n \"EMR Query Filter Invalid %s\" % self.data)\n self.key = list(self.data.keys())[0]\n self.value = list(self.data.values())[0]\n\n if self.key not in EMR_VALID_FILTERS and not self.key.startswith(\n 'tag:'):\n raise PolicyValidationError(\n \"EMR Query Filter invalid filter name %s\" % (self.data))\n\n if self.value is None:\n raise PolicyValidationError(\n \"EMR Query Filters must have a value, use tag-key\"\n \" w/ tag name as value for tag present checks\"\n \" %s\" % self.data)\n return self\n\n def query(self):\n value = self.value\n if isinstance(self.value, six.string_types):\n value = [self.value]\n\n return {'Name': self.key, 'Values': value}\n", "path": "c7n/resources/emr.py"}], "after_files": [{"content": "# Copyright 2016-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with 
the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport time\n\nimport six\n\nfrom c7n.actions import ActionRegistry, BaseAction\nfrom c7n.exceptions import PolicyValidationError\nfrom c7n.filters import FilterRegistry, MetricsFilter\nfrom c7n.manager import resources\nfrom c7n.query import QueryResourceManager\nfrom c7n.utils import (\n local_session, type_schema, get_retry)\nfrom c7n.tags import (\n TagDelayedAction, RemoveTag, TagActionFilter, Tag)\n\nfilters = FilterRegistry('emr.filters')\nactions = ActionRegistry('emr.actions')\nlog = logging.getLogger('custodian.emr')\n\nfilters.register('marked-for-op', TagActionFilter)\n\n\[email protected]('emr')\nclass EMRCluster(QueryResourceManager):\n \"\"\"Resource manager for Elastic MapReduce clusters\n \"\"\"\n\n class resource_type(object):\n service = 'emr'\n type = 'emr'\n cluster_states = ['WAITING', 'BOOTSTRAPPING', 'RUNNING', 'STARTING']\n enum_spec = ('list_clusters', 'Clusters', {'ClusterStates': cluster_states})\n name = 'Name'\n id = 'Id'\n date = \"Status.Timeline.CreationDateTime\"\n filter_name = None\n dimension = None\n\n action_registry = actions\n filter_registry = filters\n retry = staticmethod(get_retry(('ThrottlingException',)))\n\n def __init__(self, ctx, data):\n super(EMRCluster, self).__init__(ctx, data)\n self.queries = QueryFilter.parse(\n self.data.get('query', [\n {'ClusterStates': [\n 'running', 'bootstrapping', 'waiting']}]))\n\n @classmethod\n def get_permissions(cls):\n return (\"elasticmapreduce:ListClusters\",\n \"elasticmapreduce:DescribeCluster\")\n\n def get_resources(self, ids):\n # no filtering by id set supported at the api\n client = local_session(self.session_factory).client('emr')\n results = []\n for jid in ids:\n results.append(\n client.describe_cluster(ClusterId=jid)['Cluster'])\n return results\n\n def resources(self, query=None):\n q = self.consolidate_query_filter()\n if q is not None:\n query = query or {}\n for i in range(0, len(q)):\n query[q[i]['Name']] = q[i]['Values']\n return super(EMRCluster, self).resources(query=query)\n\n def consolidate_query_filter(self):\n result = []\n names = set()\n # allow same name to be specified multiple times and append the queries\n # under the same name\n for q in self.queries:\n query_filter = q.query()\n if query_filter['Name'] in names:\n for filt in result:\n if query_filter['Name'] == filt['Name']:\n filt['Values'].extend(query_filter['Values'])\n else:\n names.add(query_filter['Name'])\n result.append(query_filter)\n if 'ClusterStates' not in names:\n # include default query\n result.append(\n {\n 'Name': 'ClusterStates',\n 'Values': ['WAITING', 'RUNNING', 'BOOTSTRAPPING'],\n }\n )\n return result\n\n def augment(self, resources):\n client = local_session(\n self.get_resource_manager('emr').session_factory).client('emr')\n result = []\n # remap for cwmetrics\n for r in resources:\n cluster = self.retry(\n client.describe_cluster, ClusterId=r['Id'])['Cluster']\n result.append(cluster)\n return result\n\n\[email protected]_registry.register('metrics')\nclass 
EMRMetrics(MetricsFilter):\n\n def get_dimensions(self, resource):\n # Job flow id is legacy name for cluster id\n return [{'Name': 'JobFlowId', 'Value': resource['Id']}]\n\n\[email protected]('mark-for-op')\nclass TagDelayedAction(TagDelayedAction):\n \"\"\"Action to specify an action to occur at a later date\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: emr-mark-for-op\n resource: emr\n filters:\n - \"tag:Name\": absent\n actions:\n - type: mark-for-op\n tag: custodian_cleanup\n op: terminate\n days: 4\n msg: \"Cluster does not have required tags\"\n \"\"\"\n\n permission = ('elasticmapreduce:AddTags',)\n batch_size = 1\n retry = staticmethod(get_retry(('ThrottlingException',)))\n\n def process_resource_set(self, resources, tags):\n client = local_session(\n self.manager.session_factory).client('emr')\n for r in resources:\n self.retry(client.add_tags, ResourceId=r['Id'], Tags=tags)\n\n\[email protected]('tag')\nclass TagTable(Tag):\n \"\"\"Action to create tag(s) on a resource\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: emr-tag-table\n resource: emr\n filters:\n - \"tag:target-tag\": absent\n actions:\n - type: tag\n key: target-tag\n value: target-tag-value\n \"\"\"\n\n permissions = ('elasticmapreduce:AddTags',)\n batch_size = 1\n retry = staticmethod(get_retry(('ThrottlingException',)))\n\n def process_resource_set(self, resources, tags):\n client = local_session(self.manager.session_factory).client('emr')\n for r in resources:\n self.retry(client.add_tags, ResourceId=r['Id'], Tags=tags)\n\n\[email protected]('remove-tag')\nclass UntagTable(RemoveTag):\n \"\"\"Action to remove tag(s) on a resource\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: emr-remove-tag\n resource: emr\n filters:\n - \"tag:target-tag\": present\n actions:\n - type: remove-tag\n tags: [\"target-tag\"]\n \"\"\"\n\n concurrency = 2\n batch_size = 5\n permissions = ('elasticmapreduce:RemoveTags',)\n\n def process_resource_set(self, resources, tag_keys):\n client = local_session(\n self.manager.session_factory).client('emr')\n for r in resources:\n client.remove_tags(\n ResourceId=r['Id'], TagKeys=tag_keys)\n\n\[email protected]('terminate')\nclass Terminate(BaseAction):\n \"\"\"Action to terminate EMR cluster(s)\n\n It is recommended to apply a filter to the terminate action to avoid\n termination of all EMR clusters\n\n :example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: emr-terminate\n resource: emr\n query:\n - ClusterStates: [STARTING, BOOTSTRAPPING, RUNNING, WAITING]\n actions:\n - terminate\n \"\"\"\n\n schema = type_schema('terminate', force={'type': 'boolean'})\n permissions = (\"elasticmapreduce:TerminateJobFlows\",)\n delay = 5\n\n def process(self, emrs):\n client = local_session(self.manager.session_factory).client('emr')\n cluster_ids = [emr['Id'] for emr in emrs]\n if self.data.get('force'):\n client.set_termination_protection(\n JobFlowIds=cluster_ids, TerminationProtected=False)\n time.sleep(self.delay)\n client.terminate_job_flows(JobFlowIds=cluster_ids)\n self.log.info(\"Deleted emrs: %s\", cluster_ids)\n return emrs\n\n\n# Valid EMR Query Filters\nEMR_VALID_FILTERS = set(('CreatedAfter', 'CreatedBefore', 'ClusterStates'))\n\n\nclass QueryFilter(object):\n\n @classmethod\n def parse(cls, data):\n results = []\n for d in data:\n if not isinstance(d, dict):\n raise PolicyValidationError(\n \"EMR Query Filter Invalid structure %s\" % d)\n results.append(cls(d).validate())\n return results\n\n def __init__(self, data):\n self.data = data\n self.key = None\n self.value = None\n\n def validate(self):\n if not len(list(self.data.keys())) == 1:\n raise PolicyValidationError(\n \"EMR Query Filter Invalid %s\" % self.data)\n self.key = list(self.data.keys())[0]\n self.value = list(self.data.values())[0]\n\n if self.key not in EMR_VALID_FILTERS and not self.key.startswith(\n 'tag:'):\n raise PolicyValidationError(\n \"EMR Query Filter invalid filter name %s\" % (self.data))\n\n if self.value is None:\n raise PolicyValidationError(\n \"EMR Query Filters must have a value, use tag-key\"\n \" w/ tag name as value for tag present checks\"\n \" %s\" % self.data)\n return self\n\n def query(self):\n value = self.value\n if isinstance(self.value, six.string_types):\n value = [self.value]\n\n return {'Name': self.key, 'Values': value}\n", "path": "c7n/resources/emr.py"}]} | 3,473 | 208 |
gh_patches_debug_17274 | rasdani/github-patches | git_diff | CTPUG__wafer-307 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add support for Django's redirect app to wafer
It's useful to be able to add a redirect if a page is moved to a different point in the hierarchy.
Django's already got support for this, so we should leverage that.
The potentially problematic part is how this interacts with the static site generation, as django-medusa's handling of redirects is far from ideal.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wafer/settings.py`
Content:
```
1 import os
2
3 from django.utils.translation import ugettext_lazy as _
4
5 try:
6 from localsettings import *
7 except ImportError:
8 pass
9
10 # Django settings for wafer project.
11
12 ADMINS = (
13 # The logging config below mails admins
14 # ('Your Name', '[email protected]'),
15 )
16
17 DATABASES = {
18 'default': {
19 'ENGINE': 'django.db.backends.sqlite3',
20 'NAME': 'wafer.db',
21 }
22 }
23
24 if os.environ.get('TESTDB', None) == 'postgres':
25 DATABASES['default'].update({
26 'ENGINE': 'django.db.backends.postgresql_psycopg2',
27 'USER': 'postgres',
28 'NAME': 'wafer',
29 })
30
31 # Hosts/domain names that are valid for this site; required if DEBUG is False
32 # See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
33 ALLOWED_HOSTS = []
34
35 # Local time zone for this installation. Choices can be found here:
36 # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
37 # although not all choices may be available on all operating systems.
38 # In a Windows environment this must be set to your system time zone.
39 TIME_ZONE = 'UTC'
40
41 # Language code for this installation. All choices can be found here:
42 # http://www.i18nguy.com/unicode/language-identifiers.html
43 LANGUAGE_CODE = 'en-us'
44
45 SITE_ID = 1
46
47 # If you set this to False, Django will make some optimizations so as not
48 # to load the internationalization machinery.
49 USE_I18N = True
50
51 # If you set this to False, Django will not format dates, numbers and
52 # calendars according to the current locale.
53 USE_L10N = True
54
55 # If you set this to False, Django will not use timezone-aware datetimes.
56 USE_TZ = True
57
58 # Absolute filesystem path to the directory that will hold user-uploaded files.
59 # Example: "/var/www/example.com/media/"
60 project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
61 MEDIA_ROOT = os.path.join(project_root, 'media')
62
63 # URL that handles the media served from MEDIA_ROOT. Make sure to use a
64 # trailing slash.
65 # Examples: "http://example.com/media/", "http://media.example.com/"
66 MEDIA_URL = '/media/'
67
68 # Absolute path to the directory static files should be collected to.
69 # Don't put anything in this directory yourself; store your static files
70 # in apps' "static/" subdirectories and in STATICFILES_DIRS.
71 # Example: "/var/www/example.com/static/"
72 STATIC_ROOT = ''
73
74 # URL prefix for static files.
75 # Example: "http://example.com/static/", "http://static.example.com/"
76 STATIC_URL = '/static/'
77
78 # Additional locations of static files
79 STATICFILES_DIRS = (
80 # Put strings here, like "/home/html/static" or "C:/www/django/static".
81 # Always use forward slashes, even on Windows.
82 # Don't forget to use absolute paths, not relative paths.
83 os.path.join(project_root, 'bower_components'),
84 )
85
86 # List of finder classes that know how to find static files in
87 # various locations.
88 STATICFILES_FINDERS = (
89 'django.contrib.staticfiles.finders.FileSystemFinder',
90 'django.contrib.staticfiles.finders.AppDirectoriesFinder',
91 # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
92 )
93
94 # Make this unique, and don't share it with anybody.
95 SECRET_KEY = '8iysa30^no&oi5kv$k1w)#gsxzrylr-h6%)loz71expnbf7z%)'
96
97 # List of callables that know how to import templates from various sources.
98 TEMPLATE_LOADERS = (
99 'django.template.loaders.filesystem.Loader',
100 'django.template.loaders.app_directories.Loader',
101 # 'django.template.loaders.eggs.Loader',
102 )
103
104 MIDDLEWARE_CLASSES = (
105 'django.middleware.common.CommonMiddleware',
106 'django.contrib.sessions.middleware.SessionMiddleware',
107 'django.middleware.csrf.CsrfViewMiddleware',
108 'django.contrib.auth.middleware.AuthenticationMiddleware',
109 'django.contrib.messages.middleware.MessageMiddleware',
110 # Uncomment the next line for simple clickjacking protection:
111 # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
112 )
113
114 ROOT_URLCONF = 'wafer.urls'
115
116 # Python dotted path to the WSGI application used by Django's runserver.
117 WSGI_APPLICATION = 'wafer.wsgi.application'
118
119 TEMPLATE_DIRS = (
120 # Put strings here, like "/home/html/django_templates" or
121 # "C:/www/django/templates". Always use forward slashes, even on Windows.
122 # Don't forget to use absolute paths, not relative paths.
123 )
124
125 TEMPLATE_CONTEXT_PROCESSORS = (
126 'django.contrib.auth.context_processors.auth',
127 'django.core.context_processors.debug',
128 'django.core.context_processors.i18n',
129 'django.core.context_processors.media',
130 'django.core.context_processors.static',
131 'django.core.context_processors.tz',
132 'django.contrib.messages.context_processors.messages',
133 'wafer.context_processors.site_info',
134 'wafer.context_processors.navigation_info',
135 'wafer.context_processors.menu_info',
136 'wafer.context_processors.registration_settings',
137 )
138
139 INSTALLED_APPS = (
140 'django.contrib.auth',
141 'django.contrib.contenttypes',
142 'django.contrib.sessions',
143 'django.contrib.sites',
144 'django.contrib.messages',
145 'django.contrib.staticfiles',
146 'reversion',
147 'django_medusa',
148 'crispy_forms',
149 'django_nose',
150 'markitup',
151 'rest_framework',
152 'easy_select2',
153 'wafer',
154 'wafer.kv',
155 'wafer.registration',
156 'wafer.talks',
157 'wafer.schedule',
158 'wafer.users',
159 'wafer.sponsors',
160 'wafer.pages',
161 'wafer.tickets',
162 'wafer.compare',
163 # Django isn't finding the overridden templates
164 'registration',
165 'django.contrib.admin',
166 )
167
168 TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
169
170 # A sample logging configuration. The only tangible logging
171 # performed by this configuration is to send an email to
172 # the site admins on every HTTP 500 error when DEBUG=False.
173 # See http://docs.djangoproject.com/en/dev/topics/logging for
174 # more details on how to customize your logging configuration.
175 LOGGING = {
176 'version': 1,
177 'disable_existing_loggers': False,
178 'filters': {
179 'require_debug_false': {
180 '()': 'django.utils.log.RequireDebugFalse'
181 }
182 },
183 'handlers': {
184 'mail_admins': {
185 'level': 'ERROR',
186 'filters': ['require_debug_false'],
187 'class': 'django.utils.log.AdminEmailHandler'
188 }
189 },
190 'loggers': {
191 'django.request': {
192 'handlers': ['mail_admins'],
193 'level': 'ERROR',
194 'propagate': True,
195 },
196 }
197 }
198
199 # Django registration:
200 ACCOUNT_ACTIVATION_DAYS = 7
201
202 AUTH_USER_MODEL = 'auth.User'
203
204 # Forms:
205 CRISPY_TEMPLATE_PACK = 'bootstrap3'
206
207 # Wafer cache settings
208 # We assume that the WAFER_CACHE is cross-process
209 WAFER_CACHE = 'wafer_cache'
210 CACHES = {
211 'default': {
212 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
213 },
214 WAFER_CACHE: {
215 'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
216 'LOCATION': 'wafer_cache_table',
217 },
218 }
219
220
221 # Wafer menu settings
222
223 WAFER_MENUS = ()
224 # Example menus entries:
225 #
226 # {"label": _("Home"),
227 # "url": '/'},
228 # {"menu": "sponsors",
229 # "label": _("Sponsors"),
230 # "items": [
231 # {"name": "sponsors", "label": _("Our sponsors"),
232 # "url": reverse_lazy("wafer_sponsors")},
233 # {"name": "packages", "label": _("Sponsorship packages"),
234 # "url": reverse_lazy("wafer_sponsorship_packages")},
235 # ]},
236 # {"label": _("Talks"),
237 # "url": reverse_lazy("wafer_users_talks")},
238
239 WAFER_DYNAMIC_MENUS = (
240 'wafer.pages.models.page_menus',
241 )
242
243 # Enabled SSO mechanims:
244 WAFER_SSO = (
245 # 'github',
246 # 'debian',
247 )
248
249 # Log in with GitHub:
250 # WAFER_GITHUB_CLIENT_ID = 'register on github'
251 # WAFER_GITHUB_CLIENT_SECRET = 'to get these secrets'
252
253 # Log in with Debian SSO:
254 # Requires some Apache config:
255 # SSLCACertificateFile /srv/sso.debian.org/etc/debsso.crt
256 # SSLCARevocationCheck chain
257 # SSLCARevocationFile /srv/sso.debian.org/etc/debsso.crl
258 # <Location /accounts/debian-login/>
259 # SSLOptions +StdEnvVars
260 # SSLVerifyClient optional
261 # </Location>
262 # WAFER_DEBIAN_NM_API_KEY = 'obtain one from https://nm.debian.org/apikeys/'
263
264 # Set this to true to disable the login button on the navigation toolbar
265 WAFER_HIDE_LOGIN = False
266
267 # Talk submissions open
268 # Set this to False to disable talk submissions
269 WAFER_TALKS_OPEN = True
270
271 # The form used for talk submission
272 WAFER_TALK_FORM = 'wafer.talks.forms.TalkForm'
273
274 # Set this to False to disable registration
275 WAFER_REGISTRATION_OPEN = True
276 # Can be 'ticket' for Quicket tickets or 'form' for a classic form
277 WAFER_REGISTRATION_MODE = 'ticket'
278
279 # For REGISTRATION_MODE == 'form', the form to present
280 WAFER_REGISTRATION_FORM = 'wafer.users.forms.ExampleRegistrationForm'
281
282 # Allow registered and anonymous users to see registered users
283 WAFER_PUBLIC_ATTENDEE_LIST = True
284
285 # Ticket registration with Quicket
286 # WAFER_TICKET_SECRET = "i'm a shared secret"
287
288 # django_medusa -- disk-based renderer
289 MEDUSA_RENDERER_CLASS = "wafer.management.static.WaferDiskStaticSiteRenderer"
290 MEDUSA_DEPLOY_DIR = os.path.join(project_root, 'static_mirror')
291 MARKITUP_FILTER = ('markdown.markdown', {'safe_mode': True})
292
293 REST_FRAMEWORK = {
294 'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAdminUser',),
295 'PAGE_SIZE': 50
296 }
297
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wafer/settings.py b/wafer/settings.py
--- a/wafer/settings.py
+++ b/wafer/settings.py
@@ -107,6 +107,7 @@
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
+ 'django.contrib.redirects.middleware.RedirectFallbackMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
@@ -143,6 +144,7 @@
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
+ 'django.contrib.redirects',
'reversion',
'django_medusa',
'crispy_forms',
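
The diff enables `django.contrib.redirects`, whose `RedirectFallbackMiddleware` only kicks in when a request would otherwise return a 404; it also relies on `django.contrib.sites` (already installed above) and on running `migrate` to create the redirect table. A hedged sketch of adding a redirect for a moved page — the paths are made up, and `SITE_ID = 1` is taken from the settings above:

```python
# Illustrative only: register a redirect after moving a page in the hierarchy.
from django.contrib.redirects.models import Redirect
from django.contrib.sites.models import Site

site = Site.objects.get(pk=1)  # matches SITE_ID in the settings above
Redirect.objects.get_or_create(
    site=site,
    old_path="/about/venue/",   # hypothetical old location
    new_path="/venue/",         # hypothetical new location
)
```

How such database-backed redirects interact with the django-medusa static rendering (the concern raised in the issue) is not addressed by this snippet.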
| {"golden_diff": "diff --git a/wafer/settings.py b/wafer/settings.py\n--- a/wafer/settings.py\n+++ b/wafer/settings.py\n@@ -107,6 +107,7 @@\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n+ 'django.contrib.redirects.middleware.RedirectFallbackMiddleware',\n # Uncomment the next line for simple clickjacking protection:\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n )\n@@ -143,6 +144,7 @@\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n+ 'django.contrib.redirects',\n 'reversion',\n 'django_medusa',\n 'crispy_forms',\n", "issue": "Add support for Django's redirect app to wafer\nIt's useful to be able to add a redirect if a page is moved to a different point in the hierachy.\n\nDjango's already got support for this, so we should leverage that.\n\nThe potentially problematic part is how this iteracts with the static site generation, as django-medusa's handling of redirects is far from ideal.\n\n", "before_files": [{"content": "import os\n\nfrom django.utils.translation import ugettext_lazy as _\n\ntry:\n from localsettings import *\nexcept ImportError:\n pass\n\n# Django settings for wafer project.\n\nADMINS = (\n # The logging config below mails admins\n # ('Your Name', '[email protected]'),\n)\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': 'wafer.db',\n }\n}\n\nif os.environ.get('TESTDB', None) == 'postgres':\n DATABASES['default'].update({\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'USER': 'postgres',\n 'NAME': 'wafer',\n })\n\n# Hosts/domain names that are valid for this site; required if DEBUG is False\n# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts\nALLOWED_HOSTS = []\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# In a Windows environment this must be set to your system time zone.\nTIME_ZONE = 'UTC'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en-us'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale.\nUSE_L10N = True\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = True\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/var/www/example.com/media/\"\nproject_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))\nMEDIA_ROOT = os.path.join(project_root, 'media')\n\n# URL that handles the media served from MEDIA_ROOT. 
Make sure to use a\n# trailing slash.\n# Examples: \"http://example.com/media/\", \"http://media.example.com/\"\nMEDIA_URL = '/media/'\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/var/www/example.com/static/\"\nSTATIC_ROOT = ''\n\n# URL prefix for static files.\n# Example: \"http://example.com/static/\", \"http://static.example.com/\"\nSTATIC_URL = '/static/'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n os.path.join(project_root, 'bower_components'),\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n # 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = '8iysa30^no&oi5kv$k1w)#gsxzrylr-h6%)loz71expnbf7z%)'\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n # 'django.template.loaders.eggs.Loader',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n # Uncomment the next line for simple clickjacking protection:\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'wafer.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'wafer.wsgi.application'\n\nTEMPLATE_DIRS = (\n # Put strings here, like \"/home/html/django_templates\" or\n # \"C:/www/django/templates\". Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.debug',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.media',\n 'django.core.context_processors.static',\n 'django.core.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n 'wafer.context_processors.site_info',\n 'wafer.context_processors.navigation_info',\n 'wafer.context_processors.menu_info',\n 'wafer.context_processors.registration_settings',\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'reversion',\n 'django_medusa',\n 'crispy_forms',\n 'django_nose',\n 'markitup',\n 'rest_framework',\n 'easy_select2',\n 'wafer',\n 'wafer.kv',\n 'wafer.registration',\n 'wafer.talks',\n 'wafer.schedule',\n 'wafer.users',\n 'wafer.sponsors',\n 'wafer.pages',\n 'wafer.tickets',\n 'wafer.compare',\n # Django isn't finding the overridden templates\n 'registration',\n 'django.contrib.admin',\n)\n\nTEST_RUNNER = 'django_nose.NoseTestSuiteRunner'\n\n# A sample logging configuration. 
The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\n# Django registration:\nACCOUNT_ACTIVATION_DAYS = 7\n\nAUTH_USER_MODEL = 'auth.User'\n\n# Forms:\nCRISPY_TEMPLATE_PACK = 'bootstrap3'\n\n# Wafer cache settings\n# We assume that the WAFER_CACHE is cross-process\nWAFER_CACHE = 'wafer_cache'\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n },\n WAFER_CACHE: {\n 'BACKEND': 'django.core.cache.backends.db.DatabaseCache',\n 'LOCATION': 'wafer_cache_table',\n },\n}\n\n\n# Wafer menu settings\n\nWAFER_MENUS = ()\n# Example menus entries:\n#\n# {\"label\": _(\"Home\"),\n# \"url\": '/'},\n# {\"menu\": \"sponsors\",\n# \"label\": _(\"Sponsors\"),\n# \"items\": [\n# {\"name\": \"sponsors\", \"label\": _(\"Our sponsors\"),\n# \"url\": reverse_lazy(\"wafer_sponsors\")},\n# {\"name\": \"packages\", \"label\": _(\"Sponsorship packages\"),\n# \"url\": reverse_lazy(\"wafer_sponsorship_packages\")},\n# ]},\n# {\"label\": _(\"Talks\"),\n# \"url\": reverse_lazy(\"wafer_users_talks\")},\n\nWAFER_DYNAMIC_MENUS = (\n 'wafer.pages.models.page_menus',\n)\n\n# Enabled SSO mechanims:\nWAFER_SSO = (\n # 'github',\n # 'debian',\n)\n\n# Log in with GitHub:\n# WAFER_GITHUB_CLIENT_ID = 'register on github'\n# WAFER_GITHUB_CLIENT_SECRET = 'to get these secrets'\n\n# Log in with Debian SSO:\n# Requires some Apache config:\n# SSLCACertificateFile /srv/sso.debian.org/etc/debsso.crt\n# SSLCARevocationCheck chain\n# SSLCARevocationFile /srv/sso.debian.org/etc/debsso.crl\n# <Location /accounts/debian-login/>\n# SSLOptions +StdEnvVars\n# SSLVerifyClient optional\n# </Location>\n# WAFER_DEBIAN_NM_API_KEY = 'obtain one from https://nm.debian.org/apikeys/'\n\n# Set this to true to disable the login button on the navigation toolbar\nWAFER_HIDE_LOGIN = False\n\n# Talk submissions open\n# Set this to False to disable talk submissions\nWAFER_TALKS_OPEN = True\n\n# The form used for talk submission\nWAFER_TALK_FORM = 'wafer.talks.forms.TalkForm'\n\n# Set this to False to disable registration\nWAFER_REGISTRATION_OPEN = True\n# Can be 'ticket' for Quicket tickets or 'form' for a classic form\nWAFER_REGISTRATION_MODE = 'ticket'\n\n# For REGISTRATION_MODE == 'form', the form to present\nWAFER_REGISTRATION_FORM = 'wafer.users.forms.ExampleRegistrationForm'\n\n# Allow registered and anonymous users to see registered users\nWAFER_PUBLIC_ATTENDEE_LIST = True\n\n# Ticket registration with Quicket\n# WAFER_TICKET_SECRET = \"i'm a shared secret\"\n\n# django_medusa -- disk-based renderer\nMEDUSA_RENDERER_CLASS = \"wafer.management.static.WaferDiskStaticSiteRenderer\"\nMEDUSA_DEPLOY_DIR = os.path.join(project_root, 'static_mirror')\nMARKITUP_FILTER = ('markdown.markdown', {'safe_mode': True})\n\nREST_FRAMEWORK = {\n 'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAdminUser',),\n 'PAGE_SIZE': 50\n}\n", "path": "wafer/settings.py"}], "after_files": 
[{"content": "import os\n\nfrom django.utils.translation import ugettext_lazy as _\n\ntry:\n from localsettings import *\nexcept ImportError:\n pass\n\n# Django settings for wafer project.\n\nADMINS = (\n # The logging config below mails admins\n # ('Your Name', '[email protected]'),\n)\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': 'wafer.db',\n }\n}\n\nif os.environ.get('TESTDB', None) == 'postgres':\n DATABASES['default'].update({\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'USER': 'postgres',\n 'NAME': 'wafer',\n })\n\n# Hosts/domain names that are valid for this site; required if DEBUG is False\n# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts\nALLOWED_HOSTS = []\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# In a Windows environment this must be set to your system time zone.\nTIME_ZONE = 'UTC'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en-us'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale.\nUSE_L10N = True\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = True\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/var/www/example.com/media/\"\nproject_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))\nMEDIA_ROOT = os.path.join(project_root, 'media')\n\n# URL that handles the media served from MEDIA_ROOT. 
Make sure to use a\n# trailing slash.\n# Examples: \"http://example.com/media/\", \"http://media.example.com/\"\nMEDIA_URL = '/media/'\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/var/www/example.com/static/\"\nSTATIC_ROOT = ''\n\n# URL prefix for static files.\n# Example: \"http://example.com/static/\", \"http://static.example.com/\"\nSTATIC_URL = '/static/'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n os.path.join(project_root, 'bower_components'),\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n # 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = '8iysa30^no&oi5kv$k1w)#gsxzrylr-h6%)loz71expnbf7z%)'\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n # 'django.template.loaders.eggs.Loader',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.contrib.redirects.middleware.RedirectFallbackMiddleware',\n # Uncomment the next line for simple clickjacking protection:\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'wafer.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'wafer.wsgi.application'\n\nTEMPLATE_DIRS = (\n # Put strings here, like \"/home/html/django_templates\" or\n # \"C:/www/django/templates\". 
Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.debug',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.media',\n 'django.core.context_processors.static',\n 'django.core.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n 'wafer.context_processors.site_info',\n 'wafer.context_processors.navigation_info',\n 'wafer.context_processors.menu_info',\n 'wafer.context_processors.registration_settings',\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.redirects',\n 'reversion',\n 'django_medusa',\n 'crispy_forms',\n 'django_nose',\n 'markitup',\n 'rest_framework',\n 'easy_select2',\n 'wafer',\n 'wafer.kv',\n 'wafer.registration',\n 'wafer.talks',\n 'wafer.schedule',\n 'wafer.users',\n 'wafer.sponsors',\n 'wafer.pages',\n 'wafer.tickets',\n 'wafer.compare',\n # Django isn't finding the overridden templates\n 'registration',\n 'django.contrib.admin',\n)\n\nTEST_RUNNER = 'django_nose.NoseTestSuiteRunner'\n\n# A sample logging configuration. The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\n# Django registration:\nACCOUNT_ACTIVATION_DAYS = 7\n\nAUTH_USER_MODEL = 'auth.User'\n\n# Forms:\nCRISPY_TEMPLATE_PACK = 'bootstrap3'\n\n# Wafer cache settings\n# We assume that the WAFER_CACHE is cross-process\nWAFER_CACHE = 'wafer_cache'\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n },\n WAFER_CACHE: {\n 'BACKEND': 'django.core.cache.backends.db.DatabaseCache',\n 'LOCATION': 'wafer_cache_table',\n },\n}\n\n\n# Wafer menu settings\n\nWAFER_MENUS = ()\n# Example menus entries:\n#\n# {\"label\": _(\"Home\"),\n# \"url\": '/'},\n# {\"menu\": \"sponsors\",\n# \"label\": _(\"Sponsors\"),\n# \"items\": [\n# {\"name\": \"sponsors\", \"label\": _(\"Our sponsors\"),\n# \"url\": reverse_lazy(\"wafer_sponsors\")},\n# {\"name\": \"packages\", \"label\": _(\"Sponsorship packages\"),\n# \"url\": reverse_lazy(\"wafer_sponsorship_packages\")},\n# ]},\n# {\"label\": _(\"Talks\"),\n# \"url\": reverse_lazy(\"wafer_users_talks\")},\n\nWAFER_DYNAMIC_MENUS = (\n 'wafer.pages.models.page_menus',\n)\n\n# Enabled SSO mechanims:\nWAFER_SSO = (\n # 'github',\n # 'debian',\n)\n\n# Log in with GitHub:\n# WAFER_GITHUB_CLIENT_ID = 'register on github'\n# WAFER_GITHUB_CLIENT_SECRET = 'to get these secrets'\n\n# Log in with Debian SSO:\n# Requires some Apache config:\n# SSLCACertificateFile /srv/sso.debian.org/etc/debsso.crt\n# SSLCARevocationCheck chain\n# SSLCARevocationFile /srv/sso.debian.org/etc/debsso.crl\n# <Location /accounts/debian-login/>\n# SSLOptions 
+StdEnvVars\n# SSLVerifyClient optional\n# </Location>\n# WAFER_DEBIAN_NM_API_KEY = 'obtain one from https://nm.debian.org/apikeys/'\n\n# Set this to true to disable the login button on the navigation toolbar\nWAFER_HIDE_LOGIN = False\n\n# Talk submissions open\n# Set this to False to disable talk submissions\nWAFER_TALKS_OPEN = True\n\n# The form used for talk submission\nWAFER_TALK_FORM = 'wafer.talks.forms.TalkForm'\n\n# Set this to False to disable registration\nWAFER_REGISTRATION_OPEN = True\n# Can be 'ticket' for Quicket tickets or 'form' for a classic form\nWAFER_REGISTRATION_MODE = 'ticket'\n\n# For REGISTRATION_MODE == 'form', the form to present\nWAFER_REGISTRATION_FORM = 'wafer.users.forms.ExampleRegistrationForm'\n\n# Allow registered and anonymous users to see registered users\nWAFER_PUBLIC_ATTENDEE_LIST = True\n\n# Ticket registration with Quicket\n# WAFER_TICKET_SECRET = \"i'm a shared secret\"\n\n# django_medusa -- disk-based renderer\nMEDUSA_RENDERER_CLASS = \"wafer.management.static.WaferDiskStaticSiteRenderer\"\nMEDUSA_DEPLOY_DIR = os.path.join(project_root, 'static_mirror')\nMARKITUP_FILTER = ('markdown.markdown', {'safe_mode': True})\n\nREST_FRAMEWORK = {\n 'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAdminUser',),\n 'PAGE_SIZE': 50\n}\n", "path": "wafer/settings.py"}]} | 3,381 | 174 |
gh_patches_debug_33924 | rasdani/github-patches | git_diff | PrefectHQ__prefect-710 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Context docs are broken
For some reason the actual `context` class signature is not being documented.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/prefect/utilities/context.py`
Content:
```
1 # Licensed under LICENSE.md; also available at https://www.prefect.io/licenses/beta-eula
2
3 """
4 This module implements the Prefect context that is available when tasks run.
5
6 Tasks can import prefect.context and access attributes that will be overwritten
7 when the task is run.
8
9 Example:
10
11 ```python
12 import prefect.context
13 with prefect.context(a=1, b=2):
14 print(prefect.context.a) # 1
15 print (prefect.context.a) # undefined
16 ```
17
18 Prefect provides various key / value pairs in context that are always available during task runs:
19
20 | Variable | Description |
21 | :--- | --- |
22 | `scheduled_start_time` | an actual datetime object representing the scheduled start time for the Flow run; falls back to `now` for unscheduled runs |
23 | `date` | an actual datetime object representing the current time |
24 | `today` | the current date formatted as `YYYY-MM-DD`|
25 | `today_nodash` | the current date formatted as `YYYYMMDD`|
26 | `yesterday` | yesterday's date formatted as `YYYY-MM-DD`|
27 | `yesterday_nodash` | yesterday's date formatted as `YYYYMMDD`|
28 | `tomorrow` | tomorrow's date formatted as `YYYY-MM-DD`|
29 | `tomorrow_nodash` | tomorrow's date formatted as `YYYYMMDD`|
30 | `task_name` | the name of the current task |
31 """
32
33 import contextlib
34 import threading
35 from typing import Any, Iterator, MutableMapping
36
37 from prefect.configuration import config
38 from prefect.utilities.collections import DotDict
39
40
41 class Context(DotDict, threading.local):
42 """
43 A thread safe context store for Prefect data.
44
45 The `Context` is a `DotDict` subclass, and can be instantiated the same way.
46
47 Args:
48 - *args (Any): arguments to provide to the `DotDict` constructor (e.g.,
49 an initial dictionary)
50 - *kwargs (Any): any key / value pairs to initialize this context with
51 """
52
53 def __init__(self, *args, **kwargs) -> None:
54 super().__init__(*args, **kwargs)
55 if "context" in config:
56 self.update(config.context)
57
58 def __repr__(self) -> str:
59 return "<Context>"
60
61 @contextlib.contextmanager
62 def __call__(self, *args: MutableMapping, **kwargs: Any) -> Iterator["Context"]:
63 """
64 A context manager for setting / resetting the Prefect context
65
66 Example:
67 import prefect.context
68 with prefect.context(dict(a=1, b=2), c=3):
69 print(prefect.context.a) # 1
70 """
71 previous_context = self.copy()
72 try:
73 self.update(*args, **kwargs)
74 yield self
75 finally:
76 self.clear()
77 self.update(previous_context)
78
79
80 context = Context()
81
```
Path: `src/prefect/tasks/templates/jinja2.py`
Content:
```
1 # Licensed under LICENSE.md; also available at https://www.prefect.io/licenses/beta-eula
2
3 from typing import Any
4
5 from jinja2 import Template
6
7 import prefect
8 from prefect import Task
9
10
11 class JinjaTemplateTask(Task):
12 """
13 This task contains a Jinja template which is formatted with the results of any
14 upstream tasks and returned.
15
16 Variables from `prefect.context` will also be used for rendering.
17
18 Args:
19 - template (str, optional): the optional _default_ template string to render at runtime;
20 can also be provided as a keyword to `run`, which takes precendence over this default.
21 - **kwargs (optional): additional keyword arguments to pass to the
22 standard Task constructor
23 """
24
25 def __init__(self, template: str = None, **kwargs: Any):
26 self.template = Template(template or "")
27 super().__init__(**kwargs)
28
29 def run(self, template: str = None, **format_kwargs: Any) -> str: # type: ignore
30 """
31 Formats the Jinja Template with the provided kwargs.
32
33 Args:
34 - template (str, optional): the template string to render; if not
35 provided, `self.template` will be used
36 - **format_kwargs (optional): keyword arguments to use for
37 rendering; note that variables from `prefect.context` will also be used
38
39 Returns:
40 - str: the rendered string
41 """
42 template = self.template if template is None else Template(template)
43 with prefect.context(**format_kwargs) as data:
44 return template.render(**data)
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/prefect/tasks/templates/jinja2.py b/src/prefect/tasks/templates/jinja2.py
--- a/src/prefect/tasks/templates/jinja2.py
+++ b/src/prefect/tasks/templates/jinja2.py
@@ -6,6 +6,7 @@
import prefect
from prefect import Task
+from prefect.utilities.tasks import defaults_from_attrs
class JinjaTemplateTask(Task):
@@ -23,9 +24,10 @@
"""
def __init__(self, template: str = None, **kwargs: Any):
- self.template = Template(template or "")
+ self.template = template or ""
super().__init__(**kwargs)
+ @defaults_from_attrs("template")
def run(self, template: str = None, **format_kwargs: Any) -> str: # type: ignore
"""
Formats the Jinja Template with the provided kwargs.
@@ -39,6 +41,6 @@
Returns:
- str: the rendered string
"""
- template = self.template if template is None else Template(template)
+ template = Template(template)
with prefect.context(**format_kwargs) as data:
return template.render(**data)
diff --git a/src/prefect/utilities/context.py b/src/prefect/utilities/context.py
--- a/src/prefect/utilities/context.py
+++ b/src/prefect/utilities/context.py
@@ -10,9 +10,11 @@
```python
import prefect.context
+
with prefect.context(a=1, b=2):
print(prefect.context.a) # 1
-print (prefect.context.a) # undefined
+
+print(prefect.context.a) # undefined
```
Prefect provides various key / value pairs in context that are always available during task runs:
@@ -28,6 +30,8 @@
| `tomorrow` | tomorrow's date formatted as `YYYY-MM-DD`|
| `tomorrow_nodash` | tomorrow's date formatted as `YYYYMMDD`|
| `task_name` | the name of the current task |
+
+Users can also provide values to context at runtime.
"""
import contextlib
| {"golden_diff": "diff --git a/src/prefect/tasks/templates/jinja2.py b/src/prefect/tasks/templates/jinja2.py\n--- a/src/prefect/tasks/templates/jinja2.py\n+++ b/src/prefect/tasks/templates/jinja2.py\n@@ -6,6 +6,7 @@\n \n import prefect\n from prefect import Task\n+from prefect.utilities.tasks import defaults_from_attrs\n \n \n class JinjaTemplateTask(Task):\n@@ -23,9 +24,10 @@\n \"\"\"\n \n def __init__(self, template: str = None, **kwargs: Any):\n- self.template = Template(template or \"\")\n+ self.template = template or \"\"\n super().__init__(**kwargs)\n \n+ @defaults_from_attrs(\"template\")\n def run(self, template: str = None, **format_kwargs: Any) -> str: # type: ignore\n \"\"\"\n Formats the Jinja Template with the provided kwargs.\n@@ -39,6 +41,6 @@\n Returns:\n - str: the rendered string\n \"\"\"\n- template = self.template if template is None else Template(template)\n+ template = Template(template)\n with prefect.context(**format_kwargs) as data:\n return template.render(**data)\ndiff --git a/src/prefect/utilities/context.py b/src/prefect/utilities/context.py\n--- a/src/prefect/utilities/context.py\n+++ b/src/prefect/utilities/context.py\n@@ -10,9 +10,11 @@\n \n ```python\n import prefect.context\n+\n with prefect.context(a=1, b=2):\n print(prefect.context.a) # 1\n-print (prefect.context.a) # undefined\n+\n+print(prefect.context.a) # undefined\n ```\n \n Prefect provides various key / value pairs in context that are always available during task runs:\n@@ -28,6 +30,8 @@\n | `tomorrow` | tomorrow's date formatted as `YYYY-MM-DD`|\n | `tomorrow_nodash` | tomorrow's date formatted as `YYYYMMDD`|\n | `task_name` | the name of the current task |\n+\n+Users can also provide values to context at runtime.\n \"\"\"\n \n import contextlib\n", "issue": "Context docs are broken\nFor some reason the actual `context` class signature is not being documented.\n", "before_files": [{"content": "# Licensed under LICENSE.md; also available at https://www.prefect.io/licenses/beta-eula\n\n\"\"\"\nThis module implements the Prefect context that is available when tasks run.\n\nTasks can import prefect.context and access attributes that will be overwritten\nwhen the task is run.\n\nExample:\n\n```python\nimport prefect.context\nwith prefect.context(a=1, b=2):\n print(prefect.context.a) # 1\nprint (prefect.context.a) # undefined\n```\n\nPrefect provides various key / value pairs in context that are always available during task runs:\n\n| Variable | Description |\n| :--- | --- |\n| `scheduled_start_time` | an actual datetime object representing the scheduled start time for the Flow run; falls back to `now` for unscheduled runs |\n| `date` | an actual datetime object representing the current time |\n| `today` | the current date formatted as `YYYY-MM-DD`|\n| `today_nodash` | the current date formatted as `YYYYMMDD`|\n| `yesterday` | yesterday's date formatted as `YYYY-MM-DD`|\n| `yesterday_nodash` | yesterday's date formatted as `YYYYMMDD`|\n| `tomorrow` | tomorrow's date formatted as `YYYY-MM-DD`|\n| `tomorrow_nodash` | tomorrow's date formatted as `YYYYMMDD`|\n| `task_name` | the name of the current task |\n\"\"\"\n\nimport contextlib\nimport threading\nfrom typing import Any, Iterator, MutableMapping\n\nfrom prefect.configuration import config\nfrom prefect.utilities.collections import DotDict\n\n\nclass Context(DotDict, threading.local):\n \"\"\"\n A thread safe context store for Prefect data.\n\n The `Context` is a `DotDict` subclass, and can be instantiated the same way.\n\n Args:\n - *args (Any): 
arguments to provide to the `DotDict` constructor (e.g.,\n an initial dictionary)\n - *kwargs (Any): any key / value pairs to initialize this context with\n \"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n if \"context\" in config:\n self.update(config.context)\n\n def __repr__(self) -> str:\n return \"<Context>\"\n\n @contextlib.contextmanager\n def __call__(self, *args: MutableMapping, **kwargs: Any) -> Iterator[\"Context\"]:\n \"\"\"\n A context manager for setting / resetting the Prefect context\n\n Example:\n import prefect.context\n with prefect.context(dict(a=1, b=2), c=3):\n print(prefect.context.a) # 1\n \"\"\"\n previous_context = self.copy()\n try:\n self.update(*args, **kwargs)\n yield self\n finally:\n self.clear()\n self.update(previous_context)\n\n\ncontext = Context()\n", "path": "src/prefect/utilities/context.py"}, {"content": "# Licensed under LICENSE.md; also available at https://www.prefect.io/licenses/beta-eula\n\nfrom typing import Any\n\nfrom jinja2 import Template\n\nimport prefect\nfrom prefect import Task\n\n\nclass JinjaTemplateTask(Task):\n \"\"\"\n This task contains a Jinja template which is formatted with the results of any\n upstream tasks and returned.\n\n Variables from `prefect.context` will also be used for rendering.\n\n Args:\n - template (str, optional): the optional _default_ template string to render at runtime;\n can also be provided as a keyword to `run`, which takes precendence over this default.\n - **kwargs (optional): additional keyword arguments to pass to the\n standard Task constructor\n \"\"\"\n\n def __init__(self, template: str = None, **kwargs: Any):\n self.template = Template(template or \"\")\n super().__init__(**kwargs)\n\n def run(self, template: str = None, **format_kwargs: Any) -> str: # type: ignore\n \"\"\"\n Formats the Jinja Template with the provided kwargs.\n\n Args:\n - template (str, optional): the template string to render; if not\n provided, `self.template` will be used\n - **format_kwargs (optional): keyword arguments to use for\n rendering; note that variables from `prefect.context` will also be used\n\n Returns:\n - str: the rendered string\n \"\"\"\n template = self.template if template is None else Template(template)\n with prefect.context(**format_kwargs) as data:\n return template.render(**data)\n", "path": "src/prefect/tasks/templates/jinja2.py"}], "after_files": [{"content": "# Licensed under LICENSE.md; also available at https://www.prefect.io/licenses/beta-eula\n\n\"\"\"\nThis module implements the Prefect context that is available when tasks run.\n\nTasks can import prefect.context and access attributes that will be overwritten\nwhen the task is run.\n\nExample:\n\n```python\nimport prefect.context\n\nwith prefect.context(a=1, b=2):\n print(prefect.context.a) # 1\n\nprint(prefect.context.a) # undefined\n```\n\nPrefect provides various key / value pairs in context that are always available during task runs:\n\n| Variable | Description |\n| :--- | --- |\n| `scheduled_start_time` | an actual datetime object representing the scheduled start time for the Flow run; falls back to `now` for unscheduled runs |\n| `date` | an actual datetime object representing the current time |\n| `today` | the current date formatted as `YYYY-MM-DD`|\n| `today_nodash` | the current date formatted as `YYYYMMDD`|\n| `yesterday` | yesterday's date formatted as `YYYY-MM-DD`|\n| `yesterday_nodash` | yesterday's date formatted as `YYYYMMDD`|\n| `tomorrow` | tomorrow's date formatted as 
`YYYY-MM-DD`|\n| `tomorrow_nodash` | tomorrow's date formatted as `YYYYMMDD`|\n| `task_name` | the name of the current task |\n\nUsers can also provide values to context at runtime.\n\"\"\"\n\nimport contextlib\nimport threading\nfrom typing import Any, Iterator, MutableMapping\n\nfrom prefect.configuration import config\nfrom prefect.utilities.collections import DotDict\n\n\nclass Context(DotDict, threading.local):\n \"\"\"\n A thread safe context store for Prefect data.\n\n The `Context` is a `DotDict` subclass, and can be instantiated the same way.\n\n Args:\n - *args (Any): arguments to provide to the `DotDict` constructor (e.g.,\n an initial dictionary)\n - *kwargs (Any): any key / value pairs to initialize this context with\n \"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n if \"context\" in config:\n self.update(config.context)\n\n def __repr__(self) -> str:\n return \"<Context>\"\n\n @contextlib.contextmanager\n def __call__(self, *args: MutableMapping, **kwargs: Any) -> Iterator[\"Context\"]:\n \"\"\"\n A context manager for setting / resetting the Prefect context\n\n Example:\n import prefect.context\n with prefect.context(dict(a=1, b=2), c=3):\n print(prefect.context.a) # 1\n \"\"\"\n previous_context = self.copy()\n try:\n self.update(*args, **kwargs)\n yield self\n finally:\n self.clear()\n self.update(previous_context)\n\n\ncontext = Context()\n", "path": "src/prefect/utilities/context.py"}, {"content": "# Licensed under LICENSE.md; also available at https://www.prefect.io/licenses/beta-eula\n\nfrom typing import Any\n\nfrom jinja2 import Template\n\nimport prefect\nfrom prefect import Task\nfrom prefect.utilities.tasks import defaults_from_attrs\n\n\nclass JinjaTemplateTask(Task):\n \"\"\"\n This task contains a Jinja template which is formatted with the results of any\n upstream tasks and returned.\n\n Variables from `prefect.context` will also be used for rendering.\n\n Args:\n - template (str, optional): the optional _default_ template string to render at runtime;\n can also be provided as a keyword to `run`, which takes precendence over this default.\n - **kwargs (optional): additional keyword arguments to pass to the\n standard Task constructor\n \"\"\"\n\n def __init__(self, template: str = None, **kwargs: Any):\n self.template = template or \"\"\n super().__init__(**kwargs)\n\n @defaults_from_attrs(\"template\")\n def run(self, template: str = None, **format_kwargs: Any) -> str: # type: ignore\n \"\"\"\n Formats the Jinja Template with the provided kwargs.\n\n Args:\n - template (str, optional): the template string to render; if not\n provided, `self.template` will be used\n - **format_kwargs (optional): keyword arguments to use for\n rendering; note that variables from `prefect.context` will also be used\n\n Returns:\n - str: the rendered string\n \"\"\"\n template = Template(template)\n with prefect.context(**format_kwargs) as data:\n return template.render(**data)\n", "path": "src/prefect/tasks/templates/jinja2.py"}]} | 1,492 | 469 |
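
The essential technique in the golden diff above is the `defaults_from_attrs` decorator from `prefect.utilities.tasks`: when `run()` is called without a `template` keyword, the decorator fills it in from the attribute stored by `__init__`, so the Jinja `Template` only needs to be built at call time. The sketch below reimplements that pattern in plain Python so the idea can be run without Prefect installed; the `Greeter` class and its template string are invented purely for illustration, and the real decorator may differ in details.

```python
import functools

def defaults_from_attrs(*attr_names):
    """Minimal stand-in: fill None keyword arguments from same-named instance attributes."""
    def decorator(method):
        @functools.wraps(method)
        def wrapper(self, *args, **kwargs):
            for name in attr_names:
                if kwargs.get(name) is None:
                    kwargs[name] = getattr(self, name)
            return method(self, *args, **kwargs)
        return wrapper
    return decorator

class Greeter:
    def __init__(self, template=None):
        # Store the raw default, much as the patched JinjaTemplateTask stores self.template.
        self.template = template or "Hello, {name}!"

    @defaults_from_attrs("template")
    def run(self, template=None, **fmt):
        # `template` is either the caller's override or the instance default.
        return template.format(**fmt)

print(Greeter().run(name="world"))                        # -> Hello, world!
print(Greeter().run(template="Hi {name}", name="world"))  # -> Hi world
```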
gh_patches_debug_10742 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-1238 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
United States NY ISO calculation on Dual Fuel is probably wrong
Just found your site today, really cool. I'd actually done a tech demo with streaming data from the nyiso as part of an MQTT / Kubernetes talk. Demo site at http://ny-power.org.
The Dual Fuel column is getting mapped to pretty dirty fossil fuel systems, which I think is probably inaccurate. My understanding of the dual fuel plants is they mostly burn Natural Gas, but can burn Oil instead when Natural Gas is constrained and targeted for heating (typically during peak heating demand in the winter). I looked up the total Oil burned for electricity generation in 2016 (the last year that numbers are available), and it was actually really low. So when I was simulating it I gave it a kind of 30% Oil / 70% NG number as an approximation. It's probably as good a guess as one can get. But the worse than coal numbers are I think pretty far off.
Is there a way in the system to provide a custom value, or is it just mapping back to the IPCC averaged numbers?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/US_NY.py`
Content:
```
1 #!/usr/bin/env python3
2
3 """Real time parser for the state of New York."""
4 from collections import defaultdict
5 from datetime import timedelta
6 from operator import itemgetter
7 from urllib.error import HTTPError
8
9 import arrow
10 import pandas as pd
11
12 mapping = {
13 'Dual Fuel': 'unknown',
14 'Natural Gas': 'gas',
15 'Nuclear': 'nuclear',
16 'Other Fossil Fuels': 'unknown',
17 'Other Renewables': 'unknown',
18 'Wind': 'wind',
19 'Hydro': 'hydro'
20 }
21
22
23 def read_csv_data(url):
24 """
25 Gets csv data from a url and returns a dataframe.
26 """
27
28 csv_data = pd.read_csv(url)
29
30 return csv_data
31
32
33 def timestamp_converter(timestamp_string):
34 """
35 Converts timestamps in nyiso data into aware datetime objects.
36 """
37
38 dt_naive = arrow.get(timestamp_string, 'MM/DD/YYYY HH:mm:ss')
39 dt_aware = dt_naive.replace(tzinfo='America/New_York').datetime
40
41 return dt_aware
42
43
44 def data_parser(df):
45 """
46 Takes dataframe and loops over rows to form dictionaries consisting of datetime and generation type.
47 Merges these dictionaries using datetime key.
48 Maps to type and returns a list of tuples containing datetime string and production.
49 """
50
51 chunks = []
52 for row in df.itertuples():
53 piece = {}
54 piece['datetime'] = row[1]
55 piece[row[3]] = row[4]
56 chunks.append(piece)
57
58 # Join dicts on shared 'datetime' keys.
59 combine = defaultdict(dict)
60 for elem in chunks:
61 combine[elem['datetime']].update(elem)
62
63 ordered = sorted(combine.values(), key=itemgetter("datetime"))
64
65 mapped_generation = []
66 for item in ordered:
67 mapped_types = [(mapping.get(k, k), v) for k, v in item.items()]
68
69 # Need to avoid multiple 'unknown' keys overwriting.
70 complete_production = defaultdict(lambda: 0.0)
71 for key, val in mapped_types:
72 try:
73 complete_production[key] += val
74 except TypeError:
75 # Datetime is a string at this point!
76 complete_production[key] = val
77
78 dt = complete_production.pop('datetime')
79 final = (dt, dict(complete_production))
80 mapped_generation.append(final)
81
82 return mapped_generation
83
84
85 def fetch_production(zone_key='US-NY', session=None, target_datetime=None, logger=None):
86 """
87 Requests the last known production mix (in MW) of a given zone
88
89 Arguments:
90 zone_key: used in case a parser is able to fetch multiple zones
91 session: requests session passed in order to re-use an existing session,
92 not used here due to difficulty providing it to pandas
93 target_datetime: the datetime for which we want production data. If not provided, we should
94 default it to now. The provided target_datetime is timezone-aware in UTC.
95 logger: an instance of a `logging.Logger`; all raised exceptions are also logged automatically
96
97 Return:
98 A list of dictionaries in the form:
99 {
100 'zoneKey': 'FR',
101 'datetime': '2017-01-01T00:00:00Z',
102 'production': {
103 'biomass': 0.0,
104 'coal': 0.0,
105 'gas': 0.0,
106 'hydro': 0.0,
107 'nuclear': null,
108 'oil': 0.0,
109 'solar': 0.0,
110 'wind': 0.0,
111 'geothermal': 0.0,
112 'unknown': 0.0
113 },
114 'storage': {
115 'hydro': -10.0,
116 },
117 'source': 'mysource.com'
118 }
119 """
120 if target_datetime:
121 # ensure we have an arrow object
122 target_datetime = arrow.get(target_datetime)
123 else:
124 target_datetime = arrow.now('America/New_York')
125
126 ny_date = target_datetime.format('YYYYMMDD')
127 mix_url = 'http://mis.nyiso.com/public/csv/rtfuelmix/{}rtfuelmix.csv'.format(ny_date)
128 try:
129 raw_data = read_csv_data(mix_url)
130 except HTTPError:
131 # this can happen when target_datetime has no data available
132 return None
133
134 clean_data = data_parser(raw_data)
135
136 production_mix = []
137 for datapoint in clean_data:
138 data = {
139 'zoneKey': zone_key,
140 'datetime': timestamp_converter(datapoint[0]),
141 'production': datapoint[1],
142 'storage': {'hydro': None},
143 'source': 'nyiso.com'
144 }
145
146 production_mix.append(data)
147
148 return production_mix
149
150
151 def fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):
152 """Requests the last known power exchange (in MW) between two zones
153
154 Arguments:
155 zone_key1, zone_key2: specifies which exchange to get
156 session: requests session passed in order to re-use an existing session,
157 not used here due to difficulty providing it to pandas
158 target_datetime: the datetime for which we want production data. If not provided, we should
159 default it to now. The provided target_datetime is timezone-aware in UTC.
160 logger: an instance of a `logging.Logger`; all raised exceptions are also logged automatically
161
162 Return:
163 A list of dictionaries in the form:
164 {
165 'sortedZoneKeys': 'DK->NO',
166 'datetime': '2017-01-01T00:00:00Z',
167 'netFlow': 0.0,
168 'source': 'mysource.com'
169 }
170 where net flow is from DK into NO
171 """
172 url = 'http://mis.nyiso.com/public/csv/ExternalLimitsFlows/{}ExternalLimitsFlows.csv'
173
174 sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))
175
176 # In the source CSV, positive is flow into NY, negative is flow out of NY.
177 # In Electricity Map, A->B means flow to B is positive.
178 if sorted_zone_keys == 'US-NEISO->US-NY':
179 direction = 1
180 relevant_exchanges = ['SCH - NE - NY', 'SCH - NPX_1385', 'SCH - NPX_CSC']
181 elif sorted_zone_keys == 'US-NY->US-PJM':
182 direction = -1
183 relevant_exchanges = ['SCH - PJ - NY', 'SCH - PJM_HTP', 'SCH - PJM_NEPTUNE', 'SCH - PJM_VFT']
184 elif sorted_zone_keys == 'CA-ON->US-NY':
185 direction = 1
186 relevant_exchanges = ['SCH - OH - NY']
187 elif sorted_zone_keys == 'CA-QC->US-NY':
188 direction = 1
189 relevant_exchanges = ['SCH - HQ_CEDARS', 'SCH - HQ - NY']
190 else:
191 raise NotImplementedError('Exchange pair not supported: {}'.format(sorted_zone_keys))
192
193 if target_datetime:
194 # ensure we have an arrow object
195 target_datetime = arrow.get(target_datetime)
196 else:
197 target_datetime = arrow.now('America/New_York')
198 ny_date = target_datetime.format('YYYYMMDD')
199 exchange_url = url.format(ny_date)
200
201 try:
202 exchange_data = read_csv_data(exchange_url)
203 except HTTPError:
204 # this can happen when target_datetime has no data available
205 return None
206
207 new_england_exs = exchange_data.loc[exchange_data['Interface Name'].isin(relevant_exchanges)]
208 consolidated_flows = new_england_exs.reset_index().groupby("Timestamp").sum()
209
210 now = arrow.utcnow()
211
212 exchange_5min = []
213 for row in consolidated_flows.itertuples():
214 flow = float(row[3]) * direction
215 # Timestamp for exchange does not include seconds.
216 dt = timestamp_converter(row[0] + ':00')
217
218 if (dt > now) and ((dt - now) < timedelta(seconds=300)):
219 # NYISO exchanges CSV (and only the exchanges CSV) includes data
220 # up to 5 minutes in the future (but only 5 minutes in the future).
221 # This also happens on their official website.
222 # Electricity Map raises error with data in the future, so skip
223 # that datapoint. If it's more than 5 minutes in the future,
224 # it's weird/unexpected and thus worthy of failure and logging.
225 continue
226
227 exchange = {
228 'sortedZoneKeys': sorted_zone_keys,
229 'datetime': dt,
230 'netFlow': flow,
231 'source': 'nyiso.com'
232 }
233
234 exchange_5min.append(exchange)
235
236 return exchange_5min
237
238
239 if __name__ == '__main__':
240 """Main method, never used by the Electricity Map backend, but handy for testing."""
241
242 from pprint import pprint
243 print('fetch_production() ->')
244 pprint(fetch_production())
245
246 print('fetch_production(target_datetime=arrow.get("2018-03-13T12:00Z") ->')
247 pprint(fetch_production(target_datetime=arrow.get("2018-03-13T12:00Z")))
248
249 print('fetch_production(target_datetime=arrow.get("2007-03-13T12:00Z") ->')
250 pprint(fetch_production(target_datetime=arrow.get("2007-03-13T12:00Z")))
251
252 print('fetch_exchange(US-NY, US-NEISO)')
253 pprint(fetch_exchange('US-NY', 'US-NEISO'))
254
255 print('fetch_exchange("US-NY", "CA-QC")')
256 pprint(fetch_exchange('US-NY', 'CA-QC'))
257
258 print('fetch_exchange("US-NY", "CA-QC", target_datetime=arrow.get("2018-03-13T12:00Z"))')
259 pprint(fetch_exchange('US-NY', 'CA-QC', target_datetime=arrow.get("2018-03-13T12:00Z")))
260
261 print('fetch_exchange("US-NY", "CA-QC", target_datetime=arrow.get("2007-03-13T12:00Z")))')
262 pprint(fetch_exchange('US-NY', 'CA-QC', target_datetime=arrow.get('2007-03-13T12:00Z')))
263
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsers/US_NY.py b/parsers/US_NY.py
--- a/parsers/US_NY.py
+++ b/parsers/US_NY.py
@@ -9,8 +9,15 @@
import arrow
import pandas as pd
+# Dual Fuel systems can run either Natural Gas or Oil, they represent
+# significantly more capacity in NY State than plants that can only
+# burn Natural Gas. When looking up fuel usage for NY in 2016 in
+# https://www.eia.gov/electricity/data/state/annual_generation_state.xls
+# 100 times more energy came from NG than Oil. That means Oil
+# consumption in the Dual Fuel systems is roughly ~1%, and to a first
+# approximation it's just Natural Gas.
mapping = {
- 'Dual Fuel': 'unknown',
+ 'Dual Fuel': 'gas',
'Natural Gas': 'gas',
'Nuclear': 'nuclear',
'Other Fossil Fuels': 'unknown',
| {"golden_diff": "diff --git a/parsers/US_NY.py b/parsers/US_NY.py\n--- a/parsers/US_NY.py\n+++ b/parsers/US_NY.py\n@@ -9,8 +9,15 @@\n import arrow\n import pandas as pd\n \n+# Dual Fuel systems can run either Natural Gas or Oil, they represent\n+# significantly more capacity in NY State than plants that can only\n+# burn Natural Gas. When looking up fuel usage for NY in 2016 in\n+# https://www.eia.gov/electricity/data/state/annual_generation_state.xls\n+# 100 times more energy came from NG than Oil. That means Oil\n+# consumption in the Dual Fuel systems is roughly ~1%, and to a first\n+# approximation it's just Natural Gas.\n mapping = {\n- 'Dual Fuel': 'unknown',\n+ 'Dual Fuel': 'gas',\n 'Natural Gas': 'gas',\n 'Nuclear': 'nuclear',\n 'Other Fossil Fuels': 'unknown',\n", "issue": "United States NY ISO calculation on Dual Fuel is probably wrong\nJust found your site today, really cool. I'd actually done a tech demo with streaming data from the nyiso as part of an MQTT / Kubernetes talk. Demo site at http://ny-power.org.\r\n\r\nThe Dual Fuel column is getting mapped to pretty dirty fossil fuel systems, which I think is probably inaccurate. My understanding of the dual fuel plants is they mostly burn Natural Gas, but can burn Oil instead when Natural Gas is constrained and targeted for heating (typically during peak heating demand in the winter). I looked up the total Oil burned for electricity generation in 2016 (the last year that numbers are available), and it was actually really low. So when I was simulating it I gave it a kind of 30% Oil / 70% NG number as an approximation. It's probably as good a guess as one can get. But the worse than coal numbers are I think pretty far off.\r\n\r\nIs there a way in the system to provide a custom value, or is it just mapping back to the IPCC averaged numbers? 
\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"Real time parser for the state of New York.\"\"\"\nfrom collections import defaultdict\nfrom datetime import timedelta\nfrom operator import itemgetter\nfrom urllib.error import HTTPError\n\nimport arrow\nimport pandas as pd\n\nmapping = {\n 'Dual Fuel': 'unknown',\n 'Natural Gas': 'gas',\n 'Nuclear': 'nuclear',\n 'Other Fossil Fuels': 'unknown',\n 'Other Renewables': 'unknown',\n 'Wind': 'wind',\n 'Hydro': 'hydro'\n}\n\n\ndef read_csv_data(url):\n \"\"\"\n Gets csv data from a url and returns a dataframe.\n \"\"\"\n\n csv_data = pd.read_csv(url)\n\n return csv_data\n\n\ndef timestamp_converter(timestamp_string):\n \"\"\"\n Converts timestamps in nyiso data into aware datetime objects.\n \"\"\"\n\n dt_naive = arrow.get(timestamp_string, 'MM/DD/YYYY HH:mm:ss')\n dt_aware = dt_naive.replace(tzinfo='America/New_York').datetime\n\n return dt_aware\n\n\ndef data_parser(df):\n \"\"\"\n Takes dataframe and loops over rows to form dictionaries consisting of datetime and generation type.\n Merges these dictionaries using datetime key.\n Maps to type and returns a list of tuples containing datetime string and production.\n \"\"\"\n\n chunks = []\n for row in df.itertuples():\n piece = {}\n piece['datetime'] = row[1]\n piece[row[3]] = row[4]\n chunks.append(piece)\n\n # Join dicts on shared 'datetime' keys.\n combine = defaultdict(dict)\n for elem in chunks:\n combine[elem['datetime']].update(elem)\n\n ordered = sorted(combine.values(), key=itemgetter(\"datetime\"))\n\n mapped_generation = []\n for item in ordered:\n mapped_types = [(mapping.get(k, k), v) for k, v in item.items()]\n\n # Need to avoid multiple 'unknown' keys overwriting.\n complete_production = defaultdict(lambda: 0.0)\n for key, val in mapped_types:\n try:\n complete_production[key] += val\n except TypeError:\n # Datetime is a string at this point!\n complete_production[key] = val\n\n dt = complete_production.pop('datetime')\n final = (dt, dict(complete_production))\n mapped_generation.append(final)\n\n return mapped_generation\n\n\ndef fetch_production(zone_key='US-NY', session=None, target_datetime=None, logger=None):\n \"\"\"\n Requests the last known production mix (in MW) of a given zone\n\n Arguments:\n zone_key: used in case a parser is able to fetch multiple zones\n session: requests session passed in order to re-use an existing session,\n not used here due to difficulty providing it to pandas\n target_datetime: the datetime for which we want production data. If not provided, we should\n default it to now. 
The provided target_datetime is timezone-aware in UTC.\n logger: an instance of a `logging.Logger`; all raised exceptions are also logged automatically\n\n Return:\n A list of dictionaries in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n if target_datetime:\n # ensure we have an arrow object\n target_datetime = arrow.get(target_datetime)\n else:\n target_datetime = arrow.now('America/New_York')\n\n ny_date = target_datetime.format('YYYYMMDD')\n mix_url = 'http://mis.nyiso.com/public/csv/rtfuelmix/{}rtfuelmix.csv'.format(ny_date)\n try:\n raw_data = read_csv_data(mix_url)\n except HTTPError:\n # this can happen when target_datetime has no data available\n return None\n\n clean_data = data_parser(raw_data)\n\n production_mix = []\n for datapoint in clean_data:\n data = {\n 'zoneKey': zone_key,\n 'datetime': timestamp_converter(datapoint[0]),\n 'production': datapoint[1],\n 'storage': {'hydro': None},\n 'source': 'nyiso.com'\n }\n\n production_mix.append(data)\n\n return production_mix\n\n\ndef fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):\n \"\"\"Requests the last known power exchange (in MW) between two zones\n\n Arguments:\n zone_key1, zone_key2: specifies which exchange to get\n session: requests session passed in order to re-use an existing session,\n not used here due to difficulty providing it to pandas\n target_datetime: the datetime for which we want production data. If not provided, we should\n default it to now. The provided target_datetime is timezone-aware in UTC.\n logger: an instance of a `logging.Logger`; all raised exceptions are also logged automatically\n\n Return:\n A list of dictionaries in the form:\n {\n 'sortedZoneKeys': 'DK->NO',\n 'datetime': '2017-01-01T00:00:00Z',\n 'netFlow': 0.0,\n 'source': 'mysource.com'\n }\n where net flow is from DK into NO\n \"\"\"\n url = 'http://mis.nyiso.com/public/csv/ExternalLimitsFlows/{}ExternalLimitsFlows.csv'\n\n sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))\n\n # In the source CSV, positive is flow into NY, negative is flow out of NY.\n # In Electricity Map, A->B means flow to B is positive.\n if sorted_zone_keys == 'US-NEISO->US-NY':\n direction = 1\n relevant_exchanges = ['SCH - NE - NY', 'SCH - NPX_1385', 'SCH - NPX_CSC']\n elif sorted_zone_keys == 'US-NY->US-PJM':\n direction = -1\n relevant_exchanges = ['SCH - PJ - NY', 'SCH - PJM_HTP', 'SCH - PJM_NEPTUNE', 'SCH - PJM_VFT']\n elif sorted_zone_keys == 'CA-ON->US-NY':\n direction = 1\n relevant_exchanges = ['SCH - OH - NY']\n elif sorted_zone_keys == 'CA-QC->US-NY':\n direction = 1\n relevant_exchanges = ['SCH - HQ_CEDARS', 'SCH - HQ - NY']\n else:\n raise NotImplementedError('Exchange pair not supported: {}'.format(sorted_zone_keys))\n\n if target_datetime:\n # ensure we have an arrow object\n target_datetime = arrow.get(target_datetime)\n else:\n target_datetime = arrow.now('America/New_York')\n ny_date = target_datetime.format('YYYYMMDD')\n exchange_url = url.format(ny_date)\n\n try:\n exchange_data = read_csv_data(exchange_url)\n except HTTPError:\n # this can happen when target_datetime has no data available\n return None\n\n new_england_exs = exchange_data.loc[exchange_data['Interface Name'].isin(relevant_exchanges)]\n 
consolidated_flows = new_england_exs.reset_index().groupby(\"Timestamp\").sum()\n\n now = arrow.utcnow()\n\n exchange_5min = []\n for row in consolidated_flows.itertuples():\n flow = float(row[3]) * direction\n # Timestamp for exchange does not include seconds.\n dt = timestamp_converter(row[0] + ':00')\n\n if (dt > now) and ((dt - now) < timedelta(seconds=300)):\n # NYISO exchanges CSV (and only the exchanges CSV) includes data\n # up to 5 minutes in the future (but only 5 minutes in the future).\n # This also happens on their official website.\n # Electricity Map raises error with data in the future, so skip\n # that datapoint. If it's more than 5 minutes in the future,\n # it's weird/unexpected and thus worthy of failure and logging.\n continue\n\n exchange = {\n 'sortedZoneKeys': sorted_zone_keys,\n 'datetime': dt,\n 'netFlow': flow,\n 'source': 'nyiso.com'\n }\n\n exchange_5min.append(exchange)\n\n return exchange_5min\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n from pprint import pprint\n print('fetch_production() ->')\n pprint(fetch_production())\n\n print('fetch_production(target_datetime=arrow.get(\"2018-03-13T12:00Z\") ->')\n pprint(fetch_production(target_datetime=arrow.get(\"2018-03-13T12:00Z\")))\n\n print('fetch_production(target_datetime=arrow.get(\"2007-03-13T12:00Z\") ->')\n pprint(fetch_production(target_datetime=arrow.get(\"2007-03-13T12:00Z\")))\n\n print('fetch_exchange(US-NY, US-NEISO)')\n pprint(fetch_exchange('US-NY', 'US-NEISO'))\n\n print('fetch_exchange(\"US-NY\", \"CA-QC\")')\n pprint(fetch_exchange('US-NY', 'CA-QC'))\n\n print('fetch_exchange(\"US-NY\", \"CA-QC\", target_datetime=arrow.get(\"2018-03-13T12:00Z\"))')\n pprint(fetch_exchange('US-NY', 'CA-QC', target_datetime=arrow.get(\"2018-03-13T12:00Z\")))\n\n print('fetch_exchange(\"US-NY\", \"CA-QC\", target_datetime=arrow.get(\"2007-03-13T12:00Z\")))')\n pprint(fetch_exchange('US-NY', 'CA-QC', target_datetime=arrow.get('2007-03-13T12:00Z')))\n", "path": "parsers/US_NY.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"Real time parser for the state of New York.\"\"\"\nfrom collections import defaultdict\nfrom datetime import timedelta\nfrom operator import itemgetter\nfrom urllib.error import HTTPError\n\nimport arrow\nimport pandas as pd\n\n# Dual Fuel systems can run either Natural Gas or Oil, they represent\n# significantly more capacity in NY State than plants that can only\n# burn Natural Gas. When looking up fuel usage for NY in 2016 in\n# https://www.eia.gov/electricity/data/state/annual_generation_state.xls\n# 100 times more energy came from NG than Oil. 
That means Oil\n# consumption in the Dual Fuel systems is roughly ~1%, and to a first\n# approximation it's just Natural Gas.\nmapping = {\n 'Dual Fuel': 'gas',\n 'Natural Gas': 'gas',\n 'Nuclear': 'nuclear',\n 'Other Fossil Fuels': 'unknown',\n 'Other Renewables': 'unknown',\n 'Wind': 'wind',\n 'Hydro': 'hydro'\n}\n\n\ndef read_csv_data(url):\n \"\"\"\n Gets csv data from a url and returns a dataframe.\n \"\"\"\n\n csv_data = pd.read_csv(url)\n\n return csv_data\n\n\ndef timestamp_converter(timestamp_string):\n \"\"\"\n Converts timestamps in nyiso data into aware datetime objects.\n \"\"\"\n\n dt_naive = arrow.get(timestamp_string, 'MM/DD/YYYY HH:mm:ss')\n dt_aware = dt_naive.replace(tzinfo='America/New_York').datetime\n\n return dt_aware\n\n\ndef data_parser(df):\n \"\"\"\n Takes dataframe and loops over rows to form dictionaries consisting of datetime and generation type.\n Merges these dictionaries using datetime key.\n Maps to type and returns a list of tuples containing datetime string and production.\n \"\"\"\n\n chunks = []\n for row in df.itertuples():\n piece = {}\n piece['datetime'] = row[1]\n piece[row[3]] = row[4]\n chunks.append(piece)\n\n # Join dicts on shared 'datetime' keys.\n combine = defaultdict(dict)\n for elem in chunks:\n combine[elem['datetime']].update(elem)\n\n ordered = sorted(combine.values(), key=itemgetter(\"datetime\"))\n\n mapped_generation = []\n for item in ordered:\n mapped_types = [(mapping.get(k, k), v) for k, v in item.items()]\n\n # Need to avoid multiple 'unknown' keys overwriting.\n complete_production = defaultdict(lambda: 0.0)\n for key, val in mapped_types:\n try:\n complete_production[key] += val\n except TypeError:\n # Datetime is a string at this point!\n complete_production[key] = val\n\n dt = complete_production.pop('datetime')\n final = (dt, dict(complete_production))\n mapped_generation.append(final)\n\n return mapped_generation\n\n\ndef fetch_production(zone_key='US-NY', session=None, target_datetime=None, logger=None):\n \"\"\"\n Requests the last known production mix (in MW) of a given zone\n\n Arguments:\n zone_key: used in case a parser is able to fetch multiple zones\n session: requests session passed in order to re-use an existing session,\n not used here due to difficulty providing it to pandas\n target_datetime: the datetime for which we want production data. If not provided, we should\n default it to now. 
The provided target_datetime is timezone-aware in UTC.\n logger: an instance of a `logging.Logger`; all raised exceptions are also logged automatically\n\n Return:\n A list of dictionaries in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n if target_datetime:\n # ensure we have an arrow object\n target_datetime = arrow.get(target_datetime)\n else:\n target_datetime = arrow.now('America/New_York')\n\n ny_date = target_datetime.format('YYYYMMDD')\n mix_url = 'http://mis.nyiso.com/public/csv/rtfuelmix/{}rtfuelmix.csv'.format(ny_date)\n try:\n raw_data = read_csv_data(mix_url)\n except HTTPError:\n # this can happen when target_datetime has no data available\n return None\n\n clean_data = data_parser(raw_data)\n\n production_mix = []\n for datapoint in clean_data:\n data = {\n 'zoneKey': zone_key,\n 'datetime': timestamp_converter(datapoint[0]),\n 'production': datapoint[1],\n 'storage': {'hydro': None},\n 'source': 'nyiso.com'\n }\n\n production_mix.append(data)\n\n return production_mix\n\n\ndef fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):\n \"\"\"Requests the last known power exchange (in MW) between two zones\n\n Arguments:\n zone_key1, zone_key2: specifies which exchange to get\n session: requests session passed in order to re-use an existing session,\n not used here due to difficulty providing it to pandas\n target_datetime: the datetime for which we want production data. If not provided, we should\n default it to now. The provided target_datetime is timezone-aware in UTC.\n logger: an instance of a `logging.Logger`; all raised exceptions are also logged automatically\n\n Return:\n A list of dictionaries in the form:\n {\n 'sortedZoneKeys': 'DK->NO',\n 'datetime': '2017-01-01T00:00:00Z',\n 'netFlow': 0.0,\n 'source': 'mysource.com'\n }\n where net flow is from DK into NO\n \"\"\"\n url = 'http://mis.nyiso.com/public/csv/ExternalLimitsFlows/{}ExternalLimitsFlows.csv'\n\n sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))\n\n # In the source CSV, positive is flow into NY, negative is flow out of NY.\n # In Electricity Map, A->B means flow to B is positive.\n if sorted_zone_keys == 'US-NEISO->US-NY':\n direction = 1\n relevant_exchanges = ['SCH - NE - NY', 'SCH - NPX_1385', 'SCH - NPX_CSC']\n elif sorted_zone_keys == 'US-NY->US-PJM':\n direction = -1\n relevant_exchanges = ['SCH - PJ - NY', 'SCH - PJM_HTP', 'SCH - PJM_NEPTUNE', 'SCH - PJM_VFT']\n elif sorted_zone_keys == 'CA-ON->US-NY':\n direction = 1\n relevant_exchanges = ['SCH - OH - NY']\n elif sorted_zone_keys == 'CA-QC->US-NY':\n direction = 1\n relevant_exchanges = ['SCH - HQ_CEDARS', 'SCH - HQ - NY']\n else:\n raise NotImplementedError('Exchange pair not supported: {}'.format(sorted_zone_keys))\n\n if target_datetime:\n # ensure we have an arrow object\n target_datetime = arrow.get(target_datetime)\n else:\n target_datetime = arrow.now('America/New_York')\n ny_date = target_datetime.format('YYYYMMDD')\n exchange_url = url.format(ny_date)\n\n try:\n exchange_data = read_csv_data(exchange_url)\n except HTTPError:\n # this can happen when target_datetime has no data available\n return None\n\n new_england_exs = exchange_data.loc[exchange_data['Interface Name'].isin(relevant_exchanges)]\n 
consolidated_flows = new_england_exs.reset_index().groupby(\"Timestamp\").sum()\n\n now = arrow.utcnow()\n\n exchange_5min = []\n for row in consolidated_flows.itertuples():\n flow = float(row[3]) * direction\n # Timestamp for exchange does not include seconds.\n dt = timestamp_converter(row[0] + ':00')\n\n if (dt > now) and ((dt - now) < timedelta(seconds=300)):\n # NYISO exchanges CSV (and only the exchanges CSV) includes data\n # up to 5 minutes in the future (but only 5 minutes in the future).\n # This also happens on their official website.\n # Electricity Map raises error with data in the future, so skip\n # that datapoint. If it's more than 5 minutes in the future,\n # it's weird/unexpected and thus worthy of failure and logging.\n continue\n\n exchange = {\n 'sortedZoneKeys': sorted_zone_keys,\n 'datetime': dt,\n 'netFlow': flow,\n 'source': 'nyiso.com'\n }\n\n exchange_5min.append(exchange)\n\n return exchange_5min\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n from pprint import pprint\n print('fetch_production() ->')\n pprint(fetch_production())\n\n print('fetch_production(target_datetime=arrow.get(\"2018-03-13T12:00Z\") ->')\n pprint(fetch_production(target_datetime=arrow.get(\"2018-03-13T12:00Z\")))\n\n print('fetch_production(target_datetime=arrow.get(\"2007-03-13T12:00Z\") ->')\n pprint(fetch_production(target_datetime=arrow.get(\"2007-03-13T12:00Z\")))\n\n print('fetch_exchange(US-NY, US-NEISO)')\n pprint(fetch_exchange('US-NY', 'US-NEISO'))\n\n print('fetch_exchange(\"US-NY\", \"CA-QC\")')\n pprint(fetch_exchange('US-NY', 'CA-QC'))\n\n print('fetch_exchange(\"US-NY\", \"CA-QC\", target_datetime=arrow.get(\"2018-03-13T12:00Z\"))')\n pprint(fetch_exchange('US-NY', 'CA-QC', target_datetime=arrow.get(\"2018-03-13T12:00Z\")))\n\n print('fetch_exchange(\"US-NY\", \"CA-QC\", target_datetime=arrow.get(\"2007-03-13T12:00Z\")))')\n pprint(fetch_exchange('US-NY', 'CA-QC', target_datetime=arrow.get('2007-03-13T12:00Z')))\n", "path": "parsers/US_NY.py"}]} | 3,487 | 217 |
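
The fix in this record is a one-line change to the `mapping` dictionary, but its effect depends on the defaultdict aggregation already present in `data_parser`: once both 'Dual Fuel' and 'Natural Gas' map to 'gas', their megawatt values are summed instead of one key overwriting the other. A minimal sketch of that aggregation follows; the MW figures are invented for illustration only.

```python
from collections import defaultdict

# Hypothetical fuel-mix sample row, reshaped the way data_parser merges it.
item = {
    "datetime": "03/13/2018 12:00:00",
    "Dual Fuel": 2500.0,
    "Natural Gas": 1800.0,
    "Nuclear": 5300.0,
}

mapping = {"Dual Fuel": "gas", "Natural Gas": "gas", "Nuclear": "nuclear"}

production = defaultdict(float)
for key, value in item.items():
    if key == "datetime":
        continue  # keep the timestamp out of the production totals
    production[mapping.get(key, key)] += value

print(dict(production))  # {'gas': 4300.0, 'nuclear': 5300.0}
```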
gh_patches_debug_20113 | rasdani/github-patches | git_diff | googleapis__google-auth-library-python-708 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Getting project ID using Application Default Credentials fails when gcloud command writes anything to stderr
- OS: Ubuntu 20.04
- Python version: 3.8
- pip version: 20.0.2
- `google-auth` version: 1.19.2
#### Steps to reproduce
1. Arrange for gcloud to throw a warning. For example I'm suffering from this https://github.com/GoogleCloudPlatform/gsutil/issues/999
2. Attempt to use ADC e.g. `credentials, project = google.auth.default()`
3. Note that project always comes back at None even if `gcloud config set project` is correctly set
4. Root cause seems to be that in _cloud_sdk.py/get_project_id() the subprocess.check_output command merges stderr and stdout. So in the case that stderr is not empty and the subprocess does not fail, you might get badly formed JSON on which json.loads a few lines later chokes.
For example, my raw gcloud output is like:
/snap/google-cloud-sdk/165/lib/third_party/requests/__init__.py:83: RequestsDependencyWarning: Old version of cryptography ([1, 2, 3]) may cause slowdown.\n warnings.warn(warning, RequestsDependencyWarning)\n{\n "configuration": {\n "active_configuration": "default",\n "properties": {\n "core": {\n "account": "[email protected]",\n "disable_usage_reporting": "False",\n "project": "my-test-project"\n },\n "deployment_manager": {\n "glob_imports": "True"\n }\n }\n },\n "credential": {\n "access_token".... etc etc.
Expected behaviour: non-fatal errors or warnings from gcloud should not corrupt the output and cause the project ID lookup to fail.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `google/auth/_cloud_sdk.py`
Content:
```
1 # Copyright 2015 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Helpers for reading the Google Cloud SDK's configuration."""
16
17 import json
18 import os
19 import subprocess
20
21 import six
22
23 from google.auth import environment_vars
24 from google.auth import exceptions
25
26
27 # The ~/.config subdirectory containing gcloud credentials.
28 _CONFIG_DIRECTORY = "gcloud"
29 # Windows systems store config at %APPDATA%\gcloud
30 _WINDOWS_CONFIG_ROOT_ENV_VAR = "APPDATA"
31 # The name of the file in the Cloud SDK config that contains default
32 # credentials.
33 _CREDENTIALS_FILENAME = "application_default_credentials.json"
34 # The name of the Cloud SDK shell script
35 _CLOUD_SDK_POSIX_COMMAND = "gcloud"
36 _CLOUD_SDK_WINDOWS_COMMAND = "gcloud.cmd"
37 # The command to get the Cloud SDK configuration
38 _CLOUD_SDK_CONFIG_COMMAND = ("config", "config-helper", "--format", "json")
39 # The command to get google user access token
40 _CLOUD_SDK_USER_ACCESS_TOKEN_COMMAND = ("auth", "print-access-token")
41 # Cloud SDK's application-default client ID
42 CLOUD_SDK_CLIENT_ID = (
43 "764086051850-6qr4p6gpi6hn506pt8ejuq83di341hur.apps.googleusercontent.com"
44 )
45
46
47 def get_config_path():
48 """Returns the absolute path the the Cloud SDK's configuration directory.
49
50 Returns:
51 str: The Cloud SDK config path.
52 """
53 # If the path is explicitly set, return that.
54 try:
55 return os.environ[environment_vars.CLOUD_SDK_CONFIG_DIR]
56 except KeyError:
57 pass
58
59 # Non-windows systems store this at ~/.config/gcloud
60 if os.name != "nt":
61 return os.path.join(os.path.expanduser("~"), ".config", _CONFIG_DIRECTORY)
62 # Windows systems store config at %APPDATA%\gcloud
63 else:
64 try:
65 return os.path.join(
66 os.environ[_WINDOWS_CONFIG_ROOT_ENV_VAR], _CONFIG_DIRECTORY
67 )
68 except KeyError:
69 # This should never happen unless someone is really
70 # messing with things, but we'll cover the case anyway.
71 drive = os.environ.get("SystemDrive", "C:")
72 return os.path.join(drive, "\\", _CONFIG_DIRECTORY)
73
74
75 def get_application_default_credentials_path():
76 """Gets the path to the application default credentials file.
77
78 The path may or may not exist.
79
80 Returns:
81 str: The full path to application default credentials.
82 """
83 config_path = get_config_path()
84 return os.path.join(config_path, _CREDENTIALS_FILENAME)
85
86
87 def get_project_id():
88 """Gets the project ID from the Cloud SDK.
89
90 Returns:
91 Optional[str]: The project ID.
92 """
93 if os.name == "nt":
94 command = _CLOUD_SDK_WINDOWS_COMMAND
95 else:
96 command = _CLOUD_SDK_POSIX_COMMAND
97
98 try:
99 output = subprocess.check_output(
100 (command,) + _CLOUD_SDK_CONFIG_COMMAND, stderr=subprocess.STDOUT
101 )
102 except (subprocess.CalledProcessError, OSError, IOError):
103 return None
104
105 try:
106 configuration = json.loads(output.decode("utf-8"))
107 except ValueError:
108 return None
109
110 try:
111 return configuration["configuration"]["properties"]["core"]["project"]
112 except KeyError:
113 return None
114
115
116 def get_auth_access_token(account=None):
117 """Load user access token with the ``gcloud auth print-access-token`` command.
118
119 Args:
120 account (Optional[str]): Account to get the access token for. If not
121 specified, the current active account will be used.
122
123 Returns:
124 str: The user access token.
125
126 Raises:
127 google.auth.exceptions.UserAccessTokenError: if failed to get access
128 token from gcloud.
129 """
130 if os.name == "nt":
131 command = _CLOUD_SDK_WINDOWS_COMMAND
132 else:
133 command = _CLOUD_SDK_POSIX_COMMAND
134
135 try:
136 if account:
137 command = (
138 (command,)
139 + _CLOUD_SDK_USER_ACCESS_TOKEN_COMMAND
140 + ("--account=" + account,)
141 )
142 else:
143 command = (command,) + _CLOUD_SDK_USER_ACCESS_TOKEN_COMMAND
144
145 access_token = subprocess.check_output(command, stderr=subprocess.STDOUT)
146 # remove the trailing "\n"
147 return access_token.decode("utf-8").strip()
148 except (subprocess.CalledProcessError, OSError, IOError) as caught_exc:
149 new_exc = exceptions.UserAccessTokenError(
150 "Failed to obtain access token", caught_exc
151 )
152 six.raise_from(new_exc, caught_exc)
153
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/google/auth/_cloud_sdk.py b/google/auth/_cloud_sdk.py
--- a/google/auth/_cloud_sdk.py
+++ b/google/auth/_cloud_sdk.py
@@ -84,6 +84,13 @@
return os.path.join(config_path, _CREDENTIALS_FILENAME)
+def _run_subprocess_ignore_stderr(command):
+ """ Return subprocess.check_output with the given command and ignores stderr."""
+ with open(os.devnull, "w") as devnull:
+ output = subprocess.check_output(command, stderr=devnull)
+ return output
+
+
def get_project_id():
"""Gets the project ID from the Cloud SDK.
@@ -96,9 +103,9 @@
command = _CLOUD_SDK_POSIX_COMMAND
try:
- output = subprocess.check_output(
- (command,) + _CLOUD_SDK_CONFIG_COMMAND, stderr=subprocess.STDOUT
- )
+ # Ignore the stderr coming from gcloud, so it won't be mixed into the output.
+ # https://github.com/googleapis/google-auth-library-python/issues/673
+ output = _run_subprocess_ignore_stderr((command,) + _CLOUD_SDK_CONFIG_COMMAND)
except (subprocess.CalledProcessError, OSError, IOError):
return None
| {"golden_diff": "diff --git a/google/auth/_cloud_sdk.py b/google/auth/_cloud_sdk.py\n--- a/google/auth/_cloud_sdk.py\n+++ b/google/auth/_cloud_sdk.py\n@@ -84,6 +84,13 @@\n return os.path.join(config_path, _CREDENTIALS_FILENAME)\n \n \n+def _run_subprocess_ignore_stderr(command):\n+ \"\"\" Return subprocess.check_output with the given command and ignores stderr.\"\"\"\n+ with open(os.devnull, \"w\") as devnull:\n+ output = subprocess.check_output(command, stderr=devnull)\n+ return output\n+\n+\n def get_project_id():\n \"\"\"Gets the project ID from the Cloud SDK.\n \n@@ -96,9 +103,9 @@\n command = _CLOUD_SDK_POSIX_COMMAND\n \n try:\n- output = subprocess.check_output(\n- (command,) + _CLOUD_SDK_CONFIG_COMMAND, stderr=subprocess.STDOUT\n- )\n+ # Ignore the stderr coming from gcloud, so it won't be mixed into the output.\n+ # https://github.com/googleapis/google-auth-library-python/issues/673\n+ output = _run_subprocess_ignore_stderr((command,) + _CLOUD_SDK_CONFIG_COMMAND)\n except (subprocess.CalledProcessError, OSError, IOError):\n return None\n", "issue": "Getting project ID using Application Default Credentials fails when gcloud command writes anything to stderr\n - OS: Ubuntu 20.04\r\n - Python version: 3.8\r\n - pip version: 20.0.2\r\n - `google-auth` version: 1.19.2\r\n\r\n#### Steps to reproduce\r\n\r\n 1. Arrange for gcloud to throw a warning. For example I'm suffering from this https://github.com/GoogleCloudPlatform/gsutil/issues/999\r\n 2. Attempt to use ADC e.g. `credentials, project = google.auth.default()`\r\n 3. Note that project always comes back at None even if `gcloud config set project` is correctly set\r\n 4. Root cause seems to be that in _cloud_sdk.py/get_project_id() the subprocess.check_output command merges stderr and stdout. So in the case that stderr is not empty and the subprocess does not fail, you might get badly formed JSON on which json.loads a few lines later chokes.\r\n\r\nFor example, my raw gcloud output is like:\r\n\r\n/snap/google-cloud-sdk/165/lib/third_party/requests/__init__.py:83: RequestsDependencyWarning: Old version of cryptography ([1, 2, 3]) may cause slowdown.\\n warnings.warn(warning, RequestsDependencyWarning)\\n{\\n \"configuration\": {\\n \"active_configuration\": \"default\",\\n \"properties\": {\\n \"core\": {\\n \"account\": \"[email protected]\",\\n \"disable_usage_reporting\": \"False\",\\n \"project\": \"my-test-project\"\\n },\\n \"deployment_manager\": {\\n \"glob_imports\": \"True\"\\n }\\n }\\n },\\n \"credential\": {\\n \"access_token\".... 
etc etc.\r\n\r\nExpected behaviour: non-fatal errors or warnings from gcloud should not corrupt the output and cause the project ID lookup to fail.\n", "before_files": [{"content": "# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Helpers for reading the Google Cloud SDK's configuration.\"\"\"\n\nimport json\nimport os\nimport subprocess\n\nimport six\n\nfrom google.auth import environment_vars\nfrom google.auth import exceptions\n\n\n# The ~/.config subdirectory containing gcloud credentials.\n_CONFIG_DIRECTORY = \"gcloud\"\n# Windows systems store config at %APPDATA%\\gcloud\n_WINDOWS_CONFIG_ROOT_ENV_VAR = \"APPDATA\"\n# The name of the file in the Cloud SDK config that contains default\n# credentials.\n_CREDENTIALS_FILENAME = \"application_default_credentials.json\"\n# The name of the Cloud SDK shell script\n_CLOUD_SDK_POSIX_COMMAND = \"gcloud\"\n_CLOUD_SDK_WINDOWS_COMMAND = \"gcloud.cmd\"\n# The command to get the Cloud SDK configuration\n_CLOUD_SDK_CONFIG_COMMAND = (\"config\", \"config-helper\", \"--format\", \"json\")\n# The command to get google user access token\n_CLOUD_SDK_USER_ACCESS_TOKEN_COMMAND = (\"auth\", \"print-access-token\")\n# Cloud SDK's application-default client ID\nCLOUD_SDK_CLIENT_ID = (\n \"764086051850-6qr4p6gpi6hn506pt8ejuq83di341hur.apps.googleusercontent.com\"\n)\n\n\ndef get_config_path():\n \"\"\"Returns the absolute path the the Cloud SDK's configuration directory.\n\n Returns:\n str: The Cloud SDK config path.\n \"\"\"\n # If the path is explicitly set, return that.\n try:\n return os.environ[environment_vars.CLOUD_SDK_CONFIG_DIR]\n except KeyError:\n pass\n\n # Non-windows systems store this at ~/.config/gcloud\n if os.name != \"nt\":\n return os.path.join(os.path.expanduser(\"~\"), \".config\", _CONFIG_DIRECTORY)\n # Windows systems store config at %APPDATA%\\gcloud\n else:\n try:\n return os.path.join(\n os.environ[_WINDOWS_CONFIG_ROOT_ENV_VAR], _CONFIG_DIRECTORY\n )\n except KeyError:\n # This should never happen unless someone is really\n # messing with things, but we'll cover the case anyway.\n drive = os.environ.get(\"SystemDrive\", \"C:\")\n return os.path.join(drive, \"\\\\\", _CONFIG_DIRECTORY)\n\n\ndef get_application_default_credentials_path():\n \"\"\"Gets the path to the application default credentials file.\n\n The path may or may not exist.\n\n Returns:\n str: The full path to application default credentials.\n \"\"\"\n config_path = get_config_path()\n return os.path.join(config_path, _CREDENTIALS_FILENAME)\n\n\ndef get_project_id():\n \"\"\"Gets the project ID from the Cloud SDK.\n\n Returns:\n Optional[str]: The project ID.\n \"\"\"\n if os.name == \"nt\":\n command = _CLOUD_SDK_WINDOWS_COMMAND\n else:\n command = _CLOUD_SDK_POSIX_COMMAND\n\n try:\n output = subprocess.check_output(\n (command,) + _CLOUD_SDK_CONFIG_COMMAND, stderr=subprocess.STDOUT\n )\n except (subprocess.CalledProcessError, OSError, IOError):\n return None\n\n try:\n configuration = json.loads(output.decode(\"utf-8\"))\n except 
ValueError:\n return None\n\n try:\n return configuration[\"configuration\"][\"properties\"][\"core\"][\"project\"]\n except KeyError:\n return None\n\n\ndef get_auth_access_token(account=None):\n \"\"\"Load user access token with the ``gcloud auth print-access-token`` command.\n\n Args:\n account (Optional[str]): Account to get the access token for. If not\n specified, the current active account will be used.\n\n Returns:\n str: The user access token.\n\n Raises:\n google.auth.exceptions.UserAccessTokenError: if failed to get access\n token from gcloud.\n \"\"\"\n if os.name == \"nt\":\n command = _CLOUD_SDK_WINDOWS_COMMAND\n else:\n command = _CLOUD_SDK_POSIX_COMMAND\n\n try:\n if account:\n command = (\n (command,)\n + _CLOUD_SDK_USER_ACCESS_TOKEN_COMMAND\n + (\"--account=\" + account,)\n )\n else:\n command = (command,) + _CLOUD_SDK_USER_ACCESS_TOKEN_COMMAND\n\n access_token = subprocess.check_output(command, stderr=subprocess.STDOUT)\n # remove the trailing \"\\n\"\n return access_token.decode(\"utf-8\").strip()\n except (subprocess.CalledProcessError, OSError, IOError) as caught_exc:\n new_exc = exceptions.UserAccessTokenError(\n \"Failed to obtain access token\", caught_exc\n )\n six.raise_from(new_exc, caught_exc)\n", "path": "google/auth/_cloud_sdk.py"}], "after_files": [{"content": "# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Helpers for reading the Google Cloud SDK's configuration.\"\"\"\n\nimport json\nimport os\nimport subprocess\n\nimport six\n\nfrom google.auth import environment_vars\nfrom google.auth import exceptions\n\n\n# The ~/.config subdirectory containing gcloud credentials.\n_CONFIG_DIRECTORY = \"gcloud\"\n# Windows systems store config at %APPDATA%\\gcloud\n_WINDOWS_CONFIG_ROOT_ENV_VAR = \"APPDATA\"\n# The name of the file in the Cloud SDK config that contains default\n# credentials.\n_CREDENTIALS_FILENAME = \"application_default_credentials.json\"\n# The name of the Cloud SDK shell script\n_CLOUD_SDK_POSIX_COMMAND = \"gcloud\"\n_CLOUD_SDK_WINDOWS_COMMAND = \"gcloud.cmd\"\n# The command to get the Cloud SDK configuration\n_CLOUD_SDK_CONFIG_COMMAND = (\"config\", \"config-helper\", \"--format\", \"json\")\n# The command to get google user access token\n_CLOUD_SDK_USER_ACCESS_TOKEN_COMMAND = (\"auth\", \"print-access-token\")\n# Cloud SDK's application-default client ID\nCLOUD_SDK_CLIENT_ID = (\n \"764086051850-6qr4p6gpi6hn506pt8ejuq83di341hur.apps.googleusercontent.com\"\n)\n\n\ndef get_config_path():\n \"\"\"Returns the absolute path the the Cloud SDK's configuration directory.\n\n Returns:\n str: The Cloud SDK config path.\n \"\"\"\n # If the path is explicitly set, return that.\n try:\n return os.environ[environment_vars.CLOUD_SDK_CONFIG_DIR]\n except KeyError:\n pass\n\n # Non-windows systems store this at ~/.config/gcloud\n if os.name != \"nt\":\n return os.path.join(os.path.expanduser(\"~\"), \".config\", _CONFIG_DIRECTORY)\n # Windows systems store config at %APPDATA%\\gcloud\n else:\n try:\n return os.path.join(\n 
os.environ[_WINDOWS_CONFIG_ROOT_ENV_VAR], _CONFIG_DIRECTORY\n )\n except KeyError:\n # This should never happen unless someone is really\n # messing with things, but we'll cover the case anyway.\n drive = os.environ.get(\"SystemDrive\", \"C:\")\n return os.path.join(drive, \"\\\\\", _CONFIG_DIRECTORY)\n\n\ndef get_application_default_credentials_path():\n \"\"\"Gets the path to the application default credentials file.\n\n The path may or may not exist.\n\n Returns:\n str: The full path to application default credentials.\n \"\"\"\n config_path = get_config_path()\n return os.path.join(config_path, _CREDENTIALS_FILENAME)\n\n\ndef _run_subprocess_ignore_stderr(command):\n \"\"\" Return subprocess.check_output with the given command and ignores stderr.\"\"\"\n with open(os.devnull, \"w\") as devnull:\n output = subprocess.check_output(command, stderr=devnull)\n return output\n\n\ndef get_project_id():\n \"\"\"Gets the project ID from the Cloud SDK.\n\n Returns:\n Optional[str]: The project ID.\n \"\"\"\n if os.name == \"nt\":\n command = _CLOUD_SDK_WINDOWS_COMMAND\n else:\n command = _CLOUD_SDK_POSIX_COMMAND\n\n try:\n # Ignore the stderr coming from gcloud, so it won't be mixed into the output.\n # https://github.com/googleapis/google-auth-library-python/issues/673\n output = _run_subprocess_ignore_stderr((command,) + _CLOUD_SDK_CONFIG_COMMAND)\n except (subprocess.CalledProcessError, OSError, IOError):\n return None\n\n try:\n configuration = json.loads(output.decode(\"utf-8\"))\n except ValueError:\n return None\n\n try:\n return configuration[\"configuration\"][\"properties\"][\"core\"][\"project\"]\n except KeyError:\n return None\n\n\ndef get_auth_access_token(account=None):\n \"\"\"Load user access token with the ``gcloud auth print-access-token`` command.\n\n Args:\n account (Optional[str]): Account to get the access token for. If not\n specified, the current active account will be used.\n\n Returns:\n str: The user access token.\n\n Raises:\n google.auth.exceptions.UserAccessTokenError: if failed to get access\n token from gcloud.\n \"\"\"\n if os.name == \"nt\":\n command = _CLOUD_SDK_WINDOWS_COMMAND\n else:\n command = _CLOUD_SDK_POSIX_COMMAND\n\n try:\n if account:\n command = (\n (command,)\n + _CLOUD_SDK_USER_ACCESS_TOKEN_COMMAND\n + (\"--account=\" + account,)\n )\n else:\n command = (command,) + _CLOUD_SDK_USER_ACCESS_TOKEN_COMMAND\n\n access_token = subprocess.check_output(command, stderr=subprocess.STDOUT)\n # remove the trailing \"\\n\"\n return access_token.decode(\"utf-8\").strip()\n except (subprocess.CalledProcessError, OSError, IOError) as caught_exc:\n new_exc = exceptions.UserAccessTokenError(\n \"Failed to obtain access token\", caught_exc\n )\n six.raise_from(new_exc, caught_exc)\n", "path": "google/auth/_cloud_sdk.py"}]} | 2,144 | 278 |
gh_patches_debug_122 | rasdani/github-patches | git_diff | XanaduAI__strawberryfields-581 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Dependency versions error
#### Issue description
I made a fork of this project and tried to setup a new virtual environment.
```
python -m venv sf-venv
source sf-venv/bin/active.fish
pip install -r requirements.txt
```
However, I got the following error
```
ERROR: Cannot install -r requirements.txt (line 4) and numpy>=1.20 because these package versions have conflicting dependencies.
The conflict is caused by:
The user requested numpy>=1.20
tensorflow 2.5.0 depends on numpy~=1.19.2
To fix this you could try to:
1. loosen the range of package versions you've specified
2. remove package versions to allow pip attempt to solve the dependency conflict
ERROR: ResolutionImpossible: for help visit https://pip.pypa.io/en/latest/user_guide/#fixing-conflicting-dependencies
```
#### Additional information
If it helps, I am using Python 3.9.4 and pip 21.1.1.
A quick fix would be to downgrade the version of numpy in requirements.txt and solve the issue, but I am not sure it is the best way to go.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2019 Xanadu Quantum Technologies Inc.
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import os
15 import sys
16
17 from setuptools import setup, find_packages
18
19
20 with open("strawberryfields/_version.py") as f:
21 version = f.readlines()[-1].split()[-1].strip("\"'")
22
23
24 requirements = [
25 "numpy>=1.17.4",
26 "scipy>=1.0.0",
27 "sympy>=1.5",
28 "networkx>=2.0",
29 "quantum-blackbird>=0.3.0",
30 "python-dateutil>=2.8.0",
31 "thewalrus>=0.15.0",
32 "numba",
33 "toml",
34 "appdirs",
35 "requests>=2.22.0",
36 "urllib3>=1.25.3",
37 ]
38
39 info = {
40 "name": "StrawberryFields",
41 "version": version,
42 "maintainer": "Xanadu Inc.",
43 "maintainer_email": "[email protected]",
44 "url": "https://github.com/XanaduAI/StrawberryFields",
45 "license": "Apache License 2.0",
46 "packages": find_packages(where="."),
47 "package_data": {"strawberryfields": ["backends/data/*", "apps/data/feature_data/*",
48 "apps/data/sample_data/*"]},
49 "include_package_data": True,
50 "entry_points" : {
51 'console_scripts': [
52 'sf=strawberryfields.cli:main'
53 ]
54 },
55 "description": "Open source library for continuous-variable quantum computation",
56 "long_description": open("README.rst", encoding="utf-8").read(),
57 "long_description_content_type": "text/x-rst",
58 "provides": ["strawberryfields"],
59 "install_requires": requirements,
60 # 'extras_require': extra_requirements,
61 "command_options": {
62 "build_sphinx": {"version": ("setup.py", version), "release": ("setup.py", version)}
63 },
64 }
65
66 classifiers = [
67 "Development Status :: 4 - Beta",
68 "Environment :: Console",
69 "Intended Audience :: Science/Research",
70 "License :: OSI Approved :: Apache Software License",
71 "Natural Language :: English",
72 "Operating System :: POSIX",
73 "Operating System :: MacOS :: MacOS X",
74 "Operating System :: POSIX :: Linux",
75 "Operating System :: Microsoft :: Windows",
76 "Programming Language :: Python",
77 "Programming Language :: Python :: 3",
78 "Programming Language :: Python :: 3.7",
79 "Programming Language :: Python :: 3.8",
80 "Programming Language :: Python :: 3.9",
81 "Programming Language :: Python :: 3 :: Only",
82 "Topic :: Scientific/Engineering :: Physics",
83 ]
84
85 setup(classifiers=classifiers, **(info))
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -22,7 +22,7 @@
requirements = [
- "numpy>=1.17.4",
+ "numpy>=1.19.2",
"scipy>=1.0.0",
"sympy>=1.5",
"networkx>=2.0",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -22,7 +22,7 @@\n \n \n requirements = [\n- \"numpy>=1.17.4\",\n+ \"numpy>=1.19.2\",\n \"scipy>=1.0.0\",\n \"sympy>=1.5\",\n \"networkx>=2.0\",\n", "issue": "Dependency versions error\n#### Issue description\r\nI made a fork of this project and tried to setup a new virtual environment.\r\n\r\n```\r\npython -m venv sf-venv\r\nsource sf-venv/bin/active.fish\r\npip install -r requirements.txt\r\n```\r\n\r\nHowever, I got the following error\r\n``` \r\nERROR: Cannot install -r requirements.txt (line 4) and numpy>=1.20 because these package versions have conflicting dependencies.\r\n\r\nThe conflict is caused by:\r\n The user requested numpy>=1.20\r\n tensorflow 2.5.0 depends on numpy~=1.19.2\r\n\r\nTo fix this you could try to:\r\n1. loosen the range of package versions you've specified\r\n2. remove package versions to allow pip attempt to solve the dependency conflict\r\n\r\nERROR: ResolutionImpossible: for help visit https://pip.pypa.io/en/latest/user_guide/#fixing-conflicting-dependencies\r\n```\r\n\r\n#### Additional information\r\n\r\nIf it helps, I am using Python 3.9.4 and pip 21.1.1. \r\n\r\nA quick fix would be to downgrade the version of numpy in requirements.txt and solve the issue, but I am not sure it is the best way to go.\r\n\n", "before_files": [{"content": "# Copyright 2019 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nwith open(\"strawberryfields/_version.py\") as f:\n version = f.readlines()[-1].split()[-1].strip(\"\\\"'\")\n\n\nrequirements = [\n \"numpy>=1.17.4\",\n \"scipy>=1.0.0\",\n \"sympy>=1.5\",\n \"networkx>=2.0\",\n \"quantum-blackbird>=0.3.0\",\n \"python-dateutil>=2.8.0\",\n \"thewalrus>=0.15.0\",\n \"numba\",\n \"toml\",\n \"appdirs\",\n \"requests>=2.22.0\",\n \"urllib3>=1.25.3\",\n]\n\ninfo = {\n \"name\": \"StrawberryFields\",\n \"version\": version,\n \"maintainer\": \"Xanadu Inc.\",\n \"maintainer_email\": \"[email protected]\",\n \"url\": \"https://github.com/XanaduAI/StrawberryFields\",\n \"license\": \"Apache License 2.0\",\n \"packages\": find_packages(where=\".\"),\n \"package_data\": {\"strawberryfields\": [\"backends/data/*\", \"apps/data/feature_data/*\",\n \"apps/data/sample_data/*\"]},\n \"include_package_data\": True,\n \"entry_points\" : {\n 'console_scripts': [\n 'sf=strawberryfields.cli:main'\n ]\n },\n \"description\": \"Open source library for continuous-variable quantum computation\",\n \"long_description\": open(\"README.rst\", encoding=\"utf-8\").read(),\n \"long_description_content_type\": \"text/x-rst\",\n \"provides\": [\"strawberryfields\"],\n \"install_requires\": requirements,\n # 'extras_require': extra_requirements,\n \"command_options\": {\n \"build_sphinx\": {\"version\": (\"setup.py\", version), \"release\": (\"setup.py\", version)}\n },\n}\n\nclassifiers = [\n \"Development Status :: 4 - Beta\",\n \"Environment :: Console\",\n 
\"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: POSIX\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Topic :: Scientific/Engineering :: Physics\",\n]\n\nsetup(classifiers=classifiers, **(info))\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2019 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nwith open(\"strawberryfields/_version.py\") as f:\n version = f.readlines()[-1].split()[-1].strip(\"\\\"'\")\n\n\nrequirements = [\n \"numpy>=1.19.2\",\n \"scipy>=1.0.0\",\n \"sympy>=1.5\",\n \"networkx>=2.0\",\n \"quantum-blackbird>=0.3.0\",\n \"python-dateutil>=2.8.0\",\n \"thewalrus>=0.15.0\",\n \"numba\",\n \"toml\",\n \"appdirs\",\n \"requests>=2.22.0\",\n \"urllib3>=1.25.3\",\n]\n\ninfo = {\n \"name\": \"StrawberryFields\",\n \"version\": version,\n \"maintainer\": \"Xanadu Inc.\",\n \"maintainer_email\": \"[email protected]\",\n \"url\": \"https://github.com/XanaduAI/StrawberryFields\",\n \"license\": \"Apache License 2.0\",\n \"packages\": find_packages(where=\".\"),\n \"package_data\": {\"strawberryfields\": [\"backends/data/*\", \"apps/data/feature_data/*\",\n \"apps/data/sample_data/*\"]},\n \"include_package_data\": True,\n \"entry_points\" : {\n 'console_scripts': [\n 'sf=strawberryfields.cli:main'\n ]\n },\n \"description\": \"Open source library for continuous-variable quantum computation\",\n \"long_description\": open(\"README.rst\", encoding=\"utf-8\").read(),\n \"long_description_content_type\": \"text/x-rst\",\n \"provides\": [\"strawberryfields\"],\n \"install_requires\": requirements,\n # 'extras_require': extra_requirements,\n \"command_options\": {\n \"build_sphinx\": {\"version\": (\"setup.py\", version), \"release\": (\"setup.py\", version)}\n },\n}\n\nclassifiers = [\n \"Development Status :: 4 - Beta\",\n \"Environment :: Console\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: POSIX\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Topic :: Scientific/Engineering :: Physics\",\n]\n\nsetup(classifiers=classifiers, 
**(info))\n", "path": "setup.py"}]} | 1,418 | 90 |
gh_patches_debug_36362 | rasdani/github-patches | git_diff | wright-group__WrightTools-829 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Recover trim, a method of channel
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `WrightTools/data/_channel.py`
Content:
```
1 """Channel class and associated."""
2
3
4 # --- import --------------------------------------------------------------------------------------
5
6
7 import numpy as np
8
9 import h5py
10
11 from .. import kit as wt_kit
12 from .._dataset import Dataset
13
14 __all__ = ["Channel"]
15
16 # --- class ---------------------------------------------------------------------------------------
17
18
19 class Channel(Dataset):
20 """Channel."""
21
22 class_name = "Channel"
23
24 def __init__(
25 self,
26 parent,
27 id,
28 *,
29 units=None,
30 null=None,
31 signed=None,
32 label=None,
33 label_seed=None,
34 **kwargs
35 ):
36 """Construct a channel object.
37
38 Parameters
39 ----------
40 values : array-like
41 Values.
42 name : string
43 Channel name.
44 units : string (optional)
45 Channel units. Default is None.
46 null : number (optional)
47 Channel null. Default is None (0).
48 signed : booelan (optional)
49 Channel signed flag. Default is None (guess).
50 label : string.
51 Label. Default is None.
52 label_seed : list of strings
53 Label seed. Default is None.
54 **kwargs
55 Additional keyword arguments are added to the attrs dictionary
56 and to the natural namespace of the object (if possible).
57 """
58 self._parent = parent
59 super().__init__(id)
60 self.label = label
61 self.label_seed = label_seed
62 self.units = units
63 self.dimensionality = len(self.shape)
64 # attrs
65 self.attrs.update(kwargs)
66 self.attrs["name"] = h5py.h5i.get_name(self.id).decode().split("/")[-1]
67 self.attrs["class"] = "Channel"
68 if signed is not None:
69 self.attrs["signed"] = signed
70 if null is not None:
71 self.attrs["null"] = null
72 for key, value in self.attrs.items():
73 identifier = wt_kit.string2identifier(key)
74 if not hasattr(self, identifier):
75 setattr(self, identifier, value)
76
77 @property
78 def major_extent(self) -> complex:
79 """Maximum deviation from null."""
80 return max((self.max() - self.null, self.null - self.min()))
81
82 @property
83 def minor_extent(self) -> complex:
84 """Minimum deviation from null."""
85 return min((self.max() - self.null, self.null - self.min()))
86
87 @property
88 def null(self) -> complex:
89 if "null" not in self.attrs.keys():
90 self.attrs["null"] = 0
91 return self.attrs["null"]
92
93 @null.setter
94 def null(self, value):
95 self.attrs["null"] = value
96
97 @property
98 def signed(self) -> bool:
99 if "signed" not in self.attrs.keys():
100 self.attrs["signed"] = False
101 return self.attrs["signed"]
102
103 @signed.setter
104 def signed(self, value):
105 self.attrs["signed"] = value
106
107 def mag(self) -> complex:
108 """Channel magnitude (maximum deviation from null)."""
109 return self.major_extent
110
111 def normalize(self, mag=1.):
112 """Normalize a Channel, set `null` to 0 and the mag to given value.
113
114 Parameters
115 ----------
116 mag : float (optional)
117 New value of mag. Default is 1.
118 """
119
120 def f(dataset, s, null, mag):
121 dataset[s] -= null
122 dataset[s] /= mag
123
124 if self.signed:
125 mag = self.mag() / mag
126 else:
127 mag = self.max() / mag
128 self.chunkwise(f, null=self.null, mag=mag)
129 self._null = 0
130
131 def trim(self, neighborhood, method="ztest", factor=3, replace="nan", verbose=True):
132 """Remove outliers from the dataset.
133
134 Identifies outliers by comparing each point to its
135 neighbors using a statistical test.
136
137 Parameters
138 ----------
139 neighborhood : list of integers
140 Size of the neighborhood in each dimension. Length of the list must
141 be equal to the dimensionality of the channel.
142 method : {'ztest'} (optional)
143 Statistical test used to detect outliers. Default is ztest.
144
145 ztest
146 Compare point deviation from neighborhood mean to neighborhood
147 standard deviation.
148
149 factor : number (optional)
150 Tolerance factor. Default is 3.
151 replace : {'nan', 'mean', 'mask', number} (optional)
152 Behavior of outlier replacement. Default is nan.
153
154 nan
155 Outliers are replaced by numpy nans.
156
157 mean
158 Outliers are replaced by the mean of its neighborhood.
159
160 mask
161 Array is masked at outliers.
162
163 number
164 Array becomes given number.
165
166 Returns
167 -------
168 list of tuples
169 Indicies of trimmed outliers.
170
171 See Also
172 --------
173 clip
174 Remove pixels outside of a certain range.
175 """
176 raise NotImplementedError
177 outliers = []
178 means = []
179 # find outliers
180 for idx in np.ndindex(self.shape):
181 slices = []
182 for i, di, size in zip(idx, neighborhood, self.shape):
183 start = max(0, i - di)
184 stop = min(size, i + di + 1)
185 slices.append(slice(start, stop, 1))
186 neighbors = self[slices]
187 mean = np.nanmean(neighbors)
188 limit = np.nanstd(neighbors) * factor
189 if np.abs(self[idx] - mean) > limit:
190 outliers.append(idx)
191 means.append(mean)
192 # replace outliers
193 i = tuple(zip(*outliers))
194 if replace == "nan":
195 self[i] = np.nan
196 elif replace == "mean":
197 self[i] = means
198 elif replace == "mask":
199 self[:] = np.ma.array(self[:])
200 self[i] = np.ma.masked
201 elif type(replace) in [int, float]:
202 self[i] = replace
203 else:
204 raise KeyError("replace must be one of {nan, mean, mask} or some number")
205 # finish
206 if verbose:
207 print("%i outliers removed" % len(outliers))
208 return outliers
209
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/WrightTools/data/_channel.py b/WrightTools/data/_channel.py
--- a/WrightTools/data/_channel.py
+++ b/WrightTools/data/_channel.py
@@ -8,8 +8,12 @@
import h5py
+import warnings
+import numbers
+
from .. import kit as wt_kit
from .._dataset import Dataset
+from .. import exceptions as wt_exceptions
__all__ = ["Channel"]
@@ -148,7 +152,7 @@
factor : number (optional)
Tolerance factor. Default is 3.
- replace : {'nan', 'mean', 'mask', number} (optional)
+ replace : {'nan', 'mean', number} (optional)
Behavior of outlier replacement. Default is nan.
nan
@@ -157,9 +161,6 @@
mean
Outliers are replaced by the mean of its neighborhood.
- mask
- Array is masked at outliers.
-
number
Array becomes given number.
@@ -173,7 +174,7 @@
clip
Remove pixels outside of a certain range.
"""
- raise NotImplementedError
+ warnings.warn("trim", category=wt_exceptions.EntireDatasetInMemoryWarning)
outliers = []
means = []
# find outliers
@@ -192,16 +193,19 @@
# replace outliers
i = tuple(zip(*outliers))
if replace == "nan":
- self[i] = np.nan
+ arr = self[:]
+ arr[i] = np.nan
+ self[:] = arr
elif replace == "mean":
- self[i] = means
- elif replace == "mask":
- self[:] = np.ma.array(self[:])
- self[i] = np.ma.masked
- elif type(replace) in [int, float]:
- self[i] = replace
+ arr = self[:]
+ arr[i] = means
+ self[:] = arr
+ elif isinstance(replace, numbers.Number):
+ arr = self[:]
+ arr[i] = replace
+ self[:] = arr
else:
- raise KeyError("replace must be one of {nan, mean, mask} or some number")
+ raise KeyError("replace must be one of {nan, mean} or some number")
# finish
if verbose:
print("%i outliers removed" % len(outliers))
| {"golden_diff": "diff --git a/WrightTools/data/_channel.py b/WrightTools/data/_channel.py\n--- a/WrightTools/data/_channel.py\n+++ b/WrightTools/data/_channel.py\n@@ -8,8 +8,12 @@\n \n import h5py\n \n+import warnings\n+import numbers\n+\n from .. import kit as wt_kit\n from .._dataset import Dataset\n+from .. import exceptions as wt_exceptions\n \n __all__ = [\"Channel\"]\n \n@@ -148,7 +152,7 @@\n \n factor : number (optional)\n Tolerance factor. Default is 3.\n- replace : {'nan', 'mean', 'mask', number} (optional)\n+ replace : {'nan', 'mean', number} (optional)\n Behavior of outlier replacement. Default is nan.\n \n nan\n@@ -157,9 +161,6 @@\n mean\n Outliers are replaced by the mean of its neighborhood.\n \n- mask\n- Array is masked at outliers.\n-\n number\n Array becomes given number.\n \n@@ -173,7 +174,7 @@\n clip\n Remove pixels outside of a certain range.\n \"\"\"\n- raise NotImplementedError\n+ warnings.warn(\"trim\", category=wt_exceptions.EntireDatasetInMemoryWarning)\n outliers = []\n means = []\n # find outliers\n@@ -192,16 +193,19 @@\n # replace outliers\n i = tuple(zip(*outliers))\n if replace == \"nan\":\n- self[i] = np.nan\n+ arr = self[:]\n+ arr[i] = np.nan\n+ self[:] = arr\n elif replace == \"mean\":\n- self[i] = means\n- elif replace == \"mask\":\n- self[:] = np.ma.array(self[:])\n- self[i] = np.ma.masked\n- elif type(replace) in [int, float]:\n- self[i] = replace\n+ arr = self[:]\n+ arr[i] = means\n+ self[:] = arr\n+ elif isinstance(replace, numbers.Number):\n+ arr = self[:]\n+ arr[i] = replace\n+ self[:] = arr\n else:\n- raise KeyError(\"replace must be one of {nan, mean, mask} or some number\")\n+ raise KeyError(\"replace must be one of {nan, mean} or some number\")\n # finish\n if verbose:\n print(\"%i outliers removed\" % len(outliers))\n", "issue": "Recover trim, a method of channel\n\n", "before_files": [{"content": "\"\"\"Channel class and associated.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport numpy as np\n\nimport h5py\n\nfrom .. import kit as wt_kit\nfrom .._dataset import Dataset\n\n__all__ = [\"Channel\"]\n\n# --- class ---------------------------------------------------------------------------------------\n\n\nclass Channel(Dataset):\n \"\"\"Channel.\"\"\"\n\n class_name = \"Channel\"\n\n def __init__(\n self,\n parent,\n id,\n *,\n units=None,\n null=None,\n signed=None,\n label=None,\n label_seed=None,\n **kwargs\n ):\n \"\"\"Construct a channel object.\n\n Parameters\n ----------\n values : array-like\n Values.\n name : string\n Channel name.\n units : string (optional)\n Channel units. Default is None.\n null : number (optional)\n Channel null. Default is None (0).\n signed : booelan (optional)\n Channel signed flag. Default is None (guess).\n label : string.\n Label. Default is None.\n label_seed : list of strings\n Label seed. 
Default is None.\n **kwargs\n Additional keyword arguments are added to the attrs dictionary\n and to the natural namespace of the object (if possible).\n \"\"\"\n self._parent = parent\n super().__init__(id)\n self.label = label\n self.label_seed = label_seed\n self.units = units\n self.dimensionality = len(self.shape)\n # attrs\n self.attrs.update(kwargs)\n self.attrs[\"name\"] = h5py.h5i.get_name(self.id).decode().split(\"/\")[-1]\n self.attrs[\"class\"] = \"Channel\"\n if signed is not None:\n self.attrs[\"signed\"] = signed\n if null is not None:\n self.attrs[\"null\"] = null\n for key, value in self.attrs.items():\n identifier = wt_kit.string2identifier(key)\n if not hasattr(self, identifier):\n setattr(self, identifier, value)\n\n @property\n def major_extent(self) -> complex:\n \"\"\"Maximum deviation from null.\"\"\"\n return max((self.max() - self.null, self.null - self.min()))\n\n @property\n def minor_extent(self) -> complex:\n \"\"\"Minimum deviation from null.\"\"\"\n return min((self.max() - self.null, self.null - self.min()))\n\n @property\n def null(self) -> complex:\n if \"null\" not in self.attrs.keys():\n self.attrs[\"null\"] = 0\n return self.attrs[\"null\"]\n\n @null.setter\n def null(self, value):\n self.attrs[\"null\"] = value\n\n @property\n def signed(self) -> bool:\n if \"signed\" not in self.attrs.keys():\n self.attrs[\"signed\"] = False\n return self.attrs[\"signed\"]\n\n @signed.setter\n def signed(self, value):\n self.attrs[\"signed\"] = value\n\n def mag(self) -> complex:\n \"\"\"Channel magnitude (maximum deviation from null).\"\"\"\n return self.major_extent\n\n def normalize(self, mag=1.):\n \"\"\"Normalize a Channel, set `null` to 0 and the mag to given value.\n\n Parameters\n ----------\n mag : float (optional)\n New value of mag. Default is 1.\n \"\"\"\n\n def f(dataset, s, null, mag):\n dataset[s] -= null\n dataset[s] /= mag\n\n if self.signed:\n mag = self.mag() / mag\n else:\n mag = self.max() / mag\n self.chunkwise(f, null=self.null, mag=mag)\n self._null = 0\n\n def trim(self, neighborhood, method=\"ztest\", factor=3, replace=\"nan\", verbose=True):\n \"\"\"Remove outliers from the dataset.\n\n Identifies outliers by comparing each point to its\n neighbors using a statistical test.\n\n Parameters\n ----------\n neighborhood : list of integers\n Size of the neighborhood in each dimension. Length of the list must\n be equal to the dimensionality of the channel.\n method : {'ztest'} (optional)\n Statistical test used to detect outliers. Default is ztest.\n\n ztest\n Compare point deviation from neighborhood mean to neighborhood\n standard deviation.\n\n factor : number (optional)\n Tolerance factor. Default is 3.\n replace : {'nan', 'mean', 'mask', number} (optional)\n Behavior of outlier replacement. 
Default is nan.\n\n nan\n Outliers are replaced by numpy nans.\n\n mean\n Outliers are replaced by the mean of its neighborhood.\n\n mask\n Array is masked at outliers.\n\n number\n Array becomes given number.\n\n Returns\n -------\n list of tuples\n Indicies of trimmed outliers.\n\n See Also\n --------\n clip\n Remove pixels outside of a certain range.\n \"\"\"\n raise NotImplementedError\n outliers = []\n means = []\n # find outliers\n for idx in np.ndindex(self.shape):\n slices = []\n for i, di, size in zip(idx, neighborhood, self.shape):\n start = max(0, i - di)\n stop = min(size, i + di + 1)\n slices.append(slice(start, stop, 1))\n neighbors = self[slices]\n mean = np.nanmean(neighbors)\n limit = np.nanstd(neighbors) * factor\n if np.abs(self[idx] - mean) > limit:\n outliers.append(idx)\n means.append(mean)\n # replace outliers\n i = tuple(zip(*outliers))\n if replace == \"nan\":\n self[i] = np.nan\n elif replace == \"mean\":\n self[i] = means\n elif replace == \"mask\":\n self[:] = np.ma.array(self[:])\n self[i] = np.ma.masked\n elif type(replace) in [int, float]:\n self[i] = replace\n else:\n raise KeyError(\"replace must be one of {nan, mean, mask} or some number\")\n # finish\n if verbose:\n print(\"%i outliers removed\" % len(outliers))\n return outliers\n", "path": "WrightTools/data/_channel.py"}], "after_files": [{"content": "\"\"\"Channel class and associated.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport numpy as np\n\nimport h5py\n\nimport warnings\nimport numbers\n\nfrom .. import kit as wt_kit\nfrom .._dataset import Dataset\nfrom .. import exceptions as wt_exceptions\n\n__all__ = [\"Channel\"]\n\n# --- class ---------------------------------------------------------------------------------------\n\n\nclass Channel(Dataset):\n \"\"\"Channel.\"\"\"\n\n class_name = \"Channel\"\n\n def __init__(\n self,\n parent,\n id,\n *,\n units=None,\n null=None,\n signed=None,\n label=None,\n label_seed=None,\n **kwargs\n ):\n \"\"\"Construct a channel object.\n\n Parameters\n ----------\n values : array-like\n Values.\n name : string\n Channel name.\n units : string (optional)\n Channel units. Default is None.\n null : number (optional)\n Channel null. Default is None (0).\n signed : booelan (optional)\n Channel signed flag. Default is None (guess).\n label : string.\n Label. Default is None.\n label_seed : list of strings\n Label seed. 
Default is None.\n **kwargs\n Additional keyword arguments are added to the attrs dictionary\n and to the natural namespace of the object (if possible).\n \"\"\"\n self._parent = parent\n super().__init__(id)\n self.label = label\n self.label_seed = label_seed\n self.units = units\n self.dimensionality = len(self.shape)\n # attrs\n self.attrs.update(kwargs)\n self.attrs[\"name\"] = h5py.h5i.get_name(self.id).decode().split(\"/\")[-1]\n self.attrs[\"class\"] = \"Channel\"\n if signed is not None:\n self.attrs[\"signed\"] = signed\n if null is not None:\n self.attrs[\"null\"] = null\n for key, value in self.attrs.items():\n identifier = wt_kit.string2identifier(key)\n if not hasattr(self, identifier):\n setattr(self, identifier, value)\n\n @property\n def major_extent(self) -> complex:\n \"\"\"Maximum deviation from null.\"\"\"\n return max((self.max() - self.null, self.null - self.min()))\n\n @property\n def minor_extent(self) -> complex:\n \"\"\"Minimum deviation from null.\"\"\"\n return min((self.max() - self.null, self.null - self.min()))\n\n @property\n def null(self) -> complex:\n if \"null\" not in self.attrs.keys():\n self.attrs[\"null\"] = 0\n return self.attrs[\"null\"]\n\n @null.setter\n def null(self, value):\n self.attrs[\"null\"] = value\n\n @property\n def signed(self) -> bool:\n if \"signed\" not in self.attrs.keys():\n self.attrs[\"signed\"] = False\n return self.attrs[\"signed\"]\n\n @signed.setter\n def signed(self, value):\n self.attrs[\"signed\"] = value\n\n def mag(self) -> complex:\n \"\"\"Channel magnitude (maximum deviation from null).\"\"\"\n return self.major_extent\n\n def normalize(self, mag=1.):\n \"\"\"Normalize a Channel, set `null` to 0 and the mag to given value.\n\n Parameters\n ----------\n mag : float (optional)\n New value of mag. Default is 1.\n \"\"\"\n\n def f(dataset, s, null, mag):\n dataset[s] -= null\n dataset[s] /= mag\n\n if self.signed:\n mag = self.mag() / mag\n else:\n mag = self.max() / mag\n self.chunkwise(f, null=self.null, mag=mag)\n self._null = 0\n\n def trim(self, neighborhood, method=\"ztest\", factor=3, replace=\"nan\", verbose=True):\n \"\"\"Remove outliers from the dataset.\n\n Identifies outliers by comparing each point to its\n neighbors using a statistical test.\n\n Parameters\n ----------\n neighborhood : list of integers\n Size of the neighborhood in each dimension. Length of the list must\n be equal to the dimensionality of the channel.\n method : {'ztest'} (optional)\n Statistical test used to detect outliers. Default is ztest.\n\n ztest\n Compare point deviation from neighborhood mean to neighborhood\n standard deviation.\n\n factor : number (optional)\n Tolerance factor. Default is 3.\n replace : {'nan', 'mean', number} (optional)\n Behavior of outlier replacement. 
Default is nan.\n\n nan\n Outliers are replaced by numpy nans.\n\n mean\n Outliers are replaced by the mean of its neighborhood.\n\n number\n Array becomes given number.\n\n Returns\n -------\n list of tuples\n Indicies of trimmed outliers.\n\n See Also\n --------\n clip\n Remove pixels outside of a certain range.\n \"\"\"\n warnings.warn(\"trim\", category=wt_exceptions.EntireDatasetInMemoryWarning)\n outliers = []\n means = []\n # find outliers\n for idx in np.ndindex(self.shape):\n slices = []\n for i, di, size in zip(idx, neighborhood, self.shape):\n start = max(0, i - di)\n stop = min(size, i + di + 1)\n slices.append(slice(start, stop, 1))\n neighbors = self[slices]\n mean = np.nanmean(neighbors)\n limit = np.nanstd(neighbors) * factor\n if np.abs(self[idx] - mean) > limit:\n outliers.append(idx)\n means.append(mean)\n # replace outliers\n i = tuple(zip(*outliers))\n if replace == \"nan\":\n arr = self[:]\n arr[i] = np.nan\n self[:] = arr\n elif replace == \"mean\":\n arr = self[:]\n arr[i] = means\n self[:] = arr\n elif isinstance(replace, numbers.Number):\n arr = self[:]\n arr[i] = replace\n self[:] = arr\n else:\n raise KeyError(\"replace must be one of {nan, mean} or some number\")\n # finish\n if verbose:\n print(\"%i outliers removed\" % len(outliers))\n return outliers\n", "path": "WrightTools/data/_channel.py"}]} | 2,103 | 544 |
gh_patches_debug_19530 | rasdani/github-patches | git_diff | mozmeao__snippets-service-995 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix NR reporting
See https://github.com/mozmeao/infra/issues/1106
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `snippets/wsgi/app.py`
Content:
```
1 """
2 WSGI config for snippets project.
3
4 It exposes the WSGI callable as a module-level variable named ``application``.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
8 """
9 import os
10 os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'snippets.settings') # NOQA
11
12 from django.core.wsgi import get_wsgi_application
13
14 import newrelic.agent
15 from decouple import config
16 from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
17
18 application = get_wsgi_application()
19
20 application = Sentry(application)
21
22 # Add NewRelic
23 newrelic_ini = config('NEW_RELIC_CONFIG_FILE', default='newrelic.ini')
24 newrelic_license_key = config('NEW_RELIC_LICENSE_KEY', default=None)
25 if newrelic_ini and newrelic_license_key:
26 newrelic.agent.initialize(newrelic_ini)
27 application = newrelic.agent.wsgi_application()(application)
28
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/snippets/wsgi/app.py b/snippets/wsgi/app.py
--- a/snippets/wsgi/app.py
+++ b/snippets/wsgi/app.py
@@ -6,22 +6,14 @@
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
-import os
-os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'snippets.settings') # NOQA
-
-from django.core.wsgi import get_wsgi_application
-
import newrelic.agent
-from decouple import config
-from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
+newrelic.agent.initialize('newrelic.ini')
+import os # NOQA
+os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'snippets.settings') # NOQA
+
+from django.core.wsgi import get_wsgi_application # NOQA
application = get_wsgi_application()
+from raven.contrib.django.raven_compat.middleware.wsgi import Sentry # NOQA
application = Sentry(application)
-
-# Add NewRelic
-newrelic_ini = config('NEW_RELIC_CONFIG_FILE', default='newrelic.ini')
-newrelic_license_key = config('NEW_RELIC_LICENSE_KEY', default=None)
-if newrelic_ini and newrelic_license_key:
- newrelic.agent.initialize(newrelic_ini)
- application = newrelic.agent.wsgi_application()(application)
| {"golden_diff": "diff --git a/snippets/wsgi/app.py b/snippets/wsgi/app.py\n--- a/snippets/wsgi/app.py\n+++ b/snippets/wsgi/app.py\n@@ -6,22 +6,14 @@\n For more information on this file, see\n https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/\n \"\"\"\n-import os\n-os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'snippets.settings') # NOQA\n-\n-from django.core.wsgi import get_wsgi_application\n-\n import newrelic.agent\n-from decouple import config\n-from raven.contrib.django.raven_compat.middleware.wsgi import Sentry\n+newrelic.agent.initialize('newrelic.ini')\n \n+import os # NOQA\n+os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'snippets.settings') # NOQA\n+\n+from django.core.wsgi import get_wsgi_application # NOQA\n application = get_wsgi_application()\n \n+from raven.contrib.django.raven_compat.middleware.wsgi import Sentry # NOQA\n application = Sentry(application)\n-\n-# Add NewRelic\n-newrelic_ini = config('NEW_RELIC_CONFIG_FILE', default='newrelic.ini')\n-newrelic_license_key = config('NEW_RELIC_LICENSE_KEY', default=None)\n-if newrelic_ini and newrelic_license_key:\n- newrelic.agent.initialize(newrelic_ini)\n- application = newrelic.agent.wsgi_application()(application)\n", "issue": "Fix NR reporting\nSee https://github.com/mozmeao/infra/issues/1106\n", "before_files": [{"content": "\"\"\"\nWSGI config for snippets project.\n\nIt exposes the WSGI callable as a module-level variable named ``application``.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/\n\"\"\"\nimport os\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'snippets.settings') # NOQA\n\nfrom django.core.wsgi import get_wsgi_application\n\nimport newrelic.agent\nfrom decouple import config\nfrom raven.contrib.django.raven_compat.middleware.wsgi import Sentry\n\napplication = get_wsgi_application()\n\napplication = Sentry(application)\n\n# Add NewRelic\nnewrelic_ini = config('NEW_RELIC_CONFIG_FILE', default='newrelic.ini')\nnewrelic_license_key = config('NEW_RELIC_LICENSE_KEY', default=None)\nif newrelic_ini and newrelic_license_key:\n newrelic.agent.initialize(newrelic_ini)\n application = newrelic.agent.wsgi_application()(application)\n", "path": "snippets/wsgi/app.py"}], "after_files": [{"content": "\"\"\"\nWSGI config for snippets project.\n\nIt exposes the WSGI callable as a module-level variable named ``application``.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/\n\"\"\"\nimport newrelic.agent\nnewrelic.agent.initialize('newrelic.ini')\n\nimport os # NOQA\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'snippets.settings') # NOQA\n\nfrom django.core.wsgi import get_wsgi_application # NOQA\napplication = get_wsgi_application()\n\nfrom raven.contrib.django.raven_compat.middleware.wsgi import Sentry # NOQA\napplication = Sentry(application)\n", "path": "snippets/wsgi/app.py"}]} | 540 | 311 |
gh_patches_debug_16876 | rasdani/github-patches | git_diff | chainer__chainer-1355 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Possibly wrong doc or code for deconvolution cover_all
The documentation says:
```
cover_all (bool): If True, all spatial locations are convoluted into
some output pixels. It may make the output size larger.
```
However, when I prepare a small toy example, the output is larger when `cover_all=True`. I feel like either the code or the documentation needs to be inverted.
See an [gist notebook](https://gist.github.com/LukasDrude/8a9ebbaa3a6ba4ae0e2bef611afefd5a) for the toy example or the attached screenshot. I had set the weight matrices to ones and disabled normalization for clarity.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/functions/pooling/unpooling_2d.py`
Content:
```
1 from chainer import cuda
2 from chainer.functions.pooling import pooling_2d
3 from chainer.utils import conv
4 from chainer.utils import type_check
5
6
7 class Unpooling2D(pooling_2d.Pooling2D):
8
9 """Unpooling over a set of 2d planes."""
10
11 def __init__(self, ksize, stride=None, pad=0,
12 outsize=None, cover_all=True):
13 super(Unpooling2D, self).__init__(ksize, stride, pad, cover_all)
14 self.outh, self.outw = (None, None) if outsize is None else outsize
15
16 def check_type_forward(self, in_types):
17 n_in = in_types.size()
18 type_check.expect(n_in == 1)
19 x_type = in_types[0]
20
21 type_check.expect(
22 x_type.dtype.kind == 'f',
23 x_type.ndim == 4,
24 )
25
26 if self.outh is not None:
27 expected_h = conv.get_conv_outsize(
28 self.outh, self.kh, self.sy, self.ph, cover_all=self.cover_all)
29 type_check.expect(x_type.shape[2] == expected_h)
30 if self.outw is not None:
31 expected_w = conv.get_conv_outsize(
32 self.outw, self.kw, self.sx, self.pw, cover_all=self.cover_all)
33 type_check.expect(x_type.shape[3] == expected_w)
34
35 def forward(self, x):
36 h, w = x[0].shape[2:]
37 if self.outh is None:
38 self.outh = conv.get_deconv_outsize(
39 h, self.kh, self.sy, self.ph, cover_all=self.cover_all)
40 if self.outw is None:
41 self.outw = conv.get_deconv_outsize(
42 w, self.kw, self.sx, self.pw, cover_all=self.cover_all)
43 xp = cuda.get_array_module(*x)
44 col = xp.tile(x[0][:, :, None, None],
45 (1, 1, self.kh, self.kw, 1, 1))
46 if isinstance(x[0], cuda.ndarray):
47 y = conv.col2im_gpu(col, self.sy, self.sx, self.ph, self.pw,
48 self.outh, self.outw)
49 else:
50 y = conv.col2im_cpu(col, self.sy, self.sx, self.ph, self.pw,
51 self.outh, self.outw)
52 return y,
53
54 def backward(self, x, gy):
55 if isinstance(gy[0], cuda.ndarray):
56 gcol = conv.im2col_gpu(
57 gy[0], self.kh, self.kw, self.sy, self.sx, self.ph, self.pw,
58 cover_all=self.cover_all)
59 else:
60 gcol = conv.im2col_cpu(
61 gy[0], self.kh, self.kw, self.sy, self.sx, self.ph, self.pw,
62 cover_all=self.cover_all)
63 gx = gcol.sum(axis=(2, 3))
64 return gx,
65
66
67 def unpooling_2d(x, ksize, stride=None, pad=0, outsize=None, cover_all=True):
68 """Inverse operation of pooling for 2d array.
69
70 This function acts similarly to :class:`~functions.Deconvolution2D`, but
71 it spreads input 2d array's value without any parameter instead of
72 computing the inner products.
73
74 Args:
75 x (~chainer.Variable): Input variable.
76 ksize (int or pair of ints): Size of pooling window. ``ksize=k`` and
77 ``ksize=(k, k)`` are equivalent.
78 stride (int, pair of ints or None): Stride of pooling applications.
79 ``stride=s`` and ``stride=(s, s)`` are equivalent. If ``None`` is
80 specified, then it uses same stride as the pooling window size.
81 pad (int or pair of ints): Spatial padding width for the input array.
82 ``pad=p`` and ``pad=(p, p)`` are equivalent.
83 outsize (None or pair of ints): Expected output size (height, width)
84 of array after the operation. If ``None``, the size
85 (height or width) is estimated from the size of input array
86 in first batch with
87 :func:`~chainer.utils.conv.get_deconv_outsize`.
88 If outsize is not ``None``, the result of outsize applied to
89 :func:`~chainer.utils.conv.get_conv_outsize` must be equal to
90 the shape of the 2d array in the input batch ``x``.
91 cover_all (bool): If ``True``, all spatial locations are pooled
92 into some output pixels, and the output size is larger than that
93 when cover_all is ``False``.
94
95 Returns:
96 ~chainer.Variable: Output variable.
97
98 """
99 return Unpooling2D(ksize, stride, pad, outsize, cover_all)(x)
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chainer/functions/pooling/unpooling_2d.py b/chainer/functions/pooling/unpooling_2d.py
--- a/chainer/functions/pooling/unpooling_2d.py
+++ b/chainer/functions/pooling/unpooling_2d.py
@@ -88,9 +88,12 @@
If outsize is not ``None``, the result of outsize applied to
:func:`~chainer.utils.conv.get_conv_outsize` must be equal to
the shape of the 2d array in the input batch ``x``.
- cover_all (bool): If ``True``, all spatial locations are pooled
- into some output pixels, and the output size is larger than that
- when cover_all is ``False``.
+ cover_all (bool): If ``True``, the output size may be smaller than
+ the size if ``cover_all`` is ``False``. This flag serves to
+ align behavior to the pooling functions which can cover all
+ input locations, see :func:`~chainer.functions.max_pooling_2d`
+ and :func:`~chainer.functions.convolution_2d`.
+
Returns:
~chainer.Variable: Output variable.
| {"golden_diff": "diff --git a/chainer/functions/pooling/unpooling_2d.py b/chainer/functions/pooling/unpooling_2d.py\n--- a/chainer/functions/pooling/unpooling_2d.py\n+++ b/chainer/functions/pooling/unpooling_2d.py\n@@ -88,9 +88,12 @@\n If outsize is not ``None``, the result of outsize applied to\n :func:`~chainer.utils.conv.get_conv_outsize` must be equal to\n the shape of the 2d array in the input batch ``x``.\n- cover_all (bool): If ``True``, all spatial locations are pooled\n- into some output pixels, and the output size is larger than that\n- when cover_all is ``False``.\n+ cover_all (bool): If ``True``, the output size may be smaller than\n+ the size if ``cover_all`` is ``False``. This flag serves to\n+ align behavior to the pooling functions which can cover all\n+ input locations, see :func:`~chainer.functions.max_pooling_2d`\n+ and :func:`~chainer.functions.convolution_2d`.\n+\n \n Returns:\n ~chainer.Variable: Output variable.\n", "issue": "Possibly wrong doc or code for deconvolution cover_all\nThe documentation says:\n\n```\ncover_all (bool): If True, all spatial locations are convoluted into\n some output pixels. It may make the output size larger.\n```\n\nHowever, when I prepare a small toy example, the output is larger when `cover_all=True`. I feel like either the code or the documentation needs to be inverted.\n\nSee an [gist notebook](https://gist.github.com/LukasDrude/8a9ebbaa3a6ba4ae0e2bef611afefd5a) for the toy example or the attached screenshot. I had set the weight matrices to ones and disabled normalization for clarity.\n\n\n\n", "before_files": [{"content": "from chainer import cuda\nfrom chainer.functions.pooling import pooling_2d\nfrom chainer.utils import conv\nfrom chainer.utils import type_check\n\n\nclass Unpooling2D(pooling_2d.Pooling2D):\n\n \"\"\"Unpooling over a set of 2d planes.\"\"\"\n\n def __init__(self, ksize, stride=None, pad=0,\n outsize=None, cover_all=True):\n super(Unpooling2D, self).__init__(ksize, stride, pad, cover_all)\n self.outh, self.outw = (None, None) if outsize is None else outsize\n\n def check_type_forward(self, in_types):\n n_in = in_types.size()\n type_check.expect(n_in == 1)\n x_type = in_types[0]\n\n type_check.expect(\n x_type.dtype.kind == 'f',\n x_type.ndim == 4,\n )\n\n if self.outh is not None:\n expected_h = conv.get_conv_outsize(\n self.outh, self.kh, self.sy, self.ph, cover_all=self.cover_all)\n type_check.expect(x_type.shape[2] == expected_h)\n if self.outw is not None:\n expected_w = conv.get_conv_outsize(\n self.outw, self.kw, self.sx, self.pw, cover_all=self.cover_all)\n type_check.expect(x_type.shape[3] == expected_w)\n\n def forward(self, x):\n h, w = x[0].shape[2:]\n if self.outh is None:\n self.outh = conv.get_deconv_outsize(\n h, self.kh, self.sy, self.ph, cover_all=self.cover_all)\n if self.outw is None:\n self.outw = conv.get_deconv_outsize(\n w, self.kw, self.sx, self.pw, cover_all=self.cover_all)\n xp = cuda.get_array_module(*x)\n col = xp.tile(x[0][:, :, None, None],\n (1, 1, self.kh, self.kw, 1, 1))\n if isinstance(x[0], cuda.ndarray):\n y = conv.col2im_gpu(col, self.sy, self.sx, self.ph, self.pw,\n self.outh, self.outw)\n else:\n y = conv.col2im_cpu(col, self.sy, self.sx, self.ph, self.pw,\n self.outh, self.outw)\n return y,\n\n def backward(self, x, gy):\n if isinstance(gy[0], cuda.ndarray):\n gcol = conv.im2col_gpu(\n gy[0], self.kh, self.kw, self.sy, self.sx, self.ph, self.pw,\n cover_all=self.cover_all)\n else:\n gcol = conv.im2col_cpu(\n gy[0], self.kh, self.kw, self.sy, self.sx, self.ph, self.pw,\n 
cover_all=self.cover_all)\n gx = gcol.sum(axis=(2, 3))\n return gx,\n\n\ndef unpooling_2d(x, ksize, stride=None, pad=0, outsize=None, cover_all=True):\n \"\"\"Inverse operation of pooling for 2d array.\n\n This function acts similarly to :class:`~functions.Deconvolution2D`, but\n it spreads input 2d array's value without any parameter instead of\n computing the inner products.\n\n Args:\n x (~chainer.Variable): Input variable.\n ksize (int or pair of ints): Size of pooling window. ``ksize=k`` and\n ``ksize=(k, k)`` are equivalent.\n stride (int, pair of ints or None): Stride of pooling applications.\n ``stride=s`` and ``stride=(s, s)`` are equivalent. If ``None`` is\n specified, then it uses same stride as the pooling window size.\n pad (int or pair of ints): Spatial padding width for the input array.\n ``pad=p`` and ``pad=(p, p)`` are equivalent.\n outsize (None or pair of ints): Expected output size (height, width)\n of array after the operation. If ``None``, the size\n (height or width) is estimated from the size of input array\n in first batch with\n :func:`~chainer.utils.conv.get_deconv_outsize`.\n If outsize is not ``None``, the result of outsize applied to\n :func:`~chainer.utils.conv.get_conv_outsize` must be equal to\n the shape of the 2d array in the input batch ``x``.\n cover_all (bool): If ``True``, all spatial locations are pooled\n into some output pixels, and the output size is larger than that\n when cover_all is ``False``.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n \"\"\"\n return Unpooling2D(ksize, stride, pad, outsize, cover_all)(x)\n", "path": "chainer/functions/pooling/unpooling_2d.py"}], "after_files": [{"content": "from chainer import cuda\nfrom chainer.functions.pooling import pooling_2d\nfrom chainer.utils import conv\nfrom chainer.utils import type_check\n\n\nclass Unpooling2D(pooling_2d.Pooling2D):\n\n \"\"\"Unpooling over a set of 2d planes.\"\"\"\n\n def __init__(self, ksize, stride=None, pad=0,\n outsize=None, cover_all=True):\n super(Unpooling2D, self).__init__(ksize, stride, pad, cover_all)\n self.outh, self.outw = (None, None) if outsize is None else outsize\n\n def check_type_forward(self, in_types):\n n_in = in_types.size()\n type_check.expect(n_in == 1)\n x_type = in_types[0]\n\n type_check.expect(\n x_type.dtype.kind == 'f',\n x_type.ndim == 4,\n )\n\n if self.outh is not None:\n expected_h = conv.get_conv_outsize(\n self.outh, self.kh, self.sy, self.ph, cover_all=self.cover_all)\n type_check.expect(x_type.shape[2] == expected_h)\n if self.outw is not None:\n expected_w = conv.get_conv_outsize(\n self.outw, self.kw, self.sx, self.pw, cover_all=self.cover_all)\n type_check.expect(x_type.shape[3] == expected_w)\n\n def forward(self, x):\n h, w = x[0].shape[2:]\n if self.outh is None:\n self.outh = conv.get_deconv_outsize(\n h, self.kh, self.sy, self.ph, cover_all=self.cover_all)\n if self.outw is None:\n self.outw = conv.get_deconv_outsize(\n w, self.kw, self.sx, self.pw, cover_all=self.cover_all)\n xp = cuda.get_array_module(*x)\n col = xp.tile(x[0][:, :, None, None],\n (1, 1, self.kh, self.kw, 1, 1))\n if isinstance(x[0], cuda.ndarray):\n y = conv.col2im_gpu(col, self.sy, self.sx, self.ph, self.pw,\n self.outh, self.outw)\n else:\n y = conv.col2im_cpu(col, self.sy, self.sx, self.ph, self.pw,\n self.outh, self.outw)\n return y,\n\n def backward(self, x, gy):\n if isinstance(gy[0], cuda.ndarray):\n gcol = conv.im2col_gpu(\n gy[0], self.kh, self.kw, self.sy, self.sx, self.ph, self.pw,\n cover_all=self.cover_all)\n else:\n gcol = 
conv.im2col_cpu(\n gy[0], self.kh, self.kw, self.sy, self.sx, self.ph, self.pw,\n cover_all=self.cover_all)\n gx = gcol.sum(axis=(2, 3))\n return gx,\n\n\ndef unpooling_2d(x, ksize, stride=None, pad=0, outsize=None, cover_all=True):\n \"\"\"Inverse operation of pooling for 2d array.\n\n This function acts similarly to :class:`~functions.Deconvolution2D`, but\n it spreads input 2d array's value without any parameter instead of\n computing the inner products.\n\n Args:\n x (~chainer.Variable): Input variable.\n ksize (int or pair of ints): Size of pooling window. ``ksize=k`` and\n ``ksize=(k, k)`` are equivalent.\n stride (int, pair of ints or None): Stride of pooling applications.\n ``stride=s`` and ``stride=(s, s)`` are equivalent. If ``None`` is\n specified, then it uses same stride as the pooling window size.\n pad (int or pair of ints): Spatial padding width for the input array.\n ``pad=p`` and ``pad=(p, p)`` are equivalent.\n outsize (None or pair of ints): Expected output size (height, width)\n of array after the operation. If ``None``, the size\n (height or width) is estimated from the size of input array\n in first batch with\n :func:`~chainer.utils.conv.get_deconv_outsize`.\n If outsize is not ``None``, the result of outsize applied to\n :func:`~chainer.utils.conv.get_conv_outsize` must be equal to\n the shape of the 2d array in the input batch ``x``.\n cover_all (bool): If ``True``, the output size may be smaller than\n the size if ``cover_all`` is ``False``. This flag serves to\n align behavior to the pooling functions which can cover all\n input locations, see :func:`~chainer.functions.max_pooling_2d`\n and :func:`~chainer.functions.convolution_2d`.\n\n\n Returns:\n ~chainer.Variable: Output variable.\n\n \"\"\"\n return Unpooling2D(ksize, stride, pad, outsize, cover_all)(x)\n", "path": "chainer/functions/pooling/unpooling_2d.py"}]} | 1,780 | 271 |
gh_patches_debug_25164 | rasdani/github-patches | git_diff | Kinto__kinto-930 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
'set' object has no attribute extends
```
kinto/views/permissions.py", line 107, in get_records
    perms_by_object_uri.setdefault(subobj_uri, []).extend(resource_perms)
AttributeError: 'set' object has no attribute 'extend'"
```
--- END ISSUE ---
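For context, the traceback boils down to a small Python pitfall: the mapping's existing values are `set` objects (that is what the `AttributeError` is complaining about), so the list default passed to `setdefault()` is ignored for keys that are already present, and `set` has no `extend` method. A minimal sketch, with an illustrative dictionary value that is not taken from the codebase:
```python
# Existing keys map to sets (as the AttributeError shows), so the []
# default given to setdefault() is only used when the key is missing.
perms_by_object_uri = {"/buckets/b1": {"write"}}  # illustrative value

try:
    perms_by_object_uri.setdefault("/buckets/b1", []).extend(["read"])
except AttributeError as exc:
    print(exc)  # 'set' object has no attribute 'extend'

# Sets grow with update() rather than extend():
perms_by_object_uri.setdefault("/buckets/b1", set()).update(["read"])
print(perms_by_object_uri)  # the set now holds both 'write' and 'read'
```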
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/views/permissions.py`
Content:
```
1 import colander
2 from pyramid.security import NO_PERMISSION_REQUIRED, Authenticated
3 from pyramid.settings import aslist
4
5 from kinto.authorization import PERMISSIONS_INHERITANCE_TREE
6 from kinto.core import utils as core_utils, resource
7 from kinto.core.storage.memory import extract_record_set
8
9
10 def allowed_from_settings(settings, principals):
11 """Returns every permissions allowed from settings for the current user.
12 :param settings dict: app settings
13 :param principals list: list of principals of current user
14 :rtype: dict
15
16 Result example::
17
18 {
19 "bucket": {"write", "collection:create"},
20 "collection": {"read"}
21 }
22
23 XXX: This helper will be useful for Kinto/kinto#894
24 """
25 perms_settings = {k: aslist(v) for k, v in settings.items()
26 if k.endswith('_principals')}
27 from_settings = {}
28 for key, allowed_principals in perms_settings.items():
29 resource_name, permission, _ = key.split('_')
30 # Keep the known permissions only.
31 if resource_name not in PERMISSIONS_INHERITANCE_TREE.keys():
32 continue
33 # Keep the permissions of the current user only.
34 if not bool(set(principals) & set(allowed_principals)):
35 continue
36 # ``collection_create_principals`` means ``collection:create`` in bucket.
37 if permission == 'create':
38 permission = '%s:%s' % (resource_name, permission)
39 resource_name = { # resource parents.
40 'bucket': '',
41 'collection': 'bucket',
42 'group': 'bucket',
43 'record': 'collection'}[resource_name]
44 # Store them in a convenient way.
45 from_settings.setdefault(resource_name, set()).add(permission)
46 return from_settings
47
48
49 class PermissionsModel(object):
50 id_field = 'id'
51 modified_field = 'last_modified'
52 deleted_field = 'deleted'
53
54 def __init__(self, request):
55 self.request = request
56
57 def get_records(self, filters=None, sorting=None, pagination_rules=None,
58 limit=None, include_deleted=False, parent_id=None):
59 # Invert the permissions inheritance tree.
60 perms_descending_tree = {}
61 for on_resource, tree in PERMISSIONS_INHERITANCE_TREE.items():
62 for obtained_perm, obtained_from in tree.items():
63 for from_resource, perms in obtained_from.items():
64 for perm in perms:
65 perms_descending_tree.setdefault(from_resource, {})\
66 .setdefault(perm, {})\
67 .setdefault(on_resource, set())\
68 .add(obtained_perm)
69
70 # Obtain current principals.
71 principals = self.request.effective_principals
72 if Authenticated in principals:
73 # Since this view does not require any permission (can be used to
74 # obtain public users permissions), we have to add the prefixed
75 # userid among the principals
76 # (see :mod:`kinto.core.authentication`)
77 userid = self.request.prefixed_userid
78 principals.append(userid)
79
80 # Query every possible permission of the current user from backend.
81 backend = self.request.registry.permission
82 perms_by_object_uri = backend.get_accessible_objects(principals)
83
84 # Check settings for every allowed resources.
85 from_settings = allowed_from_settings(self.request.registry.settings, principals)
86
87 # Expand permissions obtained from backend with the object URIs that
88 # correspond to permissions allowed from settings.
89 allowed_resources = {'bucket', 'collection', 'group'} & set(from_settings.keys())
90 if allowed_resources:
91 storage = self.request.registry.storage
92 every_bucket, _ = storage.get_all(parent_id='', collection_id='bucket')
93 for bucket in every_bucket:
94 bucket_uri = '/buckets/{id}'.format(**bucket)
95 for res in allowed_resources:
96 resource_perms = from_settings[res]
97 # Bucket is always fetched.
98 if res == 'bucket':
99 perms_by_object_uri.setdefault(bucket_uri, []).extend(resource_perms)
100 continue
101 # Fetch bucket collections and groups.
102 # XXX: wrong approach: query in a loop!
103 every_subobjects, _ = storage.get_all(parent_id=bucket_uri,
104 collection_id=res)
105 for subobject in every_subobjects:
106 subobj_uri = bucket_uri + '/{0}s/{1}'.format(res, subobject['id'])
107 perms_by_object_uri.setdefault(subobj_uri, []).extend(resource_perms)
108
109 entries = []
110 for object_uri, perms in perms_by_object_uri.items():
111 try:
112 # Obtain associated res from object URI
113 resource_name, matchdict = core_utils.view_lookup(self.request,
114 object_uri)
115 except ValueError:
116 # Skip permissions entries that are not linked to an object URI
117 continue
118
119 # For consistency with event payloads, prefix id with resource name
120 matchdict[resource_name + '_id'] = matchdict.get('id')
121
122 # Expand implicit permissions using descending tree.
123 permissions = set(perms)
124 for perm in perms:
125 obtained = perms_descending_tree[resource_name][perm]
126 # Related to same resource only and not every sub-objects.
127 # (e.g "bucket:write" gives "bucket:read" but not "group:read")
128 permissions |= obtained[resource_name]
129
130 entry = dict(uri=object_uri,
131 resource_name=resource_name,
132 permissions=list(permissions),
133 **matchdict)
134 entries.append(entry)
135
136 return extract_record_set(entries, filters=filters, sorting=sorting,
137 pagination_rules=pagination_rules,
138 limit=limit)
139
140
141 class PermissionsSchema(resource.ResourceSchema):
142 uri = colander.SchemaNode(colander.String())
143 resource_name = colander.SchemaNode(colander.String())
144 permissions = colander.Sequence(colander.SchemaNode(colander.String()))
145 bucket_id = colander.SchemaNode(colander.String())
146 collection_id = colander.SchemaNode(colander.String(),
147 missing=colander.drop)
148 group_id = colander.SchemaNode(colander.String(),
149 missing=colander.drop)
150 record_id = colander.SchemaNode(colander.String(),
151 missing=colander.drop)
152
153 class Options:
154 preserve_unknown = False
155
156
157 @resource.register(name='permissions',
158 description='List of user permissions',
159 collection_path='/permissions',
160 record_path=None,
161 collection_methods=('GET',),
162 permission=NO_PERMISSION_REQUIRED)
163 class Permissions(resource.ShareableResource):
164
165 schema = PermissionsSchema
166
167 def __init__(self, request, context=None):
168 super(Permissions, self).__init__(request, context)
169 self.model = PermissionsModel(request)
170
171 def _extract_sorting(self, limit):
172 # Permissions entries are not stored with timestamp, so do not
173 # force it.
174 result = super(Permissions, self)._extract_sorting(limit)
175 without_last_modified = [s for s in result
176 if s.field != self.model.modified_field]
177 return without_last_modified
178
179 def _extract_filters(self, queryparams=None):
180 result = super(Permissions, self)._extract_filters(queryparams)
181 without_last_modified = [s for s in result
182 if s.field != self.model.modified_field]
183 return without_last_modified
184
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kinto/views/permissions.py b/kinto/views/permissions.py
--- a/kinto/views/permissions.py
+++ b/kinto/views/permissions.py
@@ -96,7 +96,7 @@
resource_perms = from_settings[res]
# Bucket is always fetched.
if res == 'bucket':
- perms_by_object_uri.setdefault(bucket_uri, []).extend(resource_perms)
+ perms_by_object_uri.setdefault(bucket_uri, set()).update(resource_perms)
continue
# Fetch bucket collections and groups.
# XXX: wrong approach: query in a loop!
@@ -104,7 +104,7 @@
collection_id=res)
for subobject in every_subobjects:
subobj_uri = bucket_uri + '/{0}s/{1}'.format(res, subobject['id'])
- perms_by_object_uri.setdefault(subobj_uri, []).extend(resource_perms)
+ perms_by_object_uri.setdefault(subobj_uri, set()).update(resource_perms)
entries = []
for object_uri, perms in perms_by_object_uri.items():
| {"golden_diff": "diff --git a/kinto/views/permissions.py b/kinto/views/permissions.py\n--- a/kinto/views/permissions.py\n+++ b/kinto/views/permissions.py\n@@ -96,7 +96,7 @@\n resource_perms = from_settings[res]\n # Bucket is always fetched.\n if res == 'bucket':\n- perms_by_object_uri.setdefault(bucket_uri, []).extend(resource_perms)\n+ perms_by_object_uri.setdefault(bucket_uri, set()).update(resource_perms)\n continue\n # Fetch bucket collections and groups.\n # XXX: wrong approach: query in a loop!\n@@ -104,7 +104,7 @@\n collection_id=res)\n for subobject in every_subobjects:\n subobj_uri = bucket_uri + '/{0}s/{1}'.format(res, subobject['id'])\n- perms_by_object_uri.setdefault(subobj_uri, []).extend(resource_perms)\n+ perms_by_object_uri.setdefault(subobj_uri, set()).update(resource_perms)\n \n entries = []\n for object_uri, perms in perms_by_object_uri.items():\n", "issue": "'set' object has no attribute extends\n```kinto\\/views\\/permissions.py\\\", line 107, in get_records\\n perms_by_object_uri.setdefault(subobj_uri, []).extend(resource_perms)\\nAttributeError: 'set' object has no attribute 'extend'\"```\r\n\r\n\n", "before_files": [{"content": "import colander\nfrom pyramid.security import NO_PERMISSION_REQUIRED, Authenticated\nfrom pyramid.settings import aslist\n\nfrom kinto.authorization import PERMISSIONS_INHERITANCE_TREE\nfrom kinto.core import utils as core_utils, resource\nfrom kinto.core.storage.memory import extract_record_set\n\n\ndef allowed_from_settings(settings, principals):\n \"\"\"Returns every permissions allowed from settings for the current user.\n :param settings dict: app settings\n :param principals list: list of principals of current user\n :rtype: dict\n\n Result example::\n\n {\n \"bucket\": {\"write\", \"collection:create\"},\n \"collection\": {\"read\"}\n }\n\n XXX: This helper will be useful for Kinto/kinto#894\n \"\"\"\n perms_settings = {k: aslist(v) for k, v in settings.items()\n if k.endswith('_principals')}\n from_settings = {}\n for key, allowed_principals in perms_settings.items():\n resource_name, permission, _ = key.split('_')\n # Keep the known permissions only.\n if resource_name not in PERMISSIONS_INHERITANCE_TREE.keys():\n continue\n # Keep the permissions of the current user only.\n if not bool(set(principals) & set(allowed_principals)):\n continue\n # ``collection_create_principals`` means ``collection:create`` in bucket.\n if permission == 'create':\n permission = '%s:%s' % (resource_name, permission)\n resource_name = { # resource parents.\n 'bucket': '',\n 'collection': 'bucket',\n 'group': 'bucket',\n 'record': 'collection'}[resource_name]\n # Store them in a convenient way.\n from_settings.setdefault(resource_name, set()).add(permission)\n return from_settings\n\n\nclass PermissionsModel(object):\n id_field = 'id'\n modified_field = 'last_modified'\n deleted_field = 'deleted'\n\n def __init__(self, request):\n self.request = request\n\n def get_records(self, filters=None, sorting=None, pagination_rules=None,\n limit=None, include_deleted=False, parent_id=None):\n # Invert the permissions inheritance tree.\n perms_descending_tree = {}\n for on_resource, tree in PERMISSIONS_INHERITANCE_TREE.items():\n for obtained_perm, obtained_from in tree.items():\n for from_resource, perms in obtained_from.items():\n for perm in perms:\n perms_descending_tree.setdefault(from_resource, {})\\\n .setdefault(perm, {})\\\n .setdefault(on_resource, set())\\\n .add(obtained_perm)\n\n # Obtain current principals.\n principals = 
self.request.effective_principals\n if Authenticated in principals:\n # Since this view does not require any permission (can be used to\n # obtain public users permissions), we have to add the prefixed\n # userid among the principals\n # (see :mod:`kinto.core.authentication`)\n userid = self.request.prefixed_userid\n principals.append(userid)\n\n # Query every possible permission of the current user from backend.\n backend = self.request.registry.permission\n perms_by_object_uri = backend.get_accessible_objects(principals)\n\n # Check settings for every allowed resources.\n from_settings = allowed_from_settings(self.request.registry.settings, principals)\n\n # Expand permissions obtained from backend with the object URIs that\n # correspond to permissions allowed from settings.\n allowed_resources = {'bucket', 'collection', 'group'} & set(from_settings.keys())\n if allowed_resources:\n storage = self.request.registry.storage\n every_bucket, _ = storage.get_all(parent_id='', collection_id='bucket')\n for bucket in every_bucket:\n bucket_uri = '/buckets/{id}'.format(**bucket)\n for res in allowed_resources:\n resource_perms = from_settings[res]\n # Bucket is always fetched.\n if res == 'bucket':\n perms_by_object_uri.setdefault(bucket_uri, []).extend(resource_perms)\n continue\n # Fetch bucket collections and groups.\n # XXX: wrong approach: query in a loop!\n every_subobjects, _ = storage.get_all(parent_id=bucket_uri,\n collection_id=res)\n for subobject in every_subobjects:\n subobj_uri = bucket_uri + '/{0}s/{1}'.format(res, subobject['id'])\n perms_by_object_uri.setdefault(subobj_uri, []).extend(resource_perms)\n\n entries = []\n for object_uri, perms in perms_by_object_uri.items():\n try:\n # Obtain associated res from object URI\n resource_name, matchdict = core_utils.view_lookup(self.request,\n object_uri)\n except ValueError:\n # Skip permissions entries that are not linked to an object URI\n continue\n\n # For consistency with event payloads, prefix id with resource name\n matchdict[resource_name + '_id'] = matchdict.get('id')\n\n # Expand implicit permissions using descending tree.\n permissions = set(perms)\n for perm in perms:\n obtained = perms_descending_tree[resource_name][perm]\n # Related to same resource only and not every sub-objects.\n # (e.g \"bucket:write\" gives \"bucket:read\" but not \"group:read\")\n permissions |= obtained[resource_name]\n\n entry = dict(uri=object_uri,\n resource_name=resource_name,\n permissions=list(permissions),\n **matchdict)\n entries.append(entry)\n\n return extract_record_set(entries, filters=filters, sorting=sorting,\n pagination_rules=pagination_rules,\n limit=limit)\n\n\nclass PermissionsSchema(resource.ResourceSchema):\n uri = colander.SchemaNode(colander.String())\n resource_name = colander.SchemaNode(colander.String())\n permissions = colander.Sequence(colander.SchemaNode(colander.String()))\n bucket_id = colander.SchemaNode(colander.String())\n collection_id = colander.SchemaNode(colander.String(),\n missing=colander.drop)\n group_id = colander.SchemaNode(colander.String(),\n missing=colander.drop)\n record_id = colander.SchemaNode(colander.String(),\n missing=colander.drop)\n\n class Options:\n preserve_unknown = False\n\n\[email protected](name='permissions',\n description='List of user permissions',\n collection_path='/permissions',\n record_path=None,\n collection_methods=('GET',),\n permission=NO_PERMISSION_REQUIRED)\nclass Permissions(resource.ShareableResource):\n\n schema = PermissionsSchema\n\n def __init__(self, request, 
context=None):\n super(Permissions, self).__init__(request, context)\n self.model = PermissionsModel(request)\n\n def _extract_sorting(self, limit):\n # Permissions entries are not stored with timestamp, so do not\n # force it.\n result = super(Permissions, self)._extract_sorting(limit)\n without_last_modified = [s for s in result\n if s.field != self.model.modified_field]\n return without_last_modified\n\n def _extract_filters(self, queryparams=None):\n result = super(Permissions, self)._extract_filters(queryparams)\n without_last_modified = [s for s in result\n if s.field != self.model.modified_field]\n return without_last_modified\n", "path": "kinto/views/permissions.py"}], "after_files": [{"content": "import colander\nfrom pyramid.security import NO_PERMISSION_REQUIRED, Authenticated\nfrom pyramid.settings import aslist\n\nfrom kinto.authorization import PERMISSIONS_INHERITANCE_TREE\nfrom kinto.core import utils as core_utils, resource\nfrom kinto.core.storage.memory import extract_record_set\n\n\ndef allowed_from_settings(settings, principals):\n \"\"\"Returns every permissions allowed from settings for the current user.\n :param settings dict: app settings\n :param principals list: list of principals of current user\n :rtype: dict\n\n Result example::\n\n {\n \"bucket\": {\"write\", \"collection:create\"},\n \"collection\": {\"read\"}\n }\n\n XXX: This helper will be useful for Kinto/kinto#894\n \"\"\"\n perms_settings = {k: aslist(v) for k, v in settings.items()\n if k.endswith('_principals')}\n from_settings = {}\n for key, allowed_principals in perms_settings.items():\n resource_name, permission, _ = key.split('_')\n # Keep the known permissions only.\n if resource_name not in PERMISSIONS_INHERITANCE_TREE.keys():\n continue\n # Keep the permissions of the current user only.\n if not bool(set(principals) & set(allowed_principals)):\n continue\n # ``collection_create_principals`` means ``collection:create`` in bucket.\n if permission == 'create':\n permission = '%s:%s' % (resource_name, permission)\n resource_name = { # resource parents.\n 'bucket': '',\n 'collection': 'bucket',\n 'group': 'bucket',\n 'record': 'collection'}[resource_name]\n # Store them in a convenient way.\n from_settings.setdefault(resource_name, set()).add(permission)\n return from_settings\n\n\nclass PermissionsModel(object):\n id_field = 'id'\n modified_field = 'last_modified'\n deleted_field = 'deleted'\n\n def __init__(self, request):\n self.request = request\n\n def get_records(self, filters=None, sorting=None, pagination_rules=None,\n limit=None, include_deleted=False, parent_id=None):\n # Invert the permissions inheritance tree.\n perms_descending_tree = {}\n for on_resource, tree in PERMISSIONS_INHERITANCE_TREE.items():\n for obtained_perm, obtained_from in tree.items():\n for from_resource, perms in obtained_from.items():\n for perm in perms:\n perms_descending_tree.setdefault(from_resource, {})\\\n .setdefault(perm, {})\\\n .setdefault(on_resource, set())\\\n .add(obtained_perm)\n\n # Obtain current principals.\n principals = self.request.effective_principals\n if Authenticated in principals:\n # Since this view does not require any permission (can be used to\n # obtain public users permissions), we have to add the prefixed\n # userid among the principals\n # (see :mod:`kinto.core.authentication`)\n userid = self.request.prefixed_userid\n principals.append(userid)\n\n # Query every possible permission of the current user from backend.\n backend = self.request.registry.permission\n perms_by_object_uri = 
backend.get_accessible_objects(principals)\n\n # Check settings for every allowed resources.\n from_settings = allowed_from_settings(self.request.registry.settings, principals)\n\n # Expand permissions obtained from backend with the object URIs that\n # correspond to permissions allowed from settings.\n allowed_resources = {'bucket', 'collection', 'group'} & set(from_settings.keys())\n if allowed_resources:\n storage = self.request.registry.storage\n every_bucket, _ = storage.get_all(parent_id='', collection_id='bucket')\n for bucket in every_bucket:\n bucket_uri = '/buckets/{id}'.format(**bucket)\n for res in allowed_resources:\n resource_perms = from_settings[res]\n # Bucket is always fetched.\n if res == 'bucket':\n perms_by_object_uri.setdefault(bucket_uri, set()).update(resource_perms)\n continue\n # Fetch bucket collections and groups.\n # XXX: wrong approach: query in a loop!\n every_subobjects, _ = storage.get_all(parent_id=bucket_uri,\n collection_id=res)\n for subobject in every_subobjects:\n subobj_uri = bucket_uri + '/{0}s/{1}'.format(res, subobject['id'])\n perms_by_object_uri.setdefault(subobj_uri, set()).update(resource_perms)\n\n entries = []\n for object_uri, perms in perms_by_object_uri.items():\n try:\n # Obtain associated res from object URI\n resource_name, matchdict = core_utils.view_lookup(self.request,\n object_uri)\n except ValueError:\n # Skip permissions entries that are not linked to an object URI\n continue\n\n # For consistency with event payloads, prefix id with resource name\n matchdict[resource_name + '_id'] = matchdict.get('id')\n\n # Expand implicit permissions using descending tree.\n permissions = set(perms)\n for perm in perms:\n obtained = perms_descending_tree[resource_name][perm]\n # Related to same resource only and not every sub-objects.\n # (e.g \"bucket:write\" gives \"bucket:read\" but not \"group:read\")\n permissions |= obtained[resource_name]\n\n entry = dict(uri=object_uri,\n resource_name=resource_name,\n permissions=list(permissions),\n **matchdict)\n entries.append(entry)\n\n return extract_record_set(entries, filters=filters, sorting=sorting,\n pagination_rules=pagination_rules,\n limit=limit)\n\n\nclass PermissionsSchema(resource.ResourceSchema):\n uri = colander.SchemaNode(colander.String())\n resource_name = colander.SchemaNode(colander.String())\n permissions = colander.Sequence(colander.SchemaNode(colander.String()))\n bucket_id = colander.SchemaNode(colander.String())\n collection_id = colander.SchemaNode(colander.String(),\n missing=colander.drop)\n group_id = colander.SchemaNode(colander.String(),\n missing=colander.drop)\n record_id = colander.SchemaNode(colander.String(),\n missing=colander.drop)\n\n class Options:\n preserve_unknown = False\n\n\[email protected](name='permissions',\n description='List of user permissions',\n collection_path='/permissions',\n record_path=None,\n collection_methods=('GET',),\n permission=NO_PERMISSION_REQUIRED)\nclass Permissions(resource.ShareableResource):\n\n schema = PermissionsSchema\n\n def __init__(self, request, context=None):\n super(Permissions, self).__init__(request, context)\n self.model = PermissionsModel(request)\n\n def _extract_sorting(self, limit):\n # Permissions entries are not stored with timestamp, so do not\n # force it.\n result = super(Permissions, self)._extract_sorting(limit)\n without_last_modified = [s for s in result\n if s.field != self.model.modified_field]\n return without_last_modified\n\n def _extract_filters(self, queryparams=None):\n result = 
super(Permissions, self)._extract_filters(queryparams)\n without_last_modified = [s for s in result\n if s.field != self.model.modified_field]\n return without_last_modified\n", "path": "kinto/views/permissions.py"}]} | 2,277 | 226 |
gh_patches_debug_12576 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-1116 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Minor discrepancy in AveragePrecision documentation for `average="none"`
## 📚 Documentation
The [documentation](https://torchmetrics.readthedocs.io/en/latest/classification/average_precision.html) for the `torchmetrics.AveragePrecision` class and `torchmetrics.functional.average_precision()` function states that setting `average="none"` is permitted. However, the source code only seems to allow `average=None` (see [here](https://github.com/Lightning-AI/metrics/blob/master/src/torchmetrics/classification/avg_precision.py#L98) and [here](https://github.com/Lightning-AI/metrics/blob/master/src/torchmetrics/functional/classification/average_precision.py#L175)).
I'd be happy to submit a PR for this but I would like some feedback on how to best resolve this, since I am not familiar with the design of this library. The two immediate directions I can think of are editing the documentation to only allow `average=None` or editing the source code to support `average="none"`.
Thanks!
--- END ISSUE ---
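To make the mismatch concrete: with the source shown below, only the `None` spelling returns per-class scores, while the documented `"none"` spelling falls through to the `ValueError` branch. A small sketch reusing the multiclass example from the function's own docstring:
```python
import torch
from torchmetrics.functional import average_precision

pred = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
                     [0.05, 0.75, 0.05, 0.05, 0.05],
                     [0.05, 0.05, 0.75, 0.05, 0.05],
                     [0.05, 0.05, 0.05, 0.75, 0.05]])
target = torch.tensor([0, 1, 3, 2])

# Works as documented: one score per class.
print(average_precision(pred, target, num_classes=5, average=None))

# Documented as equivalent, but rejected by the allowed_average check:
try:
    average_precision(pred, target, num_classes=5, average="none")
except ValueError as exc:
    print(exc)  # Expected argument `average` to be one of ('micro', 'macro', 'weighted', None) but got none
```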
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/torchmetrics/functional/classification/average_precision.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import warnings
15 from typing import List, Optional, Tuple, Union
16
17 import torch
18 from torch import Tensor
19
20 from torchmetrics.functional.classification.precision_recall_curve import (
21 _precision_recall_curve_compute,
22 _precision_recall_curve_update,
23 )
24 from torchmetrics.utilities.data import _bincount
25
26
27 def _average_precision_update(
28 preds: Tensor,
29 target: Tensor,
30 num_classes: Optional[int] = None,
31 pos_label: Optional[int] = None,
32 average: Optional[str] = "macro",
33 ) -> Tuple[Tensor, Tensor, int, Optional[int]]:
34 """Format the predictions and target based on the ``num_classes``, ``pos_label`` and ``average`` parameter.
35
36 Args:
37 preds: predictions from model (logits or probabilities)
38 target: ground truth values
39 num_classes: integer with number of classes.
40 pos_label: integer determining the positive class. Default is ``None`` which for binary problem is translated
41 to 1. For multiclass problems this argument should not be set as we iteratively change it in the
42 range ``[0, num_classes-1]``
43 average: reduction method for multi-class or multi-label problems
44 """
45 preds, target, num_classes, pos_label = _precision_recall_curve_update(preds, target, num_classes, pos_label)
46 if average == "micro" and preds.ndim != target.ndim:
47 raise ValueError("Cannot use `micro` average with multi-class input")
48
49 return preds, target, num_classes, pos_label
50
51
52 def _average_precision_compute(
53 preds: Tensor,
54 target: Tensor,
55 num_classes: int,
56 pos_label: Optional[int] = None,
57 average: Optional[str] = "macro",
58 ) -> Union[List[Tensor], Tensor]:
59 """Computes the average precision score.
60
61 Args:
62 preds: predictions from model (logits or probabilities)
63 target: ground truth values
64 num_classes: integer with number of classes.
65 pos_label: integer determining the positive class. Default is ``None`` which for binary problem is translated
66 to 1. For multiclass problems his argument should not be set as we iteratively change it in the
67 range ``[0, num_classes-1]``
68 average: reduction method for multi-class or multi-label problems
69
70 Example:
71 >>> # binary case
72 >>> preds = torch.tensor([0, 1, 2, 3])
73 >>> target = torch.tensor([0, 1, 1, 1])
74 >>> pos_label = 1
75 >>> preds, target, num_classes, pos_label = _average_precision_update(preds, target, pos_label=pos_label)
76 >>> _average_precision_compute(preds, target, num_classes, pos_label)
77 tensor(1.)
78
79 >>> # multiclass case
80 >>> preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
81 ... [0.05, 0.75, 0.05, 0.05, 0.05],
82 ... [0.05, 0.05, 0.75, 0.05, 0.05],
83 ... [0.05, 0.05, 0.05, 0.75, 0.05]])
84 >>> target = torch.tensor([0, 1, 3, 2])
85 >>> num_classes = 5
86 >>> preds, target, num_classes, pos_label = _average_precision_update(preds, target, num_classes)
87 >>> _average_precision_compute(preds, target, num_classes, average=None)
88 [tensor(1.), tensor(1.), tensor(0.2500), tensor(0.2500), tensor(nan)]
89 """
90
91 if average == "micro" and preds.ndim == target.ndim:
92 preds = preds.flatten()
93 target = target.flatten()
94 num_classes = 1
95
96 precision, recall, _ = _precision_recall_curve_compute(preds, target, num_classes, pos_label)
97 if average == "weighted":
98 if preds.ndim == target.ndim and target.ndim > 1:
99 weights = target.sum(dim=0).float()
100 else:
101 weights = _bincount(target, minlength=num_classes).float()
102 weights = weights / torch.sum(weights)
103 else:
104 weights = None
105 return _average_precision_compute_with_precision_recall(precision, recall, num_classes, average, weights)
106
107
108 def _average_precision_compute_with_precision_recall(
109 precision: Tensor,
110 recall: Tensor,
111 num_classes: int,
112 average: Optional[str] = "macro",
113 weights: Optional[Tensor] = None,
114 ) -> Union[List[Tensor], Tensor]:
115 """Computes the average precision score from precision and recall.
116
117 Args:
118 precision: precision values
119 recall: recall values
120 num_classes: integer with number of classes. Not nessesary to provide
121 for binary problems.
122 average: reduction method for multi-class or multi-label problems
123 weights: weights to use when average='weighted'
124
125 Example:
126 >>> # binary case
127 >>> preds = torch.tensor([0, 1, 2, 3])
128 >>> target = torch.tensor([0, 1, 1, 1])
129 >>> pos_label = 1
130 >>> preds, target, num_classes, pos_label = _average_precision_update(preds, target, pos_label=pos_label)
131 >>> precision, recall, _ = _precision_recall_curve_compute(preds, target, num_classes, pos_label)
132 >>> _average_precision_compute_with_precision_recall(precision, recall, num_classes, average=None)
133 tensor(1.)
134
135 >>> # multiclass case
136 >>> preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
137 ... [0.05, 0.75, 0.05, 0.05, 0.05],
138 ... [0.05, 0.05, 0.75, 0.05, 0.05],
139 ... [0.05, 0.05, 0.05, 0.75, 0.05]])
140 >>> target = torch.tensor([0, 1, 3, 2])
141 >>> num_classes = 5
142 >>> preds, target, num_classes, pos_label = _average_precision_update(preds, target, num_classes)
143 >>> precision, recall, _ = _precision_recall_curve_compute(preds, target, num_classes)
144 >>> _average_precision_compute_with_precision_recall(precision, recall, num_classes, average=None)
145 [tensor(1.), tensor(1.), tensor(0.2500), tensor(0.2500), tensor(nan)]
146 """
147
148 # Return the step function integral
149 # The following works because the last entry of precision is
150 # guaranteed to be 1, as returned by precision_recall_curve
151 if num_classes == 1:
152 return -torch.sum((recall[1:] - recall[:-1]) * precision[:-1])
153
154 res = []
155 for p, r in zip(precision, recall):
156 res.append(-torch.sum((r[1:] - r[:-1]) * p[:-1]))
157
158 # Reduce
159 if average in ("macro", "weighted"):
160 res = torch.stack(res)
161 if torch.isnan(res).any():
162 warnings.warn(
163 "Average precision score for one or more classes was `nan`. Ignoring these classes in average",
164 UserWarning,
165 )
166 if average == "macro":
167 return res[~torch.isnan(res)].mean()
168 weights = torch.ones_like(res) if weights is None else weights
169 return (res * weights)[~torch.isnan(res)].sum()
170 if average is None:
171 return res
172 allowed_average = ("micro", "macro", "weighted", None)
173 raise ValueError(f"Expected argument `average` to be one of {allowed_average}" f" but got {average}")
174
175
176 def average_precision(
177 preds: Tensor,
178 target: Tensor,
179 num_classes: Optional[int] = None,
180 pos_label: Optional[int] = None,
181 average: Optional[str] = "macro",
182 ) -> Union[List[Tensor], Tensor]:
183 """Computes the average precision score.
184
185 Args:
186 preds: predictions from model (logits or probabilities)
187 target: ground truth values
188 num_classes: integer with number of classes. Not nessesary to provide
189 for binary problems.
190 pos_label: integer determining the positive class. Default is ``None`` which for binary problem is translated
191 to 1. For multiclass problems his argument should not be set as we iteratively change it in the
192 range ``[0, num_classes-1]``
193 average:
194 defines the reduction that is applied in the case of multiclass and multilabel input.
195 Should be one of the following:
196
197 - ``'macro'`` [default]: Calculate the metric for each class separately, and average the
198 metrics across classes (with equal weights for each class).
199 - ``'micro'``: Calculate the metric globally, across all samples and classes. Cannot be
200 used with multiclass input.
201 - ``'weighted'``: Calculate the metric for each class separately, and average the
202 metrics across classes, weighting each class by its support.
203 - ``'none'`` or ``None``: Calculate the metric for each class separately, and return
204 the metric for every class.
205
206 Returns:
207 tensor with average precision. If multiclass will return list
208 of such tensors, one for each class
209
210 Example (binary case):
211 >>> from torchmetrics.functional import average_precision
212 >>> pred = torch.tensor([0, 1, 2, 3])
213 >>> target = torch.tensor([0, 1, 1, 1])
214 >>> average_precision(pred, target, pos_label=1)
215 tensor(1.)
216
217 Example (multiclass case):
218 >>> pred = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
219 ... [0.05, 0.75, 0.05, 0.05, 0.05],
220 ... [0.05, 0.05, 0.75, 0.05, 0.05],
221 ... [0.05, 0.05, 0.05, 0.75, 0.05]])
222 >>> target = torch.tensor([0, 1, 3, 2])
223 >>> average_precision(pred, target, num_classes=5, average=None)
224 [tensor(1.), tensor(1.), tensor(0.2500), tensor(0.2500), tensor(nan)]
225 """
226 preds, target, num_classes, pos_label = _average_precision_update(preds, target, num_classes, pos_label, average)
227 return _average_precision_compute(preds, target, num_classes, pos_label, average)
228
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/torchmetrics/functional/classification/average_precision.py b/src/torchmetrics/functional/classification/average_precision.py
--- a/src/torchmetrics/functional/classification/average_precision.py
+++ b/src/torchmetrics/functional/classification/average_precision.py
@@ -167,9 +167,9 @@
return res[~torch.isnan(res)].mean()
weights = torch.ones_like(res) if weights is None else weights
return (res * weights)[~torch.isnan(res)].sum()
- if average is None:
+ if average is None or average == "none":
return res
- allowed_average = ("micro", "macro", "weighted", None)
+ allowed_average = ("micro", "macro", "weighted", "none", None)
raise ValueError(f"Expected argument `average` to be one of {allowed_average}" f" but got {average}")
| {"golden_diff": "diff --git a/src/torchmetrics/functional/classification/average_precision.py b/src/torchmetrics/functional/classification/average_precision.py\n--- a/src/torchmetrics/functional/classification/average_precision.py\n+++ b/src/torchmetrics/functional/classification/average_precision.py\n@@ -167,9 +167,9 @@\n return res[~torch.isnan(res)].mean()\n weights = torch.ones_like(res) if weights is None else weights\n return (res * weights)[~torch.isnan(res)].sum()\n- if average is None:\n+ if average is None or average == \"none\":\n return res\n- allowed_average = (\"micro\", \"macro\", \"weighted\", None)\n+ allowed_average = (\"micro\", \"macro\", \"weighted\", \"none\", None)\n raise ValueError(f\"Expected argument `average` to be one of {allowed_average}\" f\" but got {average}\")\n", "issue": "Minor discrepancy in AveragePrecision documentation for `average=\"none\"`\n## \ud83d\udcda Documentation\r\n\r\nThe [documentation](https://torchmetrics.readthedocs.io/en/latest/classification/average_precision.html) for the `torchmetrics.AveragePrecision` class and `torchmetrics.functional.average_precision()` function state that setting `average=\"none\"` is permitted. However, the source code only seems to allow `average=None` (see [here](https://github.com/Lightning-AI/metrics/blob/master/src/torchmetrics/classification/avg_precision.py#L98) and [here](https://github.com/Lightning-AI/metrics/blob/master/src/torchmetrics/functional/classification/average_precision.py#L175)).\r\n\r\nI'd be happy to submit a PR for this but I would like some feedback on how to best resolve this, since I am not familiar with the design of this library. The two immediate directions I can think of are editing the documentation to only allow `average=None` or editing the source code to support `average=\"none\"`.\r\n\r\nThanks!\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport warnings\nfrom typing import List, Optional, Tuple, Union\n\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.functional.classification.precision_recall_curve import (\n _precision_recall_curve_compute,\n _precision_recall_curve_update,\n)\nfrom torchmetrics.utilities.data import _bincount\n\n\ndef _average_precision_update(\n preds: Tensor,\n target: Tensor,\n num_classes: Optional[int] = None,\n pos_label: Optional[int] = None,\n average: Optional[str] = \"macro\",\n) -> Tuple[Tensor, Tensor, int, Optional[int]]:\n \"\"\"Format the predictions and target based on the ``num_classes``, ``pos_label`` and ``average`` parameter.\n\n Args:\n preds: predictions from model (logits or probabilities)\n target: ground truth values\n num_classes: integer with number of classes.\n pos_label: integer determining the positive class. Default is ``None`` which for binary problem is translated\n to 1. 
For multiclass problems this argument should not be set as we iteratively change it in the\n range ``[0, num_classes-1]``\n average: reduction method for multi-class or multi-label problems\n \"\"\"\n preds, target, num_classes, pos_label = _precision_recall_curve_update(preds, target, num_classes, pos_label)\n if average == \"micro\" and preds.ndim != target.ndim:\n raise ValueError(\"Cannot use `micro` average with multi-class input\")\n\n return preds, target, num_classes, pos_label\n\n\ndef _average_precision_compute(\n preds: Tensor,\n target: Tensor,\n num_classes: int,\n pos_label: Optional[int] = None,\n average: Optional[str] = \"macro\",\n) -> Union[List[Tensor], Tensor]:\n \"\"\"Computes the average precision score.\n\n Args:\n preds: predictions from model (logits or probabilities)\n target: ground truth values\n num_classes: integer with number of classes.\n pos_label: integer determining the positive class. Default is ``None`` which for binary problem is translated\n to 1. For multiclass problems his argument should not be set as we iteratively change it in the\n range ``[0, num_classes-1]``\n average: reduction method for multi-class or multi-label problems\n\n Example:\n >>> # binary case\n >>> preds = torch.tensor([0, 1, 2, 3])\n >>> target = torch.tensor([0, 1, 1, 1])\n >>> pos_label = 1\n >>> preds, target, num_classes, pos_label = _average_precision_update(preds, target, pos_label=pos_label)\n >>> _average_precision_compute(preds, target, num_classes, pos_label)\n tensor(1.)\n\n >>> # multiclass case\n >>> preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],\n ... [0.05, 0.75, 0.05, 0.05, 0.05],\n ... [0.05, 0.05, 0.75, 0.05, 0.05],\n ... [0.05, 0.05, 0.05, 0.75, 0.05]])\n >>> target = torch.tensor([0, 1, 3, 2])\n >>> num_classes = 5\n >>> preds, target, num_classes, pos_label = _average_precision_update(preds, target, num_classes)\n >>> _average_precision_compute(preds, target, num_classes, average=None)\n [tensor(1.), tensor(1.), tensor(0.2500), tensor(0.2500), tensor(nan)]\n \"\"\"\n\n if average == \"micro\" and preds.ndim == target.ndim:\n preds = preds.flatten()\n target = target.flatten()\n num_classes = 1\n\n precision, recall, _ = _precision_recall_curve_compute(preds, target, num_classes, pos_label)\n if average == \"weighted\":\n if preds.ndim == target.ndim and target.ndim > 1:\n weights = target.sum(dim=0).float()\n else:\n weights = _bincount(target, minlength=num_classes).float()\n weights = weights / torch.sum(weights)\n else:\n weights = None\n return _average_precision_compute_with_precision_recall(precision, recall, num_classes, average, weights)\n\n\ndef _average_precision_compute_with_precision_recall(\n precision: Tensor,\n recall: Tensor,\n num_classes: int,\n average: Optional[str] = \"macro\",\n weights: Optional[Tensor] = None,\n) -> Union[List[Tensor], Tensor]:\n \"\"\"Computes the average precision score from precision and recall.\n\n Args:\n precision: precision values\n recall: recall values\n num_classes: integer with number of classes. 
Not nessesary to provide\n for binary problems.\n average: reduction method for multi-class or multi-label problems\n weights: weights to use when average='weighted'\n\n Example:\n >>> # binary case\n >>> preds = torch.tensor([0, 1, 2, 3])\n >>> target = torch.tensor([0, 1, 1, 1])\n >>> pos_label = 1\n >>> preds, target, num_classes, pos_label = _average_precision_update(preds, target, pos_label=pos_label)\n >>> precision, recall, _ = _precision_recall_curve_compute(preds, target, num_classes, pos_label)\n >>> _average_precision_compute_with_precision_recall(precision, recall, num_classes, average=None)\n tensor(1.)\n\n >>> # multiclass case\n >>> preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],\n ... [0.05, 0.75, 0.05, 0.05, 0.05],\n ... [0.05, 0.05, 0.75, 0.05, 0.05],\n ... [0.05, 0.05, 0.05, 0.75, 0.05]])\n >>> target = torch.tensor([0, 1, 3, 2])\n >>> num_classes = 5\n >>> preds, target, num_classes, pos_label = _average_precision_update(preds, target, num_classes)\n >>> precision, recall, _ = _precision_recall_curve_compute(preds, target, num_classes)\n >>> _average_precision_compute_with_precision_recall(precision, recall, num_classes, average=None)\n [tensor(1.), tensor(1.), tensor(0.2500), tensor(0.2500), tensor(nan)]\n \"\"\"\n\n # Return the step function integral\n # The following works because the last entry of precision is\n # guaranteed to be 1, as returned by precision_recall_curve\n if num_classes == 1:\n return -torch.sum((recall[1:] - recall[:-1]) * precision[:-1])\n\n res = []\n for p, r in zip(precision, recall):\n res.append(-torch.sum((r[1:] - r[:-1]) * p[:-1]))\n\n # Reduce\n if average in (\"macro\", \"weighted\"):\n res = torch.stack(res)\n if torch.isnan(res).any():\n warnings.warn(\n \"Average precision score for one or more classes was `nan`. Ignoring these classes in average\",\n UserWarning,\n )\n if average == \"macro\":\n return res[~torch.isnan(res)].mean()\n weights = torch.ones_like(res) if weights is None else weights\n return (res * weights)[~torch.isnan(res)].sum()\n if average is None:\n return res\n allowed_average = (\"micro\", \"macro\", \"weighted\", None)\n raise ValueError(f\"Expected argument `average` to be one of {allowed_average}\" f\" but got {average}\")\n\n\ndef average_precision(\n preds: Tensor,\n target: Tensor,\n num_classes: Optional[int] = None,\n pos_label: Optional[int] = None,\n average: Optional[str] = \"macro\",\n) -> Union[List[Tensor], Tensor]:\n \"\"\"Computes the average precision score.\n\n Args:\n preds: predictions from model (logits or probabilities)\n target: ground truth values\n num_classes: integer with number of classes. Not nessesary to provide\n for binary problems.\n pos_label: integer determining the positive class. Default is ``None`` which for binary problem is translated\n to 1. For multiclass problems his argument should not be set as we iteratively change it in the\n range ``[0, num_classes-1]``\n average:\n defines the reduction that is applied in the case of multiclass and multilabel input.\n Should be one of the following:\n\n - ``'macro'`` [default]: Calculate the metric for each class separately, and average the\n metrics across classes (with equal weights for each class).\n - ``'micro'``: Calculate the metric globally, across all samples and classes. 
Cannot be\n used with multiclass input.\n - ``'weighted'``: Calculate the metric for each class separately, and average the\n metrics across classes, weighting each class by its support.\n - ``'none'`` or ``None``: Calculate the metric for each class separately, and return\n the metric for every class.\n\n Returns:\n tensor with average precision. If multiclass will return list\n of such tensors, one for each class\n\n Example (binary case):\n >>> from torchmetrics.functional import average_precision\n >>> pred = torch.tensor([0, 1, 2, 3])\n >>> target = torch.tensor([0, 1, 1, 1])\n >>> average_precision(pred, target, pos_label=1)\n tensor(1.)\n\n Example (multiclass case):\n >>> pred = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],\n ... [0.05, 0.75, 0.05, 0.05, 0.05],\n ... [0.05, 0.05, 0.75, 0.05, 0.05],\n ... [0.05, 0.05, 0.05, 0.75, 0.05]])\n >>> target = torch.tensor([0, 1, 3, 2])\n >>> average_precision(pred, target, num_classes=5, average=None)\n [tensor(1.), tensor(1.), tensor(0.2500), tensor(0.2500), tensor(nan)]\n \"\"\"\n preds, target, num_classes, pos_label = _average_precision_update(preds, target, num_classes, pos_label, average)\n return _average_precision_compute(preds, target, num_classes, pos_label, average)\n", "path": "src/torchmetrics/functional/classification/average_precision.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport warnings\nfrom typing import List, Optional, Tuple, Union\n\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.functional.classification.precision_recall_curve import (\n _precision_recall_curve_compute,\n _precision_recall_curve_update,\n)\nfrom torchmetrics.utilities.data import _bincount\n\n\ndef _average_precision_update(\n preds: Tensor,\n target: Tensor,\n num_classes: Optional[int] = None,\n pos_label: Optional[int] = None,\n average: Optional[str] = \"macro\",\n) -> Tuple[Tensor, Tensor, int, Optional[int]]:\n \"\"\"Format the predictions and target based on the ``num_classes``, ``pos_label`` and ``average`` parameter.\n\n Args:\n preds: predictions from model (logits or probabilities)\n target: ground truth values\n num_classes: integer with number of classes.\n pos_label: integer determining the positive class. Default is ``None`` which for binary problem is translated\n to 1. 
For multiclass problems this argument should not be set as we iteratively change it in the\n range ``[0, num_classes-1]``\n average: reduction method for multi-class or multi-label problems\n \"\"\"\n preds, target, num_classes, pos_label = _precision_recall_curve_update(preds, target, num_classes, pos_label)\n if average == \"micro\" and preds.ndim != target.ndim:\n raise ValueError(\"Cannot use `micro` average with multi-class input\")\n\n return preds, target, num_classes, pos_label\n\n\ndef _average_precision_compute(\n preds: Tensor,\n target: Tensor,\n num_classes: int,\n pos_label: Optional[int] = None,\n average: Optional[str] = \"macro\",\n) -> Union[List[Tensor], Tensor]:\n \"\"\"Computes the average precision score.\n\n Args:\n preds: predictions from model (logits or probabilities)\n target: ground truth values\n num_classes: integer with number of classes.\n pos_label: integer determining the positive class. Default is ``None`` which for binary problem is translated\n to 1. For multiclass problems his argument should not be set as we iteratively change it in the\n range ``[0, num_classes-1]``\n average: reduction method for multi-class or multi-label problems\n\n Example:\n >>> # binary case\n >>> preds = torch.tensor([0, 1, 2, 3])\n >>> target = torch.tensor([0, 1, 1, 1])\n >>> pos_label = 1\n >>> preds, target, num_classes, pos_label = _average_precision_update(preds, target, pos_label=pos_label)\n >>> _average_precision_compute(preds, target, num_classes, pos_label)\n tensor(1.)\n\n >>> # multiclass case\n >>> preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],\n ... [0.05, 0.75, 0.05, 0.05, 0.05],\n ... [0.05, 0.05, 0.75, 0.05, 0.05],\n ... [0.05, 0.05, 0.05, 0.75, 0.05]])\n >>> target = torch.tensor([0, 1, 3, 2])\n >>> num_classes = 5\n >>> preds, target, num_classes, pos_label = _average_precision_update(preds, target, num_classes)\n >>> _average_precision_compute(preds, target, num_classes, average=None)\n [tensor(1.), tensor(1.), tensor(0.2500), tensor(0.2500), tensor(nan)]\n \"\"\"\n\n if average == \"micro\" and preds.ndim == target.ndim:\n preds = preds.flatten()\n target = target.flatten()\n num_classes = 1\n\n precision, recall, _ = _precision_recall_curve_compute(preds, target, num_classes, pos_label)\n if average == \"weighted\":\n if preds.ndim == target.ndim and target.ndim > 1:\n weights = target.sum(dim=0).float()\n else:\n weights = _bincount(target, minlength=num_classes).float()\n weights = weights / torch.sum(weights)\n else:\n weights = None\n return _average_precision_compute_with_precision_recall(precision, recall, num_classes, average, weights)\n\n\ndef _average_precision_compute_with_precision_recall(\n precision: Tensor,\n recall: Tensor,\n num_classes: int,\n average: Optional[str] = \"macro\",\n weights: Optional[Tensor] = None,\n) -> Union[List[Tensor], Tensor]:\n \"\"\"Computes the average precision score from precision and recall.\n\n Args:\n precision: precision values\n recall: recall values\n num_classes: integer with number of classes. 
Not nessesary to provide\n for binary problems.\n average: reduction method for multi-class or multi-label problems\n weights: weights to use when average='weighted'\n\n Example:\n >>> # binary case\n >>> preds = torch.tensor([0, 1, 2, 3])\n >>> target = torch.tensor([0, 1, 1, 1])\n >>> pos_label = 1\n >>> preds, target, num_classes, pos_label = _average_precision_update(preds, target, pos_label=pos_label)\n >>> precision, recall, _ = _precision_recall_curve_compute(preds, target, num_classes, pos_label)\n >>> _average_precision_compute_with_precision_recall(precision, recall, num_classes, average=None)\n tensor(1.)\n\n >>> # multiclass case\n >>> preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],\n ... [0.05, 0.75, 0.05, 0.05, 0.05],\n ... [0.05, 0.05, 0.75, 0.05, 0.05],\n ... [0.05, 0.05, 0.05, 0.75, 0.05]])\n >>> target = torch.tensor([0, 1, 3, 2])\n >>> num_classes = 5\n >>> preds, target, num_classes, pos_label = _average_precision_update(preds, target, num_classes)\n >>> precision, recall, _ = _precision_recall_curve_compute(preds, target, num_classes)\n >>> _average_precision_compute_with_precision_recall(precision, recall, num_classes, average=None)\n [tensor(1.), tensor(1.), tensor(0.2500), tensor(0.2500), tensor(nan)]\n \"\"\"\n\n # Return the step function integral\n # The following works because the last entry of precision is\n # guaranteed to be 1, as returned by precision_recall_curve\n if num_classes == 1:\n return -torch.sum((recall[1:] - recall[:-1]) * precision[:-1])\n\n res = []\n for p, r in zip(precision, recall):\n res.append(-torch.sum((r[1:] - r[:-1]) * p[:-1]))\n\n # Reduce\n if average in (\"macro\", \"weighted\"):\n res = torch.stack(res)\n if torch.isnan(res).any():\n warnings.warn(\n \"Average precision score for one or more classes was `nan`. Ignoring these classes in average\",\n UserWarning,\n )\n if average == \"macro\":\n return res[~torch.isnan(res)].mean()\n weights = torch.ones_like(res) if weights is None else weights\n return (res * weights)[~torch.isnan(res)].sum()\n if average is None or average == \"none\":\n return res\n allowed_average = (\"micro\", \"macro\", \"weighted\", \"none\", None)\n raise ValueError(f\"Expected argument `average` to be one of {allowed_average}\" f\" but got {average}\")\n\n\ndef average_precision(\n preds: Tensor,\n target: Tensor,\n num_classes: Optional[int] = None,\n pos_label: Optional[int] = None,\n average: Optional[str] = \"macro\",\n) -> Union[List[Tensor], Tensor]:\n \"\"\"Computes the average precision score.\n\n Args:\n preds: predictions from model (logits or probabilities)\n target: ground truth values\n num_classes: integer with number of classes. Not nessesary to provide\n for binary problems.\n pos_label: integer determining the positive class. Default is ``None`` which for binary problem is translated\n to 1. For multiclass problems his argument should not be set as we iteratively change it in the\n range ``[0, num_classes-1]``\n average:\n defines the reduction that is applied in the case of multiclass and multilabel input.\n Should be one of the following:\n\n - ``'macro'`` [default]: Calculate the metric for each class separately, and average the\n metrics across classes (with equal weights for each class).\n - ``'micro'``: Calculate the metric globally, across all samples and classes. 
Cannot be\n used with multiclass input.\n - ``'weighted'``: Calculate the metric for each class separately, and average the\n metrics across classes, weighting each class by its support.\n - ``'none'`` or ``None``: Calculate the metric for each class separately, and return\n the metric for every class.\n\n Returns:\n tensor with average precision. If multiclass will return list\n of such tensors, one for each class\n\n Example (binary case):\n >>> from torchmetrics.functional import average_precision\n >>> pred = torch.tensor([0, 1, 2, 3])\n >>> target = torch.tensor([0, 1, 1, 1])\n >>> average_precision(pred, target, pos_label=1)\n tensor(1.)\n\n Example (multiclass case):\n >>> pred = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],\n ... [0.05, 0.75, 0.05, 0.05, 0.05],\n ... [0.05, 0.05, 0.75, 0.05, 0.05],\n ... [0.05, 0.05, 0.05, 0.75, 0.05]])\n >>> target = torch.tensor([0, 1, 3, 2])\n >>> average_precision(pred, target, num_classes=5, average=None)\n [tensor(1.), tensor(1.), tensor(0.2500), tensor(0.2500), tensor(nan)]\n \"\"\"\n preds, target, num_classes, pos_label = _average_precision_update(preds, target, num_classes, pos_label, average)\n return _average_precision_compute(preds, target, num_classes, pos_label, average)\n", "path": "src/torchmetrics/functional/classification/average_precision.py"}]} | 3,636 | 197 |
gh_patches_debug_11166 | rasdani/github-patches | git_diff | DataDog__dd-agent-2443 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[marathon] Marathon plugin slows down agent when marathon has many apps running
We are using Datadog to monitor a Marathon framework that has over 150 apps, and the marathon check seems to be slowing down the entire Datadog agent process.
After investigating what the plugin actually does, the problem seems to be this loop: https://github.com/DataDog/dd-agent/blob/5.4.4/checks.d/marathon.py#L46. It appears that the agent is sequentially hitting the API 150 times, which is enough to stop the agent from reporting metrics long enough to trigger some of our other alerts.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checks.d/marathon.py`
Content:
```
1 # (C) Datadog, Inc. 2014-2016
2 # (C) graemej <[email protected]> 2014
3 # All rights reserved
4 # Licensed under Simplified BSD License (see LICENSE)
5
6
7 # stdlib
8 from urlparse import urljoin
9
10 # 3rd party
11 import requests
12
13 # project
14 from checks import AgentCheck
15
16
17 class Marathon(AgentCheck):
18
19 DEFAULT_TIMEOUT = 5
20 SERVICE_CHECK_NAME = 'marathon.can_connect'
21
22 APP_METRICS = [
23 'backoffFactor',
24 'backoffSeconds',
25 'cpus',
26 'disk',
27 'instances',
28 'mem',
29 'taskRateLimit',
30 'tasksRunning',
31 'tasksStaged'
32 ]
33
34 def check(self, instance):
35 if 'url' not in instance:
36 raise Exception('Marathon instance missing "url" value.')
37
38 # Load values from the instance config
39 url = instance['url']
40 user = instance.get('user')
41 password = instance.get('password')
42 if user is not None and password is not None:
43 auth = (user,password)
44 else:
45 auth = None
46 instance_tags = instance.get('tags', [])
47 default_timeout = self.init_config.get('default_timeout', self.DEFAULT_TIMEOUT)
48 timeout = float(instance.get('timeout', default_timeout))
49
50 response = self.get_json(urljoin(url, "/v2/apps"), timeout, auth)
51 if response is not None:
52 self.gauge('marathon.apps', len(response['apps']), tags=instance_tags)
53 for app in response['apps']:
54 tags = ['app_id:' + app['id'], 'version:' + app['version']] + instance_tags
55 for attr in self.APP_METRICS:
56 if attr in app:
57 self.gauge('marathon.' + attr, app[attr], tags=tags)
58
59 query_url = urljoin(url, "/v2/apps/{0}/versions".format(app['id']))
60 versions_reply = self.get_json(query_url, timeout, auth)
61
62 if versions_reply is not None:
63 self.gauge('marathon.versions', len(versions_reply['versions']), tags=tags)
64
65 def get_json(self, url, timeout, auth):
66 try:
67 r = requests.get(url, timeout=timeout, auth=auth)
68 r.raise_for_status()
69 except requests.exceptions.Timeout:
70 # If there's a timeout
71 self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
72 message='%s timed out after %s seconds.' % (url, timeout),
73 tags = ["url:{0}".format(url)])
74 raise Exception("Timeout when hitting %s" % url)
75
76 except requests.exceptions.HTTPError:
77 self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
78 message='%s returned a status of %s' % (url, r.status_code),
79 tags = ["url:{0}".format(url)])
80 raise Exception("Got %s when hitting %s" % (r.status_code, url))
81
82 else:
83 self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,
84 tags = ["url:{0}".format(url)]
85 )
86
87 return r.json()
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checks.d/marathon.py b/checks.d/marathon.py
--- a/checks.d/marathon.py
+++ b/checks.d/marathon.py
@@ -56,12 +56,6 @@
if attr in app:
self.gauge('marathon.' + attr, app[attr], tags=tags)
- query_url = urljoin(url, "/v2/apps/{0}/versions".format(app['id']))
- versions_reply = self.get_json(query_url, timeout, auth)
-
- if versions_reply is not None:
- self.gauge('marathon.versions', len(versions_reply['versions']), tags=tags)
-
def get_json(self, url, timeout, auth):
try:
r = requests.get(url, timeout=timeout, auth=auth)
| {"golden_diff": "diff --git a/checks.d/marathon.py b/checks.d/marathon.py\n--- a/checks.d/marathon.py\n+++ b/checks.d/marathon.py\n@@ -56,12 +56,6 @@\n if attr in app:\n self.gauge('marathon.' + attr, app[attr], tags=tags)\n \n- query_url = urljoin(url, \"/v2/apps/{0}/versions\".format(app['id']))\n- versions_reply = self.get_json(query_url, timeout, auth)\n-\n- if versions_reply is not None:\n- self.gauge('marathon.versions', len(versions_reply['versions']), tags=tags)\n-\n def get_json(self, url, timeout, auth):\n try:\n r = requests.get(url, timeout=timeout, auth=auth)\n", "issue": "[marathon] Marathon plugin slows down agent when marathon has many apps running\nWe are monitoring a marathon framework using datadog which has over 150 apps, and the marathon check seems to be slowing down the entire datadog process.\n\nAfter investigating what the plugin actually does, the problem seems to be this loop: https://github.com/DataDog/dd-agent/blob/5.4.4/checks.d/marathon.py#L46. It appears that the agent is sequentially hitting the API 150 times, which is enough to stop the agent from reporting metrics long enough to trigger some of our other alerts.\n\n", "before_files": [{"content": "# (C) Datadog, Inc. 2014-2016\n# (C) graemej <[email protected]> 2014\n# All rights reserved\n# Licensed under Simplified BSD License (see LICENSE)\n\n\n# stdlib\nfrom urlparse import urljoin\n\n# 3rd party\nimport requests\n\n# project\nfrom checks import AgentCheck\n\n\nclass Marathon(AgentCheck):\n\n DEFAULT_TIMEOUT = 5\n SERVICE_CHECK_NAME = 'marathon.can_connect'\n\n APP_METRICS = [\n 'backoffFactor',\n 'backoffSeconds',\n 'cpus',\n 'disk',\n 'instances',\n 'mem',\n 'taskRateLimit',\n 'tasksRunning',\n 'tasksStaged'\n ]\n\n def check(self, instance):\n if 'url' not in instance:\n raise Exception('Marathon instance missing \"url\" value.')\n\n # Load values from the instance config\n url = instance['url']\n user = instance.get('user')\n password = instance.get('password')\n if user is not None and password is not None:\n auth = (user,password)\n else:\n auth = None\n instance_tags = instance.get('tags', [])\n default_timeout = self.init_config.get('default_timeout', self.DEFAULT_TIMEOUT)\n timeout = float(instance.get('timeout', default_timeout))\n\n response = self.get_json(urljoin(url, \"/v2/apps\"), timeout, auth)\n if response is not None:\n self.gauge('marathon.apps', len(response['apps']), tags=instance_tags)\n for app in response['apps']:\n tags = ['app_id:' + app['id'], 'version:' + app['version']] + instance_tags\n for attr in self.APP_METRICS:\n if attr in app:\n self.gauge('marathon.' + attr, app[attr], tags=tags)\n\n query_url = urljoin(url, \"/v2/apps/{0}/versions\".format(app['id']))\n versions_reply = self.get_json(query_url, timeout, auth)\n\n if versions_reply is not None:\n self.gauge('marathon.versions', len(versions_reply['versions']), tags=tags)\n\n def get_json(self, url, timeout, auth):\n try:\n r = requests.get(url, timeout=timeout, auth=auth)\n r.raise_for_status()\n except requests.exceptions.Timeout:\n # If there's a timeout\n self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,\n message='%s timed out after %s seconds.' 
% (url, timeout),\n tags = [\"url:{0}\".format(url)])\n raise Exception(\"Timeout when hitting %s\" % url)\n\n except requests.exceptions.HTTPError:\n self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,\n message='%s returned a status of %s' % (url, r.status_code),\n tags = [\"url:{0}\".format(url)])\n raise Exception(\"Got %s when hitting %s\" % (r.status_code, url))\n\n else:\n self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,\n tags = [\"url:{0}\".format(url)]\n )\n\n return r.json()\n", "path": "checks.d/marathon.py"}], "after_files": [{"content": "# (C) Datadog, Inc. 2014-2016\n# (C) graemej <[email protected]> 2014\n# All rights reserved\n# Licensed under Simplified BSD License (see LICENSE)\n\n\n# stdlib\nfrom urlparse import urljoin\n\n# 3rd party\nimport requests\n\n# project\nfrom checks import AgentCheck\n\n\nclass Marathon(AgentCheck):\n\n DEFAULT_TIMEOUT = 5\n SERVICE_CHECK_NAME = 'marathon.can_connect'\n\n APP_METRICS = [\n 'backoffFactor',\n 'backoffSeconds',\n 'cpus',\n 'disk',\n 'instances',\n 'mem',\n 'taskRateLimit',\n 'tasksRunning',\n 'tasksStaged'\n ]\n\n def check(self, instance):\n if 'url' not in instance:\n raise Exception('Marathon instance missing \"url\" value.')\n\n # Load values from the instance config\n url = instance['url']\n user = instance.get('user')\n password = instance.get('password')\n if user is not None and password is not None:\n auth = (user,password)\n else:\n auth = None\n instance_tags = instance.get('tags', [])\n default_timeout = self.init_config.get('default_timeout', self.DEFAULT_TIMEOUT)\n timeout = float(instance.get('timeout', default_timeout))\n\n response = self.get_json(urljoin(url, \"/v2/apps\"), timeout, auth)\n if response is not None:\n self.gauge('marathon.apps', len(response['apps']), tags=instance_tags)\n for app in response['apps']:\n tags = ['app_id:' + app['id'], 'version:' + app['version']] + instance_tags\n for attr in self.APP_METRICS:\n if attr in app:\n self.gauge('marathon.' + attr, app[attr], tags=tags)\n\n def get_json(self, url, timeout, auth):\n try:\n r = requests.get(url, timeout=timeout, auth=auth)\n r.raise_for_status()\n except requests.exceptions.Timeout:\n # If there's a timeout\n self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,\n message='%s timed out after %s seconds.' % (url, timeout),\n tags = [\"url:{0}\".format(url)])\n raise Exception(\"Timeout when hitting %s\" % url)\n\n except requests.exceptions.HTTPError:\n self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,\n message='%s returned a status of %s' % (url, r.status_code),\n tags = [\"url:{0}\".format(url)])\n raise Exception(\"Got %s when hitting %s\" % (r.status_code, url))\n\n else:\n self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,\n tags = [\"url:{0}\".format(url)]\n )\n\n return r.json()\n", "path": "checks.d/marathon.py"}]} | 1,258 | 179 |
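
A minimal sketch of what the patched Marathon check loop above amounts to — a single request to `/v2/apps` per run, with no per-app `/versions` calls. The function name `emit_app_metrics`, the `gauge` callback, and the plain-`requests` URL handling are illustrative stand-ins, not dd-agent code:

```python
import requests

# Gauges emitted per app, as in checks.d/marathon.py above.
APP_METRICS = ['backoffFactor', 'backoffSeconds', 'cpus', 'disk', 'instances',
               'mem', 'taskRateLimit', 'tasksRunning', 'tasksStaged']

def emit_app_metrics(base_url, gauge, timeout=5, auth=None, instance_tags=()):
    """One HTTP call to /v2/apps per check run; no per-app /versions requests."""
    response = requests.get(base_url.rstrip('/') + '/v2/apps',
                            timeout=timeout, auth=auth)
    response.raise_for_status()
    apps = response.json()['apps']
    gauge('marathon.apps', len(apps), tags=list(instance_tags))
    for app in apps:
        tags = ['app_id:' + app['id'], 'version:' + app['version']] + list(instance_tags)
        for attr in APP_METRICS:
            if attr in app:
                gauge('marathon.' + attr, app[attr], tags=tags)
```

With 150+ apps this is a single HTTP round-trip per run instead of 151, which is exactly the overhead the removed `/v2/apps/{id}/versions` loop was adding (at the cost of dropping the `marathon.versions` gauge).
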
gh_patches_debug_6540 | rasdani/github-patches | git_diff | ivy-llc__ivy-27295 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
fft2
PaddlePaddle frontend implementation of the fft2 function.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/paddle/fft.py`
Content:
```
1 # global
2 import ivy
3 from ivy.func_wrapper import with_supported_dtypes
4 from ivy.functional.frontends.paddle.func_wrapper import (
5 to_ivy_arrays_and_back,
6 )
7
8
9 @with_supported_dtypes(
10 {"2.5.2 and below": ("complex64", "complex128")},
11 "paddle",
12 )
13 @to_ivy_arrays_and_back
14 def fft(x, n=None, axis=-1.0, norm="backward", name=None):
15 ret = ivy.fft(ivy.astype(x, "complex128"), axis, norm=norm, n=n)
16 return ivy.astype(ret, x.dtype)
17
18
19 @with_supported_dtypes(
20 {
21 "2.5.2 and below": (
22 "int32",
23 "int64",
24 "float32",
25 "float64",
26 )
27 },
28 "paddle",
29 )
30 @to_ivy_arrays_and_back
31 def fftfreq(n, d=1.0, dtype=None, name=None):
32 if d * n == 0:
33 raise ValueError("d or n should not be 0.")
34
35 if dtype is None:
36 dtype = ivy.default_dtype()
37 val = 1.0 / (n * d)
38 pos_max = (n + 1) // 2
39 neg_max = n // 2
40 indices = ivy.arange(-neg_max, pos_max, dtype=dtype)
41 indices = ivy.roll(indices, -neg_max)
42 return ivy.multiply(indices, val)
43
44
45 @with_supported_dtypes(
46 {
47 "2.5.2 and below": (
48 "int32",
49 "int64",
50 "float32",
51 "float64",
52 "complex64",
53 "complex128",
54 )
55 },
56 "paddle",
57 )
58 @to_ivy_arrays_and_back
59 def fftshift(x, axes=None, name=None):
60 shape = x.shape
61
62 if axes is None:
63 axes = tuple(range(x.ndim))
64 shifts = [(dim // 2) for dim in shape]
65 elif isinstance(axes, int):
66 shifts = shape[axes] // 2
67 else:
68 shifts = ivy.concat([shape[ax] // 2 for ax in axes])
69
70 roll = ivy.roll(x, shifts, axis=axes)
71
72 return roll
73
74
75 @with_supported_dtypes(
76 {"2.5.2 and below": ("complex64", "complex128")},
77 "paddle",
78 )
79 @to_ivy_arrays_and_back
80 def hfft(x, n=None, axes=-1, norm="backward", name=None):
81 """Compute the FFT of a signal that has Hermitian symmetry, resulting in a real
82 spectrum."""
83 # Determine the input shape and axis length
84 input_shape = x.shape
85 input_len = input_shape[axes]
86
87 # Calculate n if not provided
88 if n is None:
89 n = 2 * (input_len - 1)
90
91 # Perform the FFT along the specified axis
92 result = ivy.fft(x, axes, n=n, norm=norm)
93
94 return ivy.real(result)
95
96
97 @with_supported_dtypes(
98 {"2.5.2 and below": "complex64"},
99 "paddle",
100 )
101 @to_ivy_arrays_and_back
102 def hfft2(x, s=None, axis=(-2, -1), norm="backward"):
103 # check if the input tensor x is a hermitian complex
104 if not ivy.allclose(ivy.conj(ivy.matrix_transpose(x)), x):
105 raise ValueError("Input tensor x must be Hermitian complex.")
106
107 fft_result = ivy.fft2(x, s=s, dim=axis, norm=norm)
108
109 # Depending on the norm, apply scaling and normalization
110 if norm == "forward":
111 fft_result /= ivy.sqrt(ivy.prod(ivy.shape(fft_result)))
112 elif norm == "ortho":
113 fft_result /= ivy.sqrt(ivy.prod(ivy.shape(x)))
114
115 return ivy.real(fft_result) # Return the real part of the result
116
117
118 @with_supported_dtypes(
119 {"2.5.2 and below": ("complex64", "complex128")},
120 "paddle",
121 )
122 @to_ivy_arrays_and_back
123 def ifft(x, n=None, axis=-1.0, norm="backward", name=None):
124 ret = ivy.ifft(ivy.astype(x, "complex128"), axis, norm=norm, n=n)
125 return ivy.astype(ret, x.dtype)
126
127
128 @with_supported_dtypes(
129 {"2.5.2 and below": ("complex64", "complex128")},
130 "paddle",
131 )
132 @to_ivy_arrays_and_back
133 def ifftn(x, s=None, axes=None, norm="backward", name=None):
134 ret = ivy.ifftn(ivy.astype(x, "complex128"), s=s, axes=axes, norm=norm)
135 return ivy.astype(ret, x.dtype)
136
137
138 @with_supported_dtypes(
139 {
140 "2.5.2 and below": (
141 "int32",
142 "int64",
143 "float32",
144 "float64",
145 )
146 },
147 "paddle",
148 )
149 @to_ivy_arrays_and_back
150 def ifftshift(x, axes=None, name=None):
151 shape = x.shape
152
153 if axes is None:
154 axes = tuple(range(x.ndim))
155 shifts = [-(dim // 2) for dim in shape]
156 elif isinstance(axes, int):
157 shifts = -(shape[axes] // 2)
158 else:
159 shifts = ivy.concat([-shape[ax] // 2 for ax in axes])
160
161 roll = ivy.roll(x, shifts, axis=axes)
162
163 return roll
164
165
166 @with_supported_dtypes(
167 {
168 "2.5.2 and below": (
169 "int32",
170 "int64",
171 "float32",
172 "float64",
173 )
174 },
175 "paddle",
176 )
177 @to_ivy_arrays_and_back
178 def ihfft2(x, s=None, axes=(-2, -1), norm="backward", name=None):
179 # check if the input array is two-dimensional and real
180 if len(ivy.array(x).shape) != 2 or ivy.is_complex_dtype(x):
181 raise ValueError("input must be a two-dimensional real array")
182
183 # cast the input to the same float64 type so that there are no backend issues
184 x_ = ivy.astype(x, ivy.float64)
185
186 ihfft2_result = 0
187 # Compute the complex conjugate of the 2-dimensional discrete Fourier Transform
188 if norm == "backward":
189 ihfft2_result = ivy.conj(ivy.rfftn(x_, s=s, axes=axes, norm="forward"))
190 if norm == "forward":
191 ihfft2_result = ivy.conj(ivy.rfftn(x_, s=s, axes=axes, norm="backward"))
192 if norm == "ortho":
193 ihfft2_result = ivy.conj(ivy.rfftn(x_, s=s, axes=axes, norm="ortho"))
194
195 if x.dtype in [ivy.float32, ivy.int32, ivy.int64]:
196 return ivy.astype(ihfft2_result, ivy.complex64)
197 if x.dtype == ivy.float64:
198 return ivy.astype(ihfft2_result, ivy.complex128)
199
200
201 @with_supported_dtypes(
202 {"2.5.2 and below": ("complex64", "complex128")},
203 "paddle",
204 )
205 @to_ivy_arrays_and_back
206 def irfft(x, n=None, axis=-1.0, norm="backward", name=None):
207 if n is None:
208 n = 2 * (x.shape[axis] - 1)
209
210 pos_freq_terms = ivy.take_along_axis(x, range(n // 2 + 1), axis)
211 neg_freq_terms = ivy.conj(pos_freq_terms[1:-1][::-1])
212 combined_freq_terms = ivy.concat((pos_freq_terms, neg_freq_terms), axis=axis)
213 time_domain = ivy.ifft(combined_freq_terms, axis, norm=norm, n=n)
214 if ivy.isreal(x):
215 time_domain = ivy.real(time_domain)
216 return time_domain
217
218
219 @with_supported_dtypes(
220 {
221 "2.5.2 and below": (
222 "int32",
223 "int64",
224 "float16",
225 "float32",
226 "float64",
227 "complex64",
228 "complex128",
229 )
230 },
231 "paddle",
232 )
233 @to_ivy_arrays_and_back
234 def irfft2(x, s=None, axes=(-2, -1), norm="backward"):
235 # Handle values if None
236 if s is None:
237 s = x.shape
238 if axes is None:
239 axes = (-2, -1)
240
241 # Calculate the normalization factor 'n' based on the shape 's'
242 n = ivy.prod(ivy.array(s))
243
244 result = ivy.ifftn(x, dim=axes[0], norm=norm)
245
246 # Normalize the result based on the 'norm' parameter
247 if norm == "backward":
248 result /= n
249 elif norm == "forward":
250 result *= n
251 elif norm == "ortho":
252 result /= ivy.sqrt(n)
253 return result
254
255
256 @with_supported_dtypes(
257 {"2.5.2 and below": ("complex64", "complex128")},
258 "paddle",
259 )
260 @to_ivy_arrays_and_back
261 def irfftn(x, s=None, axes=None, norm="backward", name=None):
262 x = ivy.array(x)
263
264 if axes is None:
265 axes = list(range(len(x.shape)))
266
267 include_last_axis = len(x.shape) - 1 in axes
268
269 if s is None:
270 s = [
271 x.shape[axis] if axis != (len(x.shape) - 1) else 2 * (x.shape[axis] - 1)
272 for axis in axes
273 ]
274
275 real_result = x
276 remaining_axes = [axis for axis in axes if axis != (len(x.shape) - 1)]
277
278 if remaining_axes:
279 real_result = ivy.ifftn(
280 x,
281 s=[s[axes.index(axis)] for axis in remaining_axes],
282 axes=remaining_axes,
283 norm=norm,
284 )
285
286 if include_last_axis:
287 axis = len(x.shape) - 1
288 size = s[axes.index(axis)]
289 freq_domain = ivy.moveaxis(real_result, axis, -1)
290 slices = [slice(None)] * ivy.get_num_dims(freq_domain)
291 slices[-1] = slice(0, size // 2 + 1)
292 pos_freq_terms = freq_domain[tuple(slices)]
293 slices[-1] = slice(1, -1)
294 neg_freq_terms = ivy.conj(pos_freq_terms[tuple(slices)][..., ::-1])
295 combined_freq_terms = ivy.concat((pos_freq_terms, neg_freq_terms), axis=-1)
296 real_result = ivy.ifftn(combined_freq_terms, s=[size], axes=[-1], norm=norm)
297 real_result = ivy.moveaxis(real_result, -1, axis)
298
299 if ivy.is_complex_dtype(x.dtype):
300 output_dtype = "float32" if x.dtype == "complex64" else "float64"
301 else:
302 output_dtype = "float32"
303
304 result_t = ivy.astype(real_result, output_dtype)
305 return result_t
306
307
308 @with_supported_dtypes({"2.5.2 and below": ("float32", "float64")}, "paddle")
309 @to_ivy_arrays_and_back
310 def rfft(x, n=None, axis=-1, norm="backward", name=None):
311 return ivy.dft(x, axis=axis, inverse=False, onesided=True, dft_length=n, norm=norm)
312
313
314 @to_ivy_arrays_and_back
315 def rfftfreq(n, d=1.0, dtype=None, name=None):
316 dtype = ivy.default_dtype()
317 val = 1.0 / (n * d)
318 pos_max = n // 2 + 1
319 indices = ivy.arange(0, pos_max, dtype=dtype)
320 return indices * val
321
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/paddle/fft.py b/ivy/functional/frontends/paddle/fft.py
--- a/ivy/functional/frontends/paddle/fft.py
+++ b/ivy/functional/frontends/paddle/fft.py
@@ -16,6 +16,25 @@
return ivy.astype(ret, x.dtype)
+@with_supported_dtypes(
+ {
+ "2.5.2 and below": (
+ "int32",
+ "int64",
+ "float32",
+ "float64",
+ )
+ },
+ "paddle",
+)
+@to_ivy_arrays_and_back
+def fft2(x, s=None, axes=(-2, -1), norm="backward", name=None):
+ if axes is None:
+ axes = (-2, -1)
+ ret = ivy.fft2(x, s=s, dim=axes, norm=norm)
+ return ret
+
+
@with_supported_dtypes(
{
"2.5.2 and below": (
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/fft.py b/ivy/functional/frontends/paddle/fft.py\n--- a/ivy/functional/frontends/paddle/fft.py\n+++ b/ivy/functional/frontends/paddle/fft.py\n@@ -16,6 +16,25 @@\n return ivy.astype(ret, x.dtype)\n \n \n+@with_supported_dtypes(\n+ {\n+ \"2.5.2 and below\": (\n+ \"int32\",\n+ \"int64\",\n+ \"float32\",\n+ \"float64\",\n+ )\n+ },\n+ \"paddle\",\n+)\n+@to_ivy_arrays_and_back\n+def fft2(x, s=None, axes=(-2, -1), norm=\"backward\", name=None):\n+ if axes is None:\n+ axes = (-2, -1)\n+ ret = ivy.fft2(x, s=s, dim=axes, norm=norm)\n+ return ret\n+\n+\n @with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n", "issue": "fft2\nPaddle-Paddle frontend implementation for the fft2 function.\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_supported_dtypes(\n {\"2.5.2 and below\": (\"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef fft(x, n=None, axis=-1.0, norm=\"backward\", name=None):\n ret = ivy.fft(ivy.astype(x, \"complex128\"), axis, norm=norm, n=n)\n return ivy.astype(ret, x.dtype)\n\n\n@with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef fftfreq(n, d=1.0, dtype=None, name=None):\n if d * n == 0:\n raise ValueError(\"d or n should not be 0.\")\n\n if dtype is None:\n dtype = ivy.default_dtype()\n val = 1.0 / (n * d)\n pos_max = (n + 1) // 2\n neg_max = n // 2\n indices = ivy.arange(-neg_max, pos_max, dtype=dtype)\n indices = ivy.roll(indices, -neg_max)\n return ivy.multiply(indices, val)\n\n\n@with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef fftshift(x, axes=None, name=None):\n shape = x.shape\n\n if axes is None:\n axes = tuple(range(x.ndim))\n shifts = [(dim // 2) for dim in shape]\n elif isinstance(axes, int):\n shifts = shape[axes] // 2\n else:\n shifts = ivy.concat([shape[ax] // 2 for ax in axes])\n\n roll = ivy.roll(x, shifts, axis=axes)\n\n return roll\n\n\n@with_supported_dtypes(\n {\"2.5.2 and below\": (\"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef hfft(x, n=None, axes=-1, norm=\"backward\", name=None):\n \"\"\"Compute the FFT of a signal that has Hermitian symmetry, resulting in a real\n spectrum.\"\"\"\n # Determine the input shape and axis length\n input_shape = x.shape\n input_len = input_shape[axes]\n\n # Calculate n if not provided\n if n is None:\n n = 2 * (input_len - 1)\n\n # Perform the FFT along the specified axis\n result = ivy.fft(x, axes, n=n, norm=norm)\n\n return ivy.real(result)\n\n\n@with_supported_dtypes(\n {\"2.5.2 and below\": \"complex64\"},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef hfft2(x, s=None, axis=(-2, -1), norm=\"backward\"):\n # check if the input tensor x is a hermitian complex\n if not ivy.allclose(ivy.conj(ivy.matrix_transpose(x)), x):\n raise ValueError(\"Input tensor x must be Hermitian complex.\")\n\n fft_result = ivy.fft2(x, s=s, dim=axis, norm=norm)\n\n # Depending on the norm, apply scaling and normalization\n if norm == \"forward\":\n fft_result /= ivy.sqrt(ivy.prod(ivy.shape(fft_result)))\n elif norm == \"ortho\":\n fft_result /= ivy.sqrt(ivy.prod(ivy.shape(x)))\n\n return ivy.real(fft_result) # Return the 
real part of the result\n\n\n@with_supported_dtypes(\n {\"2.5.2 and below\": (\"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef ifft(x, n=None, axis=-1.0, norm=\"backward\", name=None):\n ret = ivy.ifft(ivy.astype(x, \"complex128\"), axis, norm=norm, n=n)\n return ivy.astype(ret, x.dtype)\n\n\n@with_supported_dtypes(\n {\"2.5.2 and below\": (\"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef ifftn(x, s=None, axes=None, norm=\"backward\", name=None):\n ret = ivy.ifftn(ivy.astype(x, \"complex128\"), s=s, axes=axes, norm=norm)\n return ivy.astype(ret, x.dtype)\n\n\n@with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef ifftshift(x, axes=None, name=None):\n shape = x.shape\n\n if axes is None:\n axes = tuple(range(x.ndim))\n shifts = [-(dim // 2) for dim in shape]\n elif isinstance(axes, int):\n shifts = -(shape[axes] // 2)\n else:\n shifts = ivy.concat([-shape[ax] // 2 for ax in axes])\n\n roll = ivy.roll(x, shifts, axis=axes)\n\n return roll\n\n\n@with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef ihfft2(x, s=None, axes=(-2, -1), norm=\"backward\", name=None):\n # check if the input array is two-dimensional and real\n if len(ivy.array(x).shape) != 2 or ivy.is_complex_dtype(x):\n raise ValueError(\"input must be a two-dimensional real array\")\n\n # cast the input to the same float64 type so that there are no backend issues\n x_ = ivy.astype(x, ivy.float64)\n\n ihfft2_result = 0\n # Compute the complex conjugate of the 2-dimensional discrete Fourier Transform\n if norm == \"backward\":\n ihfft2_result = ivy.conj(ivy.rfftn(x_, s=s, axes=axes, norm=\"forward\"))\n if norm == \"forward\":\n ihfft2_result = ivy.conj(ivy.rfftn(x_, s=s, axes=axes, norm=\"backward\"))\n if norm == \"ortho\":\n ihfft2_result = ivy.conj(ivy.rfftn(x_, s=s, axes=axes, norm=\"ortho\"))\n\n if x.dtype in [ivy.float32, ivy.int32, ivy.int64]:\n return ivy.astype(ihfft2_result, ivy.complex64)\n if x.dtype == ivy.float64:\n return ivy.astype(ihfft2_result, ivy.complex128)\n\n\n@with_supported_dtypes(\n {\"2.5.2 and below\": (\"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef irfft(x, n=None, axis=-1.0, norm=\"backward\", name=None):\n if n is None:\n n = 2 * (x.shape[axis] - 1)\n\n pos_freq_terms = ivy.take_along_axis(x, range(n // 2 + 1), axis)\n neg_freq_terms = ivy.conj(pos_freq_terms[1:-1][::-1])\n combined_freq_terms = ivy.concat((pos_freq_terms, neg_freq_terms), axis=axis)\n time_domain = ivy.ifft(combined_freq_terms, axis, norm=norm, n=n)\n if ivy.isreal(x):\n time_domain = ivy.real(time_domain)\n return time_domain\n\n\n@with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"int32\",\n \"int64\",\n \"float16\",\n \"float32\",\n \"float64\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef irfft2(x, s=None, axes=(-2, -1), norm=\"backward\"):\n # Handle values if None\n if s is None:\n s = x.shape\n if axes is None:\n axes = (-2, -1)\n\n # Calculate the normalization factor 'n' based on the shape 's'\n n = ivy.prod(ivy.array(s))\n\n result = ivy.ifftn(x, dim=axes[0], norm=norm)\n\n # Normalize the result based on the 'norm' parameter\n if norm == \"backward\":\n result /= n\n elif norm == \"forward\":\n result *= n\n elif norm == \"ortho\":\n result /= ivy.sqrt(n)\n return 
result\n\n\n@with_supported_dtypes(\n {\"2.5.2 and below\": (\"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef irfftn(x, s=None, axes=None, norm=\"backward\", name=None):\n x = ivy.array(x)\n\n if axes is None:\n axes = list(range(len(x.shape)))\n\n include_last_axis = len(x.shape) - 1 in axes\n\n if s is None:\n s = [\n x.shape[axis] if axis != (len(x.shape) - 1) else 2 * (x.shape[axis] - 1)\n for axis in axes\n ]\n\n real_result = x\n remaining_axes = [axis for axis in axes if axis != (len(x.shape) - 1)]\n\n if remaining_axes:\n real_result = ivy.ifftn(\n x,\n s=[s[axes.index(axis)] for axis in remaining_axes],\n axes=remaining_axes,\n norm=norm,\n )\n\n if include_last_axis:\n axis = len(x.shape) - 1\n size = s[axes.index(axis)]\n freq_domain = ivy.moveaxis(real_result, axis, -1)\n slices = [slice(None)] * ivy.get_num_dims(freq_domain)\n slices[-1] = slice(0, size // 2 + 1)\n pos_freq_terms = freq_domain[tuple(slices)]\n slices[-1] = slice(1, -1)\n neg_freq_terms = ivy.conj(pos_freq_terms[tuple(slices)][..., ::-1])\n combined_freq_terms = ivy.concat((pos_freq_terms, neg_freq_terms), axis=-1)\n real_result = ivy.ifftn(combined_freq_terms, s=[size], axes=[-1], norm=norm)\n real_result = ivy.moveaxis(real_result, -1, axis)\n\n if ivy.is_complex_dtype(x.dtype):\n output_dtype = \"float32\" if x.dtype == \"complex64\" else \"float64\"\n else:\n output_dtype = \"float32\"\n\n result_t = ivy.astype(real_result, output_dtype)\n return result_t\n\n\n@with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef rfft(x, n=None, axis=-1, norm=\"backward\", name=None):\n return ivy.dft(x, axis=axis, inverse=False, onesided=True, dft_length=n, norm=norm)\n\n\n@to_ivy_arrays_and_back\ndef rfftfreq(n, d=1.0, dtype=None, name=None):\n dtype = ivy.default_dtype()\n val = 1.0 / (n * d)\n pos_max = n // 2 + 1\n indices = ivy.arange(0, pos_max, dtype=dtype)\n return indices * val\n", "path": "ivy/functional/frontends/paddle/fft.py"}], "after_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_supported_dtypes(\n {\"2.5.2 and below\": (\"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef fft(x, n=None, axis=-1.0, norm=\"backward\", name=None):\n ret = ivy.fft(ivy.astype(x, \"complex128\"), axis, norm=norm, n=n)\n return ivy.astype(ret, x.dtype)\n\n\n@with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef fft2(x, s=None, axes=(-2, -1), norm=\"backward\", name=None):\n if axes is None:\n axes = (-2, -1)\n ret = ivy.fft2(x, s=s, dim=axes, norm=norm)\n return ret\n\n\n@with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef fftfreq(n, d=1.0, dtype=None, name=None):\n if d * n == 0:\n raise ValueError(\"d or n should not be 0.\")\n\n if dtype is None:\n dtype = ivy.default_dtype()\n val = 1.0 / (n * d)\n pos_max = (n + 1) // 2\n neg_max = n // 2\n indices = ivy.arange(-neg_max, pos_max, dtype=dtype)\n indices = ivy.roll(indices, -neg_max)\n return ivy.multiply(indices, val)\n\n\n@with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n \"complex64\",\n \"complex128\",\n )\n },\n 
\"paddle\",\n)\n@to_ivy_arrays_and_back\ndef fftshift(x, axes=None, name=None):\n shape = x.shape\n\n if axes is None:\n axes = tuple(range(x.ndim))\n shifts = [(dim // 2) for dim in shape]\n elif isinstance(axes, int):\n shifts = shape[axes] // 2\n else:\n shifts = ivy.concat([shape[ax] // 2 for ax in axes])\n\n roll = ivy.roll(x, shifts, axis=axes)\n\n return roll\n\n\n@with_supported_dtypes(\n {\"2.5.2 and below\": (\"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef hfft(x, n=None, axes=-1, norm=\"backward\", name=None):\n \"\"\"Compute the FFT of a signal that has Hermitian symmetry, resulting in a real\n spectrum.\"\"\"\n # Determine the input shape and axis length\n input_shape = x.shape\n input_len = input_shape[axes]\n\n # Calculate n if not provided\n if n is None:\n n = 2 * (input_len - 1)\n\n # Perform the FFT along the specified axis\n result = ivy.fft(x, axes, n=n, norm=norm)\n\n return ivy.real(result)\n\n\n@with_supported_dtypes(\n {\"2.5.2 and below\": \"complex64\"},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef hfft2(x, s=None, axis=(-2, -1), norm=\"backward\"):\n # check if the input tensor x is a hermitian complex\n if not ivy.allclose(ivy.conj(ivy.matrix_transpose(x)), x):\n raise ValueError(\"Input tensor x must be Hermitian complex.\")\n\n fft_result = ivy.fft2(x, s=s, dim=axis, norm=norm)\n\n # Depending on the norm, apply scaling and normalization\n if norm == \"forward\":\n fft_result /= ivy.sqrt(ivy.prod(ivy.shape(fft_result)))\n elif norm == \"ortho\":\n fft_result /= ivy.sqrt(ivy.prod(ivy.shape(x)))\n\n return ivy.real(fft_result) # Return the real part of the result\n\n\n@with_supported_dtypes(\n {\"2.5.2 and below\": (\"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef ifft(x, n=None, axis=-1.0, norm=\"backward\", name=None):\n ret = ivy.ifft(ivy.astype(x, \"complex128\"), axis, norm=norm, n=n)\n return ivy.astype(ret, x.dtype)\n\n\n@with_supported_dtypes(\n {\"2.5.2 and below\": (\"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef ifftn(x, s=None, axes=None, norm=\"backward\", name=None):\n ret = ivy.ifftn(ivy.astype(x, \"complex128\"), s=s, axes=axes, norm=norm)\n return ivy.astype(ret, x.dtype)\n\n\n@with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef ifftshift(x, axes=None, name=None):\n shape = x.shape\n\n if axes is None:\n axes = tuple(range(x.ndim))\n shifts = [-(dim // 2) for dim in shape]\n elif isinstance(axes, int):\n shifts = -(shape[axes] // 2)\n else:\n shifts = ivy.concat([-shape[ax] // 2 for ax in axes])\n\n roll = ivy.roll(x, shifts, axis=axes)\n\n return roll\n\n\n@with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef ihfft2(x, s=None, axes=(-2, -1), norm=\"backward\", name=None):\n # check if the input array is two-dimensional and real\n if len(ivy.array(x).shape) != 2 or ivy.is_complex_dtype(x):\n raise ValueError(\"input must be a two-dimensional real array\")\n\n # cast the input to the same float64 type so that there are no backend issues\n x_ = ivy.astype(x, ivy.float64)\n\n ihfft2_result = 0\n # Compute the complex conjugate of the 2-dimensional discrete Fourier Transform\n if norm == \"backward\":\n ihfft2_result = ivy.conj(ivy.rfftn(x_, s=s, axes=axes, norm=\"forward\"))\n if norm == \"forward\":\n ihfft2_result = 
ivy.conj(ivy.rfftn(x_, s=s, axes=axes, norm=\"backward\"))\n if norm == \"ortho\":\n ihfft2_result = ivy.conj(ivy.rfftn(x_, s=s, axes=axes, norm=\"ortho\"))\n\n if x.dtype in [ivy.float32, ivy.int32, ivy.int64]:\n return ivy.astype(ihfft2_result, ivy.complex64)\n if x.dtype == ivy.float64:\n return ivy.astype(ihfft2_result, ivy.complex128)\n\n\n@with_supported_dtypes(\n {\"2.5.2 and below\": (\"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef irfft(x, n=None, axis=-1.0, norm=\"backward\", name=None):\n if n is None:\n n = 2 * (x.shape[axis] - 1)\n\n pos_freq_terms = ivy.take_along_axis(x, range(n // 2 + 1), axis)\n neg_freq_terms = ivy.conj(pos_freq_terms[1:-1][::-1])\n combined_freq_terms = ivy.concat((pos_freq_terms, neg_freq_terms), axis=axis)\n time_domain = ivy.ifft(combined_freq_terms, axis, norm=norm, n=n)\n if ivy.isreal(x):\n time_domain = ivy.real(time_domain)\n return time_domain\n\n\n@with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"int32\",\n \"int64\",\n \"float16\",\n \"float32\",\n \"float64\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef irfft2(x, s=None, axes=(-2, -1), norm=\"backward\"):\n # Handle values if None\n if s is None:\n s = x.shape\n if axes is None:\n axes = (-2, -1)\n\n # Calculate the normalization factor 'n' based on the shape 's'\n n = ivy.prod(ivy.array(s))\n\n result = ivy.ifftn(x, dim=axes[0], norm=norm)\n\n # Normalize the result based on the 'norm' parameter\n if norm == \"backward\":\n result /= n\n elif norm == \"forward\":\n result *= n\n elif norm == \"ortho\":\n result /= ivy.sqrt(n)\n return result\n\n\n@with_supported_dtypes(\n {\"2.5.2 and below\": (\"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef irfftn(x, s=None, axes=None, norm=\"backward\", name=None):\n x = ivy.array(x)\n\n if axes is None:\n axes = list(range(len(x.shape)))\n\n include_last_axis = len(x.shape) - 1 in axes\n\n if s is None:\n s = [\n x.shape[axis] if axis != (len(x.shape) - 1) else 2 * (x.shape[axis] - 1)\n for axis in axes\n ]\n\n real_result = x\n remaining_axes = [axis for axis in axes if axis != (len(x.shape) - 1)]\n\n if remaining_axes:\n real_result = ivy.ifftn(\n x,\n s=[s[axes.index(axis)] for axis in remaining_axes],\n axes=remaining_axes,\n norm=norm,\n )\n\n if include_last_axis:\n axis = len(x.shape) - 1\n size = s[axes.index(axis)]\n freq_domain = ivy.moveaxis(real_result, axis, -1)\n slices = [slice(None)] * ivy.get_num_dims(freq_domain)\n slices[-1] = slice(0, size // 2 + 1)\n pos_freq_terms = freq_domain[tuple(slices)]\n slices[-1] = slice(1, -1)\n neg_freq_terms = ivy.conj(pos_freq_terms[tuple(slices)][..., ::-1])\n combined_freq_terms = ivy.concat((pos_freq_terms, neg_freq_terms), axis=-1)\n real_result = ivy.ifftn(combined_freq_terms, s=[size], axes=[-1], norm=norm)\n real_result = ivy.moveaxis(real_result, -1, axis)\n\n if ivy.is_complex_dtype(x.dtype):\n output_dtype = \"float32\" if x.dtype == \"complex64\" else \"float64\"\n else:\n output_dtype = \"float32\"\n\n result_t = ivy.astype(real_result, output_dtype)\n return result_t\n\n\n@with_supported_dtypes({\"2.5.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef rfft(x, n=None, axis=-1, norm=\"backward\", name=None):\n return ivy.dft(x, axis=axis, inverse=False, onesided=True, dft_length=n, norm=norm)\n\n\n@to_ivy_arrays_and_back\ndef rfftfreq(n, d=1.0, dtype=None, name=None):\n dtype = ivy.default_dtype()\n val = 1.0 / (n * d)\n pos_max = n // 2 + 1\n 
indices = ivy.arange(0, pos_max, dtype=dtype)\n return indices * val\n", "path": "ivy/functional/frontends/paddle/fft.py"}]} | 3,865 | 240 |
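
For reference, the transform that the new `fft2` frontend above forwards to (via `ivy.fft2`) is the standard 2-D DFT over the last two axes. A NumPy-only illustration — NumPy is used here purely to show the semantics and is not how the frontend is implemented:

```python
import numpy as np

x = np.array([[1.0, 2.0, 3.0, 4.0],
              [5.0, 6.0, 7.0, 8.0]])

# 2-D DFT over the last two axes; NumPy's default normalization corresponds
# to the frontend's norm="backward" default.
out = np.fft.fft2(x, s=None, axes=(-2, -1))
print(out.shape)   # (2, 4)
print(out[0, 0])   # (36+0j) -- the DC term, i.e. the sum of all elements
```

The defaults written into the golden diff — `axes=(-2, -1)` and `norm="backward"` — mirror the `paddle.fft.fft2` signature the frontend is meant to reproduce.
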
gh_patches_debug_31887 | rasdani/github-patches | git_diff | tobymao__sqlglot-1746 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Type error when converting datediff from redshift to trino
```
sql = "select datediff(week,'2009-01-01','2009-12-31')"
converted_sql = sqlglot.transpile(sql, read="redshift", write="trino")[0]
print(converted_sql)
SELECT DATE_DIFF('week', '2009-01-01', '2009-12-31')
```
Trino error: `Unexpected parameters (varchar(4), varchar(10), varchar(10)) for function date_diff. Expected: date_diff(varchar(x), date, date), date_diff(varchar(x), timestamp(p), timestamp(p)), date_diff(varchar(x), timestamp(p) with time zone, timestamp(p) with time zone), date_diff(varchar(x), time(p), time(p)), date_diff(varchar(x), time(p) with time zone, time(p) with time zone)`
Changing the SQL to `SELECT DATE_DIFF('week', DATE'2009-01-01', DATE'2009-12-31')` works in Trino
https://trino.io/docs/current/functions/datetime.html
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sqlglot/dialects/redshift.py`
Content:
```
1 from __future__ import annotations
2
3 import typing as t
4
5 from sqlglot import exp, transforms
6 from sqlglot.dialects.dialect import rename_func
7 from sqlglot.dialects.postgres import Postgres
8 from sqlglot.helper import seq_get
9 from sqlglot.tokens import TokenType
10
11
12 def _json_sql(self: Postgres.Generator, expression: exp.JSONExtract | exp.JSONExtractScalar) -> str:
13 return f'{self.sql(expression, "this")}."{expression.expression.name}"'
14
15
16 class Redshift(Postgres):
17 time_format = "'YYYY-MM-DD HH:MI:SS'"
18 time_mapping = {
19 **Postgres.time_mapping,
20 "MON": "%b",
21 "HH": "%H",
22 }
23
24 class Parser(Postgres.Parser):
25 FUNCTIONS = {
26 **Postgres.Parser.FUNCTIONS,
27 "DATEADD": lambda args: exp.DateAdd(
28 this=seq_get(args, 2),
29 expression=seq_get(args, 1),
30 unit=seq_get(args, 0),
31 ),
32 "DATEDIFF": lambda args: exp.DateDiff(
33 this=seq_get(args, 2),
34 expression=seq_get(args, 1),
35 unit=seq_get(args, 0),
36 ),
37 "NVL": exp.Coalesce.from_arg_list,
38 "STRTOL": exp.FromBase.from_arg_list,
39 }
40
41 CONVERT_TYPE_FIRST = True
42
43 def _parse_types(
44 self, check_func: bool = False, schema: bool = False
45 ) -> t.Optional[exp.Expression]:
46 this = super()._parse_types(check_func=check_func, schema=schema)
47
48 if (
49 isinstance(this, exp.DataType)
50 and this.is_type("varchar")
51 and this.expressions
52 and this.expressions[0].this == exp.column("MAX")
53 ):
54 this.set("expressions", [exp.Var(this="MAX")])
55
56 return this
57
58 class Tokenizer(Postgres.Tokenizer):
59 BIT_STRINGS = []
60 HEX_STRINGS = []
61 STRING_ESCAPES = ["\\"]
62
63 KEYWORDS = {
64 **Postgres.Tokenizer.KEYWORDS,
65 "HLLSKETCH": TokenType.HLLSKETCH,
66 "SUPER": TokenType.SUPER,
67 "SYSDATE": TokenType.CURRENT_TIMESTAMP,
68 "TIME": TokenType.TIMESTAMP,
69 "TIMETZ": TokenType.TIMESTAMPTZ,
70 "TOP": TokenType.TOP,
71 "UNLOAD": TokenType.COMMAND,
72 "VARBYTE": TokenType.VARBINARY,
73 }
74
75 # Redshift allows # to appear as a table identifier prefix
76 SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy()
77 SINGLE_TOKENS.pop("#")
78
79 class Generator(Postgres.Generator):
80 LOCKING_READS_SUPPORTED = False
81 RENAME_TABLE_WITH_DB = False
82
83 TYPE_MAPPING = {
84 **Postgres.Generator.TYPE_MAPPING,
85 exp.DataType.Type.BINARY: "VARBYTE",
86 exp.DataType.Type.VARBINARY: "VARBYTE",
87 exp.DataType.Type.INT: "INTEGER",
88 }
89
90 PROPERTIES_LOCATION = {
91 **Postgres.Generator.PROPERTIES_LOCATION,
92 exp.LikeProperty: exp.Properties.Location.POST_WITH,
93 }
94
95 TRANSFORMS = {
96 **Postgres.Generator.TRANSFORMS,
97 exp.CurrentTimestamp: lambda self, e: "SYSDATE",
98 exp.DateAdd: lambda self, e: self.func(
99 "DATEADD", exp.var(e.text("unit") or "day"), e.expression, e.this
100 ),
101 exp.DateDiff: lambda self, e: self.func(
102 "DATEDIFF", exp.var(e.text("unit") or "day"), e.expression, e.this
103 ),
104 exp.DistKeyProperty: lambda self, e: f"DISTKEY({e.name})",
105 exp.DistStyleProperty: lambda self, e: self.naked_property(e),
106 exp.JSONExtract: _json_sql,
107 exp.JSONExtractScalar: _json_sql,
108 exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
109 exp.SortKeyProperty: lambda self, e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
110 exp.FromBase: rename_func("STRTOL"),
111 }
112
113 # Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots
114 TRANSFORMS.pop(exp.Pivot)
115
116 # Redshift uses the POW | POWER (expr1, expr2) syntax instead of expr1 ^ expr2 (postgres)
117 TRANSFORMS.pop(exp.Pow)
118
119 RESERVED_KEYWORDS = {*Postgres.Generator.RESERVED_KEYWORDS, "snapshot", "type"}
120
121 def values_sql(self, expression: exp.Values) -> str:
122 """
123 Converts `VALUES...` expression into a series of unions.
124
125 Note: If you have a lot of unions then this will result in a large number of recursive statements to
126 evaluate the expression. You may need to increase `sys.setrecursionlimit` to run and it can also be
127 very slow.
128 """
129
130 # The VALUES clause is still valid in an `INSERT INTO ..` statement, for example
131 if not expression.find_ancestor(exp.From, exp.Join):
132 return super().values_sql(expression)
133
134 column_names = expression.alias and expression.args["alias"].columns
135
136 selects = []
137 rows = [tuple_exp.expressions for tuple_exp in expression.expressions]
138
139 for i, row in enumerate(rows):
140 if i == 0 and column_names:
141 row = [
142 exp.alias_(value, column_name)
143 for value, column_name in zip(row, column_names)
144 ]
145
146 selects.append(exp.Select(expressions=row))
147
148 subquery_expression: exp.Select | exp.Union = selects[0]
149 if len(selects) > 1:
150 for select in selects[1:]:
151 subquery_expression = exp.union(subquery_expression, select, distinct=False)
152
153 return self.subquery_sql(subquery_expression.subquery(expression.alias))
154
155 def with_properties(self, properties: exp.Properties) -> str:
156 """Redshift doesn't have `WITH` as part of their with_properties so we remove it"""
157 return self.properties(properties, prefix=" ", suffix="")
158
159 def datatype_sql(self, expression: exp.DataType) -> str:
160 """
161 Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
162 VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
163 without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
164 `TEXT` to `VARCHAR`.
165 """
166 if expression.is_type("text"):
167 expression = expression.copy()
168 expression.set("this", exp.DataType.Type.VARCHAR)
169 precision = expression.args.get("expressions")
170
171 if not precision:
172 expression.append("expressions", exp.Var(this="MAX"))
173
174 return super().datatype_sql(expression)
175
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sqlglot/dialects/redshift.py b/sqlglot/dialects/redshift.py
--- a/sqlglot/dialects/redshift.py
+++ b/sqlglot/dialects/redshift.py
@@ -25,13 +25,13 @@
FUNCTIONS = {
**Postgres.Parser.FUNCTIONS,
"DATEADD": lambda args: exp.DateAdd(
- this=seq_get(args, 2),
+ this=exp.TsOrDsToDate(this=seq_get(args, 2)),
expression=seq_get(args, 1),
unit=seq_get(args, 0),
),
"DATEDIFF": lambda args: exp.DateDiff(
- this=seq_get(args, 2),
- expression=seq_get(args, 1),
+ this=exp.TsOrDsToDate(this=seq_get(args, 2)),
+ expression=exp.TsOrDsToDate(this=seq_get(args, 1)),
unit=seq_get(args, 0),
),
"NVL": exp.Coalesce.from_arg_list,
@@ -103,11 +103,12 @@
),
exp.DistKeyProperty: lambda self, e: f"DISTKEY({e.name})",
exp.DistStyleProperty: lambda self, e: self.naked_property(e),
+ exp.FromBase: rename_func("STRTOL"),
exp.JSONExtract: _json_sql,
exp.JSONExtractScalar: _json_sql,
exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
exp.SortKeyProperty: lambda self, e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
- exp.FromBase: rename_func("STRTOL"),
+ exp.TsOrDsToDate: lambda self, e: self.sql(e.this),
}
# Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots
| {"golden_diff": "diff --git a/sqlglot/dialects/redshift.py b/sqlglot/dialects/redshift.py\n--- a/sqlglot/dialects/redshift.py\n+++ b/sqlglot/dialects/redshift.py\n@@ -25,13 +25,13 @@\n FUNCTIONS = {\n **Postgres.Parser.FUNCTIONS,\n \"DATEADD\": lambda args: exp.DateAdd(\n- this=seq_get(args, 2),\n+ this=exp.TsOrDsToDate(this=seq_get(args, 2)),\n expression=seq_get(args, 1),\n unit=seq_get(args, 0),\n ),\n \"DATEDIFF\": lambda args: exp.DateDiff(\n- this=seq_get(args, 2),\n- expression=seq_get(args, 1),\n+ this=exp.TsOrDsToDate(this=seq_get(args, 2)),\n+ expression=exp.TsOrDsToDate(this=seq_get(args, 1)),\n unit=seq_get(args, 0),\n ),\n \"NVL\": exp.Coalesce.from_arg_list,\n@@ -103,11 +103,12 @@\n ),\n exp.DistKeyProperty: lambda self, e: f\"DISTKEY({e.name})\",\n exp.DistStyleProperty: lambda self, e: self.naked_property(e),\n+ exp.FromBase: rename_func(\"STRTOL\"),\n exp.JSONExtract: _json_sql,\n exp.JSONExtractScalar: _json_sql,\n exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),\n exp.SortKeyProperty: lambda self, e: f\"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})\",\n- exp.FromBase: rename_func(\"STRTOL\"),\n+ exp.TsOrDsToDate: lambda self, e: self.sql(e.this),\n }\n \n # Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots\n", "issue": "Type error when converting datediff from redshift to trino \n```\r\nsql = \"select datediff(week,'2009-01-01','2009-12-31')\"\r\nconverted_sql = sqlglot.transpile(sql, read=\"redshift\", write=\"trino\")[0]\r\nprint(converted_sql)\r\nSELECT DATE_DIFF('week', '2009-01-01', '2009-12-31')\r\n```\r\n\r\nTrino error: `Unexpected parameters (varchar(4), varchar(10), varchar(10)) for function date_diff. Expected: date_diff(varchar(x), date, date), date_diff(varchar(x), timestamp(p), timestamp(p)), date_diff(varchar(x), timestamp(p) with time zone, timestamp(p) with time zone), date_diff(varchar(x), time(p), time(p)), date_diff(varchar(x), time(p) with time zone, time(p) with time zone)'\r\n`\r\n\r\nChanging the SQL to `SELECT DATE_DIFF('week', DATE'2009-01-01', DATE'2009-12-31')` works in Trino\r\n\r\nhttps://trino.io/docs/current/functions/datetime.html\n", "before_files": [{"content": "from __future__ import annotations\n\nimport typing as t\n\nfrom sqlglot import exp, transforms\nfrom sqlglot.dialects.dialect import rename_func\nfrom sqlglot.dialects.postgres import Postgres\nfrom sqlglot.helper import seq_get\nfrom sqlglot.tokens import TokenType\n\n\ndef _json_sql(self: Postgres.Generator, expression: exp.JSONExtract | exp.JSONExtractScalar) -> str:\n return f'{self.sql(expression, \"this\")}.\"{expression.expression.name}\"'\n\n\nclass Redshift(Postgres):\n time_format = \"'YYYY-MM-DD HH:MI:SS'\"\n time_mapping = {\n **Postgres.time_mapping,\n \"MON\": \"%b\",\n \"HH\": \"%H\",\n }\n\n class Parser(Postgres.Parser):\n FUNCTIONS = {\n **Postgres.Parser.FUNCTIONS,\n \"DATEADD\": lambda args: exp.DateAdd(\n this=seq_get(args, 2),\n expression=seq_get(args, 1),\n unit=seq_get(args, 0),\n ),\n \"DATEDIFF\": lambda args: exp.DateDiff(\n this=seq_get(args, 2),\n expression=seq_get(args, 1),\n unit=seq_get(args, 0),\n ),\n \"NVL\": exp.Coalesce.from_arg_list,\n \"STRTOL\": exp.FromBase.from_arg_list,\n }\n\n CONVERT_TYPE_FIRST = True\n\n def _parse_types(\n self, check_func: bool = False, schema: bool = False\n ) -> t.Optional[exp.Expression]:\n this = super()._parse_types(check_func=check_func, schema=schema)\n\n if (\n isinstance(this, exp.DataType)\n and this.is_type(\"varchar\")\n and 
this.expressions\n and this.expressions[0].this == exp.column(\"MAX\")\n ):\n this.set(\"expressions\", [exp.Var(this=\"MAX\")])\n\n return this\n\n class Tokenizer(Postgres.Tokenizer):\n BIT_STRINGS = []\n HEX_STRINGS = []\n STRING_ESCAPES = [\"\\\\\"]\n\n KEYWORDS = {\n **Postgres.Tokenizer.KEYWORDS,\n \"HLLSKETCH\": TokenType.HLLSKETCH,\n \"SUPER\": TokenType.SUPER,\n \"SYSDATE\": TokenType.CURRENT_TIMESTAMP,\n \"TIME\": TokenType.TIMESTAMP,\n \"TIMETZ\": TokenType.TIMESTAMPTZ,\n \"TOP\": TokenType.TOP,\n \"UNLOAD\": TokenType.COMMAND,\n \"VARBYTE\": TokenType.VARBINARY,\n }\n\n # Redshift allows # to appear as a table identifier prefix\n SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy()\n SINGLE_TOKENS.pop(\"#\")\n\n class Generator(Postgres.Generator):\n LOCKING_READS_SUPPORTED = False\n RENAME_TABLE_WITH_DB = False\n\n TYPE_MAPPING = {\n **Postgres.Generator.TYPE_MAPPING,\n exp.DataType.Type.BINARY: \"VARBYTE\",\n exp.DataType.Type.VARBINARY: \"VARBYTE\",\n exp.DataType.Type.INT: \"INTEGER\",\n }\n\n PROPERTIES_LOCATION = {\n **Postgres.Generator.PROPERTIES_LOCATION,\n exp.LikeProperty: exp.Properties.Location.POST_WITH,\n }\n\n TRANSFORMS = {\n **Postgres.Generator.TRANSFORMS,\n exp.CurrentTimestamp: lambda self, e: \"SYSDATE\",\n exp.DateAdd: lambda self, e: self.func(\n \"DATEADD\", exp.var(e.text(\"unit\") or \"day\"), e.expression, e.this\n ),\n exp.DateDiff: lambda self, e: self.func(\n \"DATEDIFF\", exp.var(e.text(\"unit\") or \"day\"), e.expression, e.this\n ),\n exp.DistKeyProperty: lambda self, e: f\"DISTKEY({e.name})\",\n exp.DistStyleProperty: lambda self, e: self.naked_property(e),\n exp.JSONExtract: _json_sql,\n exp.JSONExtractScalar: _json_sql,\n exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),\n exp.SortKeyProperty: lambda self, e: f\"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})\",\n exp.FromBase: rename_func(\"STRTOL\"),\n }\n\n # Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots\n TRANSFORMS.pop(exp.Pivot)\n\n # Redshift uses the POW | POWER (expr1, expr2) syntax instead of expr1 ^ expr2 (postgres)\n TRANSFORMS.pop(exp.Pow)\n\n RESERVED_KEYWORDS = {*Postgres.Generator.RESERVED_KEYWORDS, \"snapshot\", \"type\"}\n\n def values_sql(self, expression: exp.Values) -> str:\n \"\"\"\n Converts `VALUES...` expression into a series of unions.\n\n Note: If you have a lot of unions then this will result in a large number of recursive statements to\n evaluate the expression. 
You may need to increase `sys.setrecursionlimit` to run and it can also be\n very slow.\n \"\"\"\n\n # The VALUES clause is still valid in an `INSERT INTO ..` statement, for example\n if not expression.find_ancestor(exp.From, exp.Join):\n return super().values_sql(expression)\n\n column_names = expression.alias and expression.args[\"alias\"].columns\n\n selects = []\n rows = [tuple_exp.expressions for tuple_exp in expression.expressions]\n\n for i, row in enumerate(rows):\n if i == 0 and column_names:\n row = [\n exp.alias_(value, column_name)\n for value, column_name in zip(row, column_names)\n ]\n\n selects.append(exp.Select(expressions=row))\n\n subquery_expression: exp.Select | exp.Union = selects[0]\n if len(selects) > 1:\n for select in selects[1:]:\n subquery_expression = exp.union(subquery_expression, select, distinct=False)\n\n return self.subquery_sql(subquery_expression.subquery(expression.alias))\n\n def with_properties(self, properties: exp.Properties) -> str:\n \"\"\"Redshift doesn't have `WITH` as part of their with_properties so we remove it\"\"\"\n return self.properties(properties, prefix=\" \", suffix=\"\")\n\n def datatype_sql(self, expression: exp.DataType) -> str:\n \"\"\"\n Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean\n VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type\n without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert\n `TEXT` to `VARCHAR`.\n \"\"\"\n if expression.is_type(\"text\"):\n expression = expression.copy()\n expression.set(\"this\", exp.DataType.Type.VARCHAR)\n precision = expression.args.get(\"expressions\")\n\n if not precision:\n expression.append(\"expressions\", exp.Var(this=\"MAX\"))\n\n return super().datatype_sql(expression)\n", "path": "sqlglot/dialects/redshift.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport typing as t\n\nfrom sqlglot import exp, transforms\nfrom sqlglot.dialects.dialect import rename_func\nfrom sqlglot.dialects.postgres import Postgres\nfrom sqlglot.helper import seq_get\nfrom sqlglot.tokens import TokenType\n\n\ndef _json_sql(self: Postgres.Generator, expression: exp.JSONExtract | exp.JSONExtractScalar) -> str:\n return f'{self.sql(expression, \"this\")}.\"{expression.expression.name}\"'\n\n\nclass Redshift(Postgres):\n time_format = \"'YYYY-MM-DD HH:MI:SS'\"\n time_mapping = {\n **Postgres.time_mapping,\n \"MON\": \"%b\",\n \"HH\": \"%H\",\n }\n\n class Parser(Postgres.Parser):\n FUNCTIONS = {\n **Postgres.Parser.FUNCTIONS,\n \"DATEADD\": lambda args: exp.DateAdd(\n this=exp.TsOrDsToDate(this=seq_get(args, 2)),\n expression=seq_get(args, 1),\n unit=seq_get(args, 0),\n ),\n \"DATEDIFF\": lambda args: exp.DateDiff(\n this=exp.TsOrDsToDate(this=seq_get(args, 2)),\n expression=exp.TsOrDsToDate(this=seq_get(args, 1)),\n unit=seq_get(args, 0),\n ),\n \"NVL\": exp.Coalesce.from_arg_list,\n \"STRTOL\": exp.FromBase.from_arg_list,\n }\n\n CONVERT_TYPE_FIRST = True\n\n def _parse_types(\n self, check_func: bool = False, schema: bool = False\n ) -> t.Optional[exp.Expression]:\n this = super()._parse_types(check_func=check_func, schema=schema)\n\n if (\n isinstance(this, exp.DataType)\n and this.is_type(\"varchar\")\n and this.expressions\n and this.expressions[0].this == exp.column(\"MAX\")\n ):\n this.set(\"expressions\", [exp.Var(this=\"MAX\")])\n\n return this\n\n class Tokenizer(Postgres.Tokenizer):\n BIT_STRINGS = []\n HEX_STRINGS = []\n 
STRING_ESCAPES = [\"\\\\\"]\n\n KEYWORDS = {\n **Postgres.Tokenizer.KEYWORDS,\n \"HLLSKETCH\": TokenType.HLLSKETCH,\n \"SUPER\": TokenType.SUPER,\n \"SYSDATE\": TokenType.CURRENT_TIMESTAMP,\n \"TIME\": TokenType.TIMESTAMP,\n \"TIMETZ\": TokenType.TIMESTAMPTZ,\n \"TOP\": TokenType.TOP,\n \"UNLOAD\": TokenType.COMMAND,\n \"VARBYTE\": TokenType.VARBINARY,\n }\n\n # Redshift allows # to appear as a table identifier prefix\n SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy()\n SINGLE_TOKENS.pop(\"#\")\n\n class Generator(Postgres.Generator):\n LOCKING_READS_SUPPORTED = False\n RENAME_TABLE_WITH_DB = False\n\n TYPE_MAPPING = {\n **Postgres.Generator.TYPE_MAPPING,\n exp.DataType.Type.BINARY: \"VARBYTE\",\n exp.DataType.Type.VARBINARY: \"VARBYTE\",\n exp.DataType.Type.INT: \"INTEGER\",\n }\n\n PROPERTIES_LOCATION = {\n **Postgres.Generator.PROPERTIES_LOCATION,\n exp.LikeProperty: exp.Properties.Location.POST_WITH,\n }\n\n TRANSFORMS = {\n **Postgres.Generator.TRANSFORMS,\n exp.CurrentTimestamp: lambda self, e: \"SYSDATE\",\n exp.DateAdd: lambda self, e: self.func(\n \"DATEADD\", exp.var(e.text(\"unit\") or \"day\"), e.expression, e.this\n ),\n exp.DateDiff: lambda self, e: self.func(\n \"DATEDIFF\", exp.var(e.text(\"unit\") or \"day\"), e.expression, e.this\n ),\n exp.DistKeyProperty: lambda self, e: f\"DISTKEY({e.name})\",\n exp.DistStyleProperty: lambda self, e: self.naked_property(e),\n exp.FromBase: rename_func(\"STRTOL\"),\n exp.JSONExtract: _json_sql,\n exp.JSONExtractScalar: _json_sql,\n exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),\n exp.SortKeyProperty: lambda self, e: f\"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})\",\n exp.TsOrDsToDate: lambda self, e: self.sql(e.this),\n }\n\n # Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots\n TRANSFORMS.pop(exp.Pivot)\n\n # Redshift uses the POW | POWER (expr1, expr2) syntax instead of expr1 ^ expr2 (postgres)\n TRANSFORMS.pop(exp.Pow)\n\n RESERVED_KEYWORDS = {*Postgres.Generator.RESERVED_KEYWORDS, \"snapshot\", \"type\"}\n\n def values_sql(self, expression: exp.Values) -> str:\n \"\"\"\n Converts `VALUES...` expression into a series of unions.\n\n Note: If you have a lot of unions then this will result in a large number of recursive statements to\n evaluate the expression. 
You may need to increase `sys.setrecursionlimit` to run and it can also be\n very slow.\n \"\"\"\n\n # The VALUES clause is still valid in an `INSERT INTO ..` statement, for example\n if not expression.find_ancestor(exp.From, exp.Join):\n return super().values_sql(expression)\n\n column_names = expression.alias and expression.args[\"alias\"].columns\n\n selects = []\n rows = [tuple_exp.expressions for tuple_exp in expression.expressions]\n\n for i, row in enumerate(rows):\n if i == 0 and column_names:\n row = [\n exp.alias_(value, column_name)\n for value, column_name in zip(row, column_names)\n ]\n\n selects.append(exp.Select(expressions=row))\n\n subquery_expression: exp.Select | exp.Union = selects[0]\n if len(selects) > 1:\n for select in selects[1:]:\n subquery_expression = exp.union(subquery_expression, select, distinct=False)\n\n return self.subquery_sql(subquery_expression.subquery(expression.alias))\n\n def with_properties(self, properties: exp.Properties) -> str:\n \"\"\"Redshift doesn't have `WITH` as part of their with_properties so we remove it\"\"\"\n return self.properties(properties, prefix=\" \", suffix=\"\")\n\n def datatype_sql(self, expression: exp.DataType) -> str:\n \"\"\"\n Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean\n VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type\n without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert\n `TEXT` to `VARCHAR`.\n \"\"\"\n if expression.is_type(\"text\"):\n expression = expression.copy()\n expression.set(\"this\", exp.DataType.Type.VARCHAR)\n precision = expression.args.get(\"expressions\")\n\n if not precision:\n expression.append(\"expressions\", exp.Var(this=\"MAX\"))\n\n return super().datatype_sql(expression)\n", "path": "sqlglot/dialects/redshift.py"}]} | 2,455 | 434 |
gh_patches_debug_16169 | rasdani/github-patches | git_diff | networkx__networkx-4999 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Wrong degree_assortativity_coefficient for directed graphs
### Current Behavior
``degree_assortativity_coefficient`` will fail for most directed graphs, unless the set of in- or out-degrees happens to be the same as the set of total degrees.
This issue was introduced in 2.6 by #4928 ([L78](https://github.com/networkx/networkx/pull/4928/files#diff-76675aa4f0d3a79d394219c8e15ec346b3f5af9f4a733d5ef9e7026421d43bd9R78)).
### Expected Behavior
The mapping should include all relevant in- and out-degrees for directed graphs.
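A minimal sketch of how such a mapping could be built, added here purely as an illustration (the helper name `directed_degree_mapping` and its signature are hypothetical, not NetworkX API):
```python
def directed_degree_mapping(G, x="out", y="in", weight=None):
    # Collect every degree value that can appear on either end of an edge,
    # depending on which degree types (x, y) were requested.
    degrees = set()
    if "in" in (x, y):
        degrees.update(d for _, d in G.in_degree(weight=weight))
    if "out" in (x, y):
        degrees.update(d for _, d in G.out_degree(weight=weight))
    return {d: i for i, d in enumerate(sorted(degrees))}
```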
### Steps to Reproduce
```python
G = nx.DiGraph()
G.add_edges_from([(0, 3), (1, 0), (1, 2), (2, 4), (4, 1), (4, 3), (4, 2)])
nx.degree_assortativity_coefficient(G) # returns NaN
nx.degree_pearson_correlation_coefficient(G) # returns the correct value 0.14852
```
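Worked out by hand for the graph above, the degree sets make the problem concrete:
```python
# out-degrees: {0: 1, 1: 2, 2: 1, 3: 0, 4: 3}  -> values {0, 1, 2, 3}
# in-degrees:  {0: 1, 1: 1, 2: 2, 3: 2, 4: 1}  -> values {1, 2}
# total:       {0: 2, 1: 3, 2: 3, 3: 2, 4: 4}  -> values {2, 3, 4}
# A mapping keyed on the total-degree values (2, 3, 4) has no entry for
# out-degrees 0 and 1 or for in-degree 1, so several observed
# (out-degree, in-degree) edge pairs have no slot in the mixing matrix.
```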
### Environment
Python version: 3.9
NetworkX version: 2.6+
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `networkx/algorithms/assortativity/correlation.py`
Content:
```
1 """Node assortativity coefficients and correlation measures.
2 """
3 from networkx.algorithms.assortativity.mixing import (
4 degree_mixing_matrix,
5 attribute_mixing_matrix,
6 numeric_mixing_matrix,
7 )
8 from networkx.algorithms.assortativity.pairs import node_degree_xy
9
10 __all__ = [
11 "degree_pearson_correlation_coefficient",
12 "degree_assortativity_coefficient",
13 "attribute_assortativity_coefficient",
14 "numeric_assortativity_coefficient",
15 ]
16
17
18 def degree_assortativity_coefficient(G, x="out", y="in", weight=None, nodes=None):
19 """Compute degree assortativity of graph.
20
21 Assortativity measures the similarity of connections
22 in the graph with respect to the node degree.
23
24 Parameters
25 ----------
26 G : NetworkX graph
27
28 x: string ('in','out')
29 The degree type for source node (directed graphs only).
30
31 y: string ('in','out')
32 The degree type for target node (directed graphs only).
33
34 weight: string or None, optional (default=None)
35 The edge attribute that holds the numerical value used
36 as a weight. If None, then each edge has weight 1.
37 The degree is the sum of the edge weights adjacent to the node.
38
39 nodes: list or iterable (optional)
40 Compute degree assortativity only for nodes in container.
41 The default is all nodes.
42
43 Returns
44 -------
45 r : float
46 Assortativity of graph by degree.
47
48 Examples
49 --------
50 >>> G = nx.path_graph(4)
51 >>> r = nx.degree_assortativity_coefficient(G)
52 >>> print(f"{r:3.1f}")
53 -0.5
54
55 See Also
56 --------
57 attribute_assortativity_coefficient
58 numeric_assortativity_coefficient
59 degree_mixing_dict
60 degree_mixing_matrix
61
62 Notes
63 -----
64 This computes Eq. (21) in Ref. [1]_ , where e is the joint
65 probability distribution (mixing matrix) of the degrees. If G is
66 directed than the matrix e is the joint probability of the
67 user-specified degree type for the source and target.
68
69 References
70 ----------
71 .. [1] M. E. J. Newman, Mixing patterns in networks,
72 Physical Review E, 67 026126, 2003
73 .. [2] Foster, J.G., Foster, D.V., Grassberger, P. & Paczuski, M.
74 Edge direction and the structure of networks, PNAS 107, 10815-20 (2010).
75 """
76 if nodes is None:
77 nodes = G.nodes
78 degrees = set([d for n, d in G.degree(nodes, weight=weight)])
79 mapping = {d: i for i, d, in enumerate(degrees)}
80 M = degree_mixing_matrix(G, x=x, y=y, nodes=nodes, weight=weight, mapping=mapping)
81 return numeric_ac(M, mapping=mapping)
82
83
84 def degree_pearson_correlation_coefficient(G, x="out", y="in", weight=None, nodes=None):
85 """Compute degree assortativity of graph.
86
87 Assortativity measures the similarity of connections
88 in the graph with respect to the node degree.
89
90 This is the same as degree_assortativity_coefficient but uses the
91 potentially faster scipy.stats.pearsonr function.
92
93 Parameters
94 ----------
95 G : NetworkX graph
96
97 x: string ('in','out')
98 The degree type for source node (directed graphs only).
99
100 y: string ('in','out')
101 The degree type for target node (directed graphs only).
102
103 weight: string or None, optional (default=None)
104 The edge attribute that holds the numerical value used
105 as a weight. If None, then each edge has weight 1.
106 The degree is the sum of the edge weights adjacent to the node.
107
108 nodes: list or iterable (optional)
109 Compute pearson correlation of degrees only for specified nodes.
110 The default is all nodes.
111
112 Returns
113 -------
114 r : float
115 Assortativity of graph by degree.
116
117 Examples
118 --------
119 >>> G = nx.path_graph(4)
120 >>> r = nx.degree_pearson_correlation_coefficient(G)
121 >>> print(f"{r:3.1f}")
122 -0.5
123
124 Notes
125 -----
126 This calls scipy.stats.pearsonr.
127
128 References
129 ----------
130 .. [1] M. E. J. Newman, Mixing patterns in networks
131 Physical Review E, 67 026126, 2003
132 .. [2] Foster, J.G., Foster, D.V., Grassberger, P. & Paczuski, M.
133 Edge direction and the structure of networks, PNAS 107, 10815-20 (2010).
134 """
135 import scipy as sp
136 import scipy.stats # call as sp.stats
137
138 xy = node_degree_xy(G, x=x, y=y, nodes=nodes, weight=weight)
139 x, y = zip(*xy)
140 return sp.stats.pearsonr(x, y)[0]
141
142
143 def attribute_assortativity_coefficient(G, attribute, nodes=None):
144 """Compute assortativity for node attributes.
145
146 Assortativity measures the similarity of connections
147 in the graph with respect to the given attribute.
148
149 Parameters
150 ----------
151 G : NetworkX graph
152
153 attribute : string
154 Node attribute key
155
156 nodes: list or iterable (optional)
157 Compute attribute assortativity for nodes in container.
158 The default is all nodes.
159
160 Returns
161 -------
162 r: float
163 Assortativity of graph for given attribute
164
165 Examples
166 --------
167 >>> G = nx.Graph()
168 >>> G.add_nodes_from([0, 1], color="red")
169 >>> G.add_nodes_from([2, 3], color="blue")
170 >>> G.add_edges_from([(0, 1), (2, 3)])
171 >>> print(nx.attribute_assortativity_coefficient(G, "color"))
172 1.0
173
174 Notes
175 -----
176 This computes Eq. (2) in Ref. [1]_ , (trace(M)-sum(M^2))/(1-sum(M^2)),
177 where M is the joint probability distribution (mixing matrix)
178 of the specified attribute.
179
180 References
181 ----------
182 .. [1] M. E. J. Newman, Mixing patterns in networks,
183 Physical Review E, 67 026126, 2003
184 """
185 M = attribute_mixing_matrix(G, attribute, nodes)
186 return attribute_ac(M)
187
188
189 def numeric_assortativity_coefficient(G, attribute, nodes=None):
190 """Compute assortativity for numerical node attributes.
191
192 Assortativity measures the similarity of connections
193 in the graph with respect to the given numeric attribute.
194
195 Parameters
196 ----------
197 G : NetworkX graph
198
199 attribute : string
200 Node attribute key.
201
202 nodes: list or iterable (optional)
203 Compute numeric assortativity only for attributes of nodes in
204 container. The default is all nodes.
205
206 Returns
207 -------
208 r: float
209 Assortativity of graph for given attribute
210
211 Examples
212 --------
213 >>> G = nx.Graph()
214 >>> G.add_nodes_from([0, 1], size=2)
215 >>> G.add_nodes_from([2, 3], size=3)
216 >>> G.add_edges_from([(0, 1), (2, 3)])
217 >>> print(nx.numeric_assortativity_coefficient(G, "size"))
218 1.0
219
220 Notes
221 -----
222 This computes Eq. (21) in Ref. [1]_ , for the mixing matrix
223 of the specified attribute.
224
225 References
226 ----------
227 .. [1] M. E. J. Newman, Mixing patterns in networks
228 Physical Review E, 67 026126, 2003
229 """
230 if nodes is None:
231 nodes = G.nodes
232 vals = set(G.nodes[n][attribute] for n in nodes)
233 mapping = {d: i for i, d, in enumerate(vals)}
234 M = attribute_mixing_matrix(G, attribute, nodes, mapping)
235 return numeric_ac(M, mapping)
236
237
238 def attribute_ac(M):
239 """Compute assortativity for attribute matrix M.
240
241 Parameters
242 ----------
243 M : numpy.ndarray
244 2D ndarray representing the attribute mixing matrix.
245
246 Notes
247 -----
248 This computes Eq. (2) in Ref. [1]_ , (trace(e)-sum(e^2))/(1-sum(e^2)),
249 where e is the joint probability distribution (mixing matrix)
250 of the specified attribute.
251
252 References
253 ----------
254 .. [1] M. E. J. Newman, Mixing patterns in networks,
255 Physical Review E, 67 026126, 2003
256 """
257 if M.sum() != 1.0:
258 M = M / M.sum()
259 s = (M @ M).sum()
260 t = M.trace()
261 r = (t - s) / (1 - s)
262 return r
263
264
265 def numeric_ac(M, mapping):
266 # M is a numpy matrix or array
267 # numeric assortativity coefficient, pearsonr
268 import numpy as np
269
270 if M.sum() != 1.0:
271 M = M / float(M.sum())
272 nx, ny = M.shape # nx=ny
273 x = np.array(list(mapping.keys()))
274 y = x # x and y have the same support
275 idx = list(mapping.values())
276 a = M.sum(axis=0)
277 b = M.sum(axis=1)
278 vara = (a[idx] * x ** 2).sum() - ((a[idx] * x).sum()) ** 2
279 varb = (b[idx] * y ** 2).sum() - ((b[idx] * y).sum()) ** 2
280 xy = np.outer(x, y)
281 ab = np.outer(a[idx], b[idx])
282 return (xy * (M - ab)).sum() / np.sqrt(vara * varb)
283
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/networkx/algorithms/assortativity/correlation.py b/networkx/algorithms/assortativity/correlation.py
--- a/networkx/algorithms/assortativity/correlation.py
+++ b/networkx/algorithms/assortativity/correlation.py
@@ -75,9 +75,27 @@
"""
if nodes is None:
nodes = G.nodes
- degrees = set([d for n, d in G.degree(nodes, weight=weight)])
+
+ degrees = None
+
+ if G.is_directed():
+ indeg = (
+ set([d for _, d in G.in_degree(nodes, weight=weight)])
+ if "in" in (x, y)
+ else set()
+ )
+ outdeg = (
+ set([d for _, d in G.out_degree(nodes, weight=weight)])
+ if "out" in (x, y)
+ else set()
+ )
+ degrees = set.union(indeg, outdeg)
+ else:
+ degrees = set([d for _, d in G.degree(nodes, weight=weight)])
+
mapping = {d: i for i, d, in enumerate(degrees)}
M = degree_mixing_matrix(G, x=x, y=y, nodes=nodes, weight=weight, mapping=mapping)
+
return numeric_ac(M, mapping=mapping)
| {"golden_diff": "diff --git a/networkx/algorithms/assortativity/correlation.py b/networkx/algorithms/assortativity/correlation.py\n--- a/networkx/algorithms/assortativity/correlation.py\n+++ b/networkx/algorithms/assortativity/correlation.py\n@@ -75,9 +75,27 @@\n \"\"\"\n if nodes is None:\n nodes = G.nodes\n- degrees = set([d for n, d in G.degree(nodes, weight=weight)])\n+\n+ degrees = None\n+\n+ if G.is_directed():\n+ indeg = (\n+ set([d for _, d in G.in_degree(nodes, weight=weight)])\n+ if \"in\" in (x, y)\n+ else set()\n+ )\n+ outdeg = (\n+ set([d for _, d in G.out_degree(nodes, weight=weight)])\n+ if \"out\" in (x, y)\n+ else set()\n+ )\n+ degrees = set.union(indeg, outdeg)\n+ else:\n+ degrees = set([d for _, d in G.degree(nodes, weight=weight)])\n+\n mapping = {d: i for i, d, in enumerate(degrees)}\n M = degree_mixing_matrix(G, x=x, y=y, nodes=nodes, weight=weight, mapping=mapping)\n+\n return numeric_ac(M, mapping=mapping)\n", "issue": "Wrong degree_assortativity_coefficient for directed graphs\n### Current Behavior\r\n``degree_assortativity_coefficient`` will fail for most directed graphs except if the set of in- or out-degrees is the same as the set of total-degrees.\r\nThis issue was introduced in 2.6 by #4928 ([L78](https://github.com/networkx/networkx/pull/4928/files#diff-76675aa4f0d3a79d394219c8e15ec346b3f5af9f4a733d5ef9e7026421d43bd9R78)).\r\n\r\n### Expected Behavior\r\nThe mapping should include all relevant in- and out-degrees for directed graphs.\r\n\r\n### Steps to Reproduce\r\n```python\r\nG = nx.DiGraph()\r\nG.add_edges_from([(0, 3), (1, 0), (1, 2), (2, 4), (4, 1), (4, 3), (4, 2)])\r\n\r\nnx.degree_assortativity_coefficient(G) # returns NaN\r\nnx.degree_pearson_correlation_coefficient(G) # returns the correct value 0.14852\r\n```\r\n\r\n### Environment\r\nPython version: 3.9\r\nNetworkX version: 2.6+\r\n\n", "before_files": [{"content": "\"\"\"Node assortativity coefficients and correlation measures.\n\"\"\"\nfrom networkx.algorithms.assortativity.mixing import (\n degree_mixing_matrix,\n attribute_mixing_matrix,\n numeric_mixing_matrix,\n)\nfrom networkx.algorithms.assortativity.pairs import node_degree_xy\n\n__all__ = [\n \"degree_pearson_correlation_coefficient\",\n \"degree_assortativity_coefficient\",\n \"attribute_assortativity_coefficient\",\n \"numeric_assortativity_coefficient\",\n]\n\n\ndef degree_assortativity_coefficient(G, x=\"out\", y=\"in\", weight=None, nodes=None):\n \"\"\"Compute degree assortativity of graph.\n\n Assortativity measures the similarity of connections\n in the graph with respect to the node degree.\n\n Parameters\n ----------\n G : NetworkX graph\n\n x: string ('in','out')\n The degree type for source node (directed graphs only).\n\n y: string ('in','out')\n The degree type for target node (directed graphs only).\n\n weight: string or None, optional (default=None)\n The edge attribute that holds the numerical value used\n as a weight. 
If None, then each edge has weight 1.\n The degree is the sum of the edge weights adjacent to the node.\n\n nodes: list or iterable (optional)\n Compute degree assortativity only for nodes in container.\n The default is all nodes.\n\n Returns\n -------\n r : float\n Assortativity of graph by degree.\n\n Examples\n --------\n >>> G = nx.path_graph(4)\n >>> r = nx.degree_assortativity_coefficient(G)\n >>> print(f\"{r:3.1f}\")\n -0.5\n\n See Also\n --------\n attribute_assortativity_coefficient\n numeric_assortativity_coefficient\n degree_mixing_dict\n degree_mixing_matrix\n\n Notes\n -----\n This computes Eq. (21) in Ref. [1]_ , where e is the joint\n probability distribution (mixing matrix) of the degrees. If G is\n directed than the matrix e is the joint probability of the\n user-specified degree type for the source and target.\n\n References\n ----------\n .. [1] M. E. J. Newman, Mixing patterns in networks,\n Physical Review E, 67 026126, 2003\n .. [2] Foster, J.G., Foster, D.V., Grassberger, P. & Paczuski, M.\n Edge direction and the structure of networks, PNAS 107, 10815-20 (2010).\n \"\"\"\n if nodes is None:\n nodes = G.nodes\n degrees = set([d for n, d in G.degree(nodes, weight=weight)])\n mapping = {d: i for i, d, in enumerate(degrees)}\n M = degree_mixing_matrix(G, x=x, y=y, nodes=nodes, weight=weight, mapping=mapping)\n return numeric_ac(M, mapping=mapping)\n\n\ndef degree_pearson_correlation_coefficient(G, x=\"out\", y=\"in\", weight=None, nodes=None):\n \"\"\"Compute degree assortativity of graph.\n\n Assortativity measures the similarity of connections\n in the graph with respect to the node degree.\n\n This is the same as degree_assortativity_coefficient but uses the\n potentially faster scipy.stats.pearsonr function.\n\n Parameters\n ----------\n G : NetworkX graph\n\n x: string ('in','out')\n The degree type for source node (directed graphs only).\n\n y: string ('in','out')\n The degree type for target node (directed graphs only).\n\n weight: string or None, optional (default=None)\n The edge attribute that holds the numerical value used\n as a weight. If None, then each edge has weight 1.\n The degree is the sum of the edge weights adjacent to the node.\n\n nodes: list or iterable (optional)\n Compute pearson correlation of degrees only for specified nodes.\n The default is all nodes.\n\n Returns\n -------\n r : float\n Assortativity of graph by degree.\n\n Examples\n --------\n >>> G = nx.path_graph(4)\n >>> r = nx.degree_pearson_correlation_coefficient(G)\n >>> print(f\"{r:3.1f}\")\n -0.5\n\n Notes\n -----\n This calls scipy.stats.pearsonr.\n\n References\n ----------\n .. [1] M. E. J. Newman, Mixing patterns in networks\n Physical Review E, 67 026126, 2003\n .. [2] Foster, J.G., Foster, D.V., Grassberger, P. 
& Paczuski, M.\n Edge direction and the structure of networks, PNAS 107, 10815-20 (2010).\n \"\"\"\n import scipy as sp\n import scipy.stats # call as sp.stats\n\n xy = node_degree_xy(G, x=x, y=y, nodes=nodes, weight=weight)\n x, y = zip(*xy)\n return sp.stats.pearsonr(x, y)[0]\n\n\ndef attribute_assortativity_coefficient(G, attribute, nodes=None):\n \"\"\"Compute assortativity for node attributes.\n\n Assortativity measures the similarity of connections\n in the graph with respect to the given attribute.\n\n Parameters\n ----------\n G : NetworkX graph\n\n attribute : string\n Node attribute key\n\n nodes: list or iterable (optional)\n Compute attribute assortativity for nodes in container.\n The default is all nodes.\n\n Returns\n -------\n r: float\n Assortativity of graph for given attribute\n\n Examples\n --------\n >>> G = nx.Graph()\n >>> G.add_nodes_from([0, 1], color=\"red\")\n >>> G.add_nodes_from([2, 3], color=\"blue\")\n >>> G.add_edges_from([(0, 1), (2, 3)])\n >>> print(nx.attribute_assortativity_coefficient(G, \"color\"))\n 1.0\n\n Notes\n -----\n This computes Eq. (2) in Ref. [1]_ , (trace(M)-sum(M^2))/(1-sum(M^2)),\n where M is the joint probability distribution (mixing matrix)\n of the specified attribute.\n\n References\n ----------\n .. [1] M. E. J. Newman, Mixing patterns in networks,\n Physical Review E, 67 026126, 2003\n \"\"\"\n M = attribute_mixing_matrix(G, attribute, nodes)\n return attribute_ac(M)\n\n\ndef numeric_assortativity_coefficient(G, attribute, nodes=None):\n \"\"\"Compute assortativity for numerical node attributes.\n\n Assortativity measures the similarity of connections\n in the graph with respect to the given numeric attribute.\n\n Parameters\n ----------\n G : NetworkX graph\n\n attribute : string\n Node attribute key.\n\n nodes: list or iterable (optional)\n Compute numeric assortativity only for attributes of nodes in\n container. The default is all nodes.\n\n Returns\n -------\n r: float\n Assortativity of graph for given attribute\n\n Examples\n --------\n >>> G = nx.Graph()\n >>> G.add_nodes_from([0, 1], size=2)\n >>> G.add_nodes_from([2, 3], size=3)\n >>> G.add_edges_from([(0, 1), (2, 3)])\n >>> print(nx.numeric_assortativity_coefficient(G, \"size\"))\n 1.0\n\n Notes\n -----\n This computes Eq. (21) in Ref. [1]_ , for the mixing matrix\n of the specified attribute.\n\n References\n ----------\n .. [1] M. E. J. Newman, Mixing patterns in networks\n Physical Review E, 67 026126, 2003\n \"\"\"\n if nodes is None:\n nodes = G.nodes\n vals = set(G.nodes[n][attribute] for n in nodes)\n mapping = {d: i for i, d, in enumerate(vals)}\n M = attribute_mixing_matrix(G, attribute, nodes, mapping)\n return numeric_ac(M, mapping)\n\n\ndef attribute_ac(M):\n \"\"\"Compute assortativity for attribute matrix M.\n\n Parameters\n ----------\n M : numpy.ndarray\n 2D ndarray representing the attribute mixing matrix.\n\n Notes\n -----\n This computes Eq. (2) in Ref. [1]_ , (trace(e)-sum(e^2))/(1-sum(e^2)),\n where e is the joint probability distribution (mixing matrix)\n of the specified attribute.\n\n References\n ----------\n .. [1] M. E. J. 
Newman, Mixing patterns in networks,\n Physical Review E, 67 026126, 2003\n \"\"\"\n if M.sum() != 1.0:\n M = M / M.sum()\n s = (M @ M).sum()\n t = M.trace()\n r = (t - s) / (1 - s)\n return r\n\n\ndef numeric_ac(M, mapping):\n # M is a numpy matrix or array\n # numeric assortativity coefficient, pearsonr\n import numpy as np\n\n if M.sum() != 1.0:\n M = M / float(M.sum())\n nx, ny = M.shape # nx=ny\n x = np.array(list(mapping.keys()))\n y = x # x and y have the same support\n idx = list(mapping.values())\n a = M.sum(axis=0)\n b = M.sum(axis=1)\n vara = (a[idx] * x ** 2).sum() - ((a[idx] * x).sum()) ** 2\n varb = (b[idx] * y ** 2).sum() - ((b[idx] * y).sum()) ** 2\n xy = np.outer(x, y)\n ab = np.outer(a[idx], b[idx])\n return (xy * (M - ab)).sum() / np.sqrt(vara * varb)\n", "path": "networkx/algorithms/assortativity/correlation.py"}], "after_files": [{"content": "\"\"\"Node assortativity coefficients and correlation measures.\n\"\"\"\nfrom networkx.algorithms.assortativity.mixing import (\n degree_mixing_matrix,\n attribute_mixing_matrix,\n numeric_mixing_matrix,\n)\nfrom networkx.algorithms.assortativity.pairs import node_degree_xy\n\n__all__ = [\n \"degree_pearson_correlation_coefficient\",\n \"degree_assortativity_coefficient\",\n \"attribute_assortativity_coefficient\",\n \"numeric_assortativity_coefficient\",\n]\n\n\ndef degree_assortativity_coefficient(G, x=\"out\", y=\"in\", weight=None, nodes=None):\n \"\"\"Compute degree assortativity of graph.\n\n Assortativity measures the similarity of connections\n in the graph with respect to the node degree.\n\n Parameters\n ----------\n G : NetworkX graph\n\n x: string ('in','out')\n The degree type for source node (directed graphs only).\n\n y: string ('in','out')\n The degree type for target node (directed graphs only).\n\n weight: string or None, optional (default=None)\n The edge attribute that holds the numerical value used\n as a weight. If None, then each edge has weight 1.\n The degree is the sum of the edge weights adjacent to the node.\n\n nodes: list or iterable (optional)\n Compute degree assortativity only for nodes in container.\n The default is all nodes.\n\n Returns\n -------\n r : float\n Assortativity of graph by degree.\n\n Examples\n --------\n >>> G = nx.path_graph(4)\n >>> r = nx.degree_assortativity_coefficient(G)\n >>> print(f\"{r:3.1f}\")\n -0.5\n\n See Also\n --------\n attribute_assortativity_coefficient\n numeric_assortativity_coefficient\n degree_mixing_dict\n degree_mixing_matrix\n\n Notes\n -----\n This computes Eq. (21) in Ref. [1]_ , where e is the joint\n probability distribution (mixing matrix) of the degrees. If G is\n directed than the matrix e is the joint probability of the\n user-specified degree type for the source and target.\n\n References\n ----------\n .. [1] M. E. J. Newman, Mixing patterns in networks,\n Physical Review E, 67 026126, 2003\n .. [2] Foster, J.G., Foster, D.V., Grassberger, P. 
& Paczuski, M.\n Edge direction and the structure of networks, PNAS 107, 10815-20 (2010).\n \"\"\"\n if nodes is None:\n nodes = G.nodes\n\n degrees = None\n\n if G.is_directed():\n indeg = (\n set([d for _, d in G.in_degree(nodes, weight=weight)])\n if \"in\" in (x, y)\n else set()\n )\n outdeg = (\n set([d for _, d in G.out_degree(nodes, weight=weight)])\n if \"out\" in (x, y)\n else set()\n )\n degrees = set.union(indeg, outdeg)\n else:\n degrees = set([d for _, d in G.degree(nodes, weight=weight)])\n\n mapping = {d: i for i, d, in enumerate(degrees)}\n M = degree_mixing_matrix(G, x=x, y=y, nodes=nodes, weight=weight, mapping=mapping)\n\n return numeric_ac(M, mapping=mapping)\n\n\ndef degree_pearson_correlation_coefficient(G, x=\"out\", y=\"in\", weight=None, nodes=None):\n \"\"\"Compute degree assortativity of graph.\n\n Assortativity measures the similarity of connections\n in the graph with respect to the node degree.\n\n This is the same as degree_assortativity_coefficient but uses the\n potentially faster scipy.stats.pearsonr function.\n\n Parameters\n ----------\n G : NetworkX graph\n\n x: string ('in','out')\n The degree type for source node (directed graphs only).\n\n y: string ('in','out')\n The degree type for target node (directed graphs only).\n\n weight: string or None, optional (default=None)\n The edge attribute that holds the numerical value used\n as a weight. If None, then each edge has weight 1.\n The degree is the sum of the edge weights adjacent to the node.\n\n nodes: list or iterable (optional)\n Compute pearson correlation of degrees only for specified nodes.\n The default is all nodes.\n\n Returns\n -------\n r : float\n Assortativity of graph by degree.\n\n Examples\n --------\n >>> G = nx.path_graph(4)\n >>> r = nx.degree_pearson_correlation_coefficient(G)\n >>> print(f\"{r:3.1f}\")\n -0.5\n\n Notes\n -----\n This calls scipy.stats.pearsonr.\n\n References\n ----------\n .. [1] M. E. J. Newman, Mixing patterns in networks\n Physical Review E, 67 026126, 2003\n .. [2] Foster, J.G., Foster, D.V., Grassberger, P. & Paczuski, M.\n Edge direction and the structure of networks, PNAS 107, 10815-20 (2010).\n \"\"\"\n import scipy as sp\n import scipy.stats # call as sp.stats\n\n xy = node_degree_xy(G, x=x, y=y, nodes=nodes, weight=weight)\n x, y = zip(*xy)\n return sp.stats.pearsonr(x, y)[0]\n\n\ndef attribute_assortativity_coefficient(G, attribute, nodes=None):\n \"\"\"Compute assortativity for node attributes.\n\n Assortativity measures the similarity of connections\n in the graph with respect to the given attribute.\n\n Parameters\n ----------\n G : NetworkX graph\n\n attribute : string\n Node attribute key\n\n nodes: list or iterable (optional)\n Compute attribute assortativity for nodes in container.\n The default is all nodes.\n\n Returns\n -------\n r: float\n Assortativity of graph for given attribute\n\n Examples\n --------\n >>> G = nx.Graph()\n >>> G.add_nodes_from([0, 1], color=\"red\")\n >>> G.add_nodes_from([2, 3], color=\"blue\")\n >>> G.add_edges_from([(0, 1), (2, 3)])\n >>> print(nx.attribute_assortativity_coefficient(G, \"color\"))\n 1.0\n\n Notes\n -----\n This computes Eq. (2) in Ref. [1]_ , (trace(M)-sum(M^2))/(1-sum(M^2)),\n where M is the joint probability distribution (mixing matrix)\n of the specified attribute.\n\n References\n ----------\n .. [1] M. E. J. 
Newman, Mixing patterns in networks,\n Physical Review E, 67 026126, 2003\n \"\"\"\n M = attribute_mixing_matrix(G, attribute, nodes)\n return attribute_ac(M)\n\n\ndef numeric_assortativity_coefficient(G, attribute, nodes=None):\n \"\"\"Compute assortativity for numerical node attributes.\n\n Assortativity measures the similarity of connections\n in the graph with respect to the given numeric attribute.\n\n Parameters\n ----------\n G : NetworkX graph\n\n attribute : string\n Node attribute key.\n\n nodes: list or iterable (optional)\n Compute numeric assortativity only for attributes of nodes in\n container. The default is all nodes.\n\n Returns\n -------\n r: float\n Assortativity of graph for given attribute\n\n Examples\n --------\n >>> G = nx.Graph()\n >>> G.add_nodes_from([0, 1], size=2)\n >>> G.add_nodes_from([2, 3], size=3)\n >>> G.add_edges_from([(0, 1), (2, 3)])\n >>> print(nx.numeric_assortativity_coefficient(G, \"size\"))\n 1.0\n\n Notes\n -----\n This computes Eq. (21) in Ref. [1]_ , for the mixing matrix\n of the specified attribute.\n\n References\n ----------\n .. [1] M. E. J. Newman, Mixing patterns in networks\n Physical Review E, 67 026126, 2003\n \"\"\"\n if nodes is None:\n nodes = G.nodes\n vals = set(G.nodes[n][attribute] for n in nodes)\n mapping = {d: i for i, d, in enumerate(vals)}\n M = attribute_mixing_matrix(G, attribute, nodes, mapping)\n return numeric_ac(M, mapping)\n\n\ndef attribute_ac(M):\n \"\"\"Compute assortativity for attribute matrix M.\n\n Parameters\n ----------\n M : numpy.ndarray\n 2D ndarray representing the attribute mixing matrix.\n\n Notes\n -----\n This computes Eq. (2) in Ref. [1]_ , (trace(e)-sum(e^2))/(1-sum(e^2)),\n where e is the joint probability distribution (mixing matrix)\n of the specified attribute.\n\n References\n ----------\n .. [1] M. E. J. Newman, Mixing patterns in networks,\n Physical Review E, 67 026126, 2003\n \"\"\"\n if M.sum() != 1.0:\n M = M / M.sum()\n s = (M @ M).sum()\n t = M.trace()\n r = (t - s) / (1 - s)\n return r\n\n\ndef numeric_ac(M, mapping):\n # M is a numpy matrix or array\n # numeric assortativity coefficient, pearsonr\n import numpy as np\n\n if M.sum() != 1.0:\n M = M / float(M.sum())\n nx, ny = M.shape # nx=ny\n x = np.array(list(mapping.keys()))\n y = x # x and y have the same support\n idx = list(mapping.values())\n a = M.sum(axis=0)\n b = M.sum(axis=1)\n vara = (a[idx] * x ** 2).sum() - ((a[idx] * x).sum()) ** 2\n varb = (b[idx] * y ** 2).sum() - ((b[idx] * y).sum()) ** 2\n xy = np.outer(x, y)\n ab = np.outer(a[idx], b[idx])\n return (xy * (M - ab)).sum() / np.sqrt(vara * varb)\n", "path": "networkx/algorithms/assortativity/correlation.py"}]} | 3,561 | 302 |
gh_patches_debug_10306 | rasdani/github-patches | git_diff | pre-commit__pre-commit-1474 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xc0 in position 9: invalid start byte
Hi there! I get this error on commit.
Probably because of Cyrillic symbols in the user name: 'C:\\Users\\Администратор\...'.
Is there a way to avoid this problem except by renaming the user?
Thanks for your cool product!
### version information
```
pre-commit version: 2.4.0
sys.version:
3.6.8 (tags/v3.6.8:3c6b436a57, Dec 24 2018, 00:16:47) [MSC v.1916 64 bit (AMD64)]
sys.executable: c:\program files\git\dev\core\venv\scripts\python.exe
os.name: nt
sys.platform: win32
```
### error information
```
An unexpected error has occurred: UnicodeDecodeError: 'utf-8' codec can't decode byte 0xc0 in position 9: invalid start byte
```
```
Traceback (most recent call last):
File "c:\program files\git\dev\core\venv\lib\site-packages\pre_commit\error_handler.py", line 56, in error_handler
yield
File "c:\program files\git\dev\core\venv\lib\site-packages\pre_commit\main.py", line 372, in main
args=args.rest[1:],
File "c:\program files\git\dev\core\venv\lib\site-packages\pre_commit\commands\hook_impl.py", line 217, in hook_impl
return retv | run(config, store, ns)
File "c:\program files\git\dev\core\venv\lib\site-packages\pre_commit\commands\run.py", line 357, in run
for hook in all_hooks(config, store)
File "c:\program files\git\dev\core\venv\lib\site-packages\pre_commit\repository.py", line 206, in all_hooks
for repo in root_config['repos']
File "c:\program files\git\dev\core\venv\lib\site-packages\pre_commit\repository.py", line 207, in <genexpr>
for hook in _repository_hooks(repo, store, root_config)
File "c:\program files\git\dev\core\venv\lib\site-packages\pre_commit\repository.py", line 182, in _repository_hooks
return _cloned_repository_hooks(repo_config, store, root_config)
File "c:\program files\git\dev\core\venv\lib\site-packages\pre_commit\repository.py", line 162, in _cloned_repository_hooks
for hook in repo_config['hooks']
File "c:\program files\git\dev\core\venv\lib\site-packages\pre_commit\repository.py", line 162, in <listcomp>
for hook in repo_config['hooks']
File "c:\program files\git\dev\core\venv\lib\site-packages\pre_commit\repository.py", line 110, in _hook
ret['language_version'] = languages[lang].get_default_version()
File "c:\program files\git\dev\core\venv\lib\site-packages\pre_commit\languages\python.py", line 113, in get_default_version
if _find_by_py_launcher(exe):
File "c:\program files\git\dev\core\venv\lib\site-packages\pre_commit\languages\python.py", line 72, in _find_by_py_launcher
return cmd_output(*cmd)[1].strip()
File "c:\program files\git\dev\core\venv\lib\site-packages\pre_commit\util.py", line 164, in cmd_output
stdout = stdout_b.decode() if stdout_b is not None else None
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xc0 in position 9: invalid start byte
```
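Byte 0xc0 is the cp1251 encoding of the Cyrillic 'А' that starts 'Администратор' (position 9 is the first character after 'C:\\Users\\'), while `cmd_output` decodes the launcher's stdout as UTF-8. A standalone sketch of one way to avoid the mismatch, forcing the child interpreter to emit UTF-8 (an illustration, not pre-commit's actual helper):
```python
import os
import subprocess

def py_launcher_executable(num: str) -> str:
    # Windows-only: ask the py launcher for sys.executable, forcing UTF-8
    # output so the path decodes correctly for non-ASCII user names.
    env = dict(os.environ, PYTHONIOENCODING="UTF-8")
    out = subprocess.run(
        ("py", f"-{num}", "-c", "import sys; print(sys.executable)"),
        env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True,
    )
    return out.stdout.decode("UTF-8").strip()
```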
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/languages/python.py`
Content:
```
1 import contextlib
2 import functools
3 import os
4 import sys
5 from typing import Dict
6 from typing import Generator
7 from typing import Optional
8 from typing import Sequence
9 from typing import Tuple
10
11 import pre_commit.constants as C
12 from pre_commit.envcontext import envcontext
13 from pre_commit.envcontext import PatchesT
14 from pre_commit.envcontext import UNSET
15 from pre_commit.envcontext import Var
16 from pre_commit.hook import Hook
17 from pre_commit.languages import helpers
18 from pre_commit.parse_shebang import find_executable
19 from pre_commit.prefix import Prefix
20 from pre_commit.util import CalledProcessError
21 from pre_commit.util import clean_path_on_failure
22 from pre_commit.util import cmd_output
23 from pre_commit.util import cmd_output_b
24
25 ENVIRONMENT_DIR = 'py_env'
26
27
28 @functools.lru_cache(maxsize=None)
29 def _version_info(exe: str) -> str:
30 prog = 'import sys;print(".".join(str(p) for p in sys.version_info))'
31 try:
32 return cmd_output(exe, '-S', '-c', prog)[1].strip()
33 except CalledProcessError:
34 return f'<<error retrieving version from {exe}>>'
35
36
37 def _read_pyvenv_cfg(filename: str) -> Dict[str, str]:
38 ret = {}
39 with open(filename) as f:
40 for line in f:
41 try:
42 k, v = line.split('=')
43 except ValueError: # blank line / comment / etc.
44 continue
45 else:
46 ret[k.strip()] = v.strip()
47 return ret
48
49
50 def bin_dir(venv: str) -> str:
51 """On windows there's a different directory for the virtualenv"""
52 bin_part = 'Scripts' if os.name == 'nt' else 'bin'
53 return os.path.join(venv, bin_part)
54
55
56 def get_env_patch(venv: str) -> PatchesT:
57 return (
58 ('PIP_DISABLE_PIP_VERSION_CHECK', '1'),
59 ('PYTHONHOME', UNSET),
60 ('VIRTUAL_ENV', venv),
61 ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),
62 )
63
64
65 def _find_by_py_launcher(
66 version: str,
67 ) -> Optional[str]: # pragma: no cover (windows only)
68 if version.startswith('python'):
69 num = version[len('python'):]
70 try:
71 cmd = ('py', f'-{num}', '-c', 'import sys; print(sys.executable)')
72 return cmd_output(*cmd)[1].strip()
73 except CalledProcessError:
74 pass
75 return None
76
77
78 def _find_by_sys_executable() -> Optional[str]:
79 def _norm(path: str) -> Optional[str]:
80 _, exe = os.path.split(path.lower())
81 exe, _, _ = exe.partition('.exe')
82 if exe not in {'python', 'pythonw'} and find_executable(exe):
83 return exe
84 return None
85
86 # On linux, I see these common sys.executables:
87 #
88 # system `python`: /usr/bin/python -> python2.7
89 # system `python2`: /usr/bin/python2 -> python2.7
90 # virtualenv v: v/bin/python (will not return from this loop)
91 # virtualenv v -ppython2: v/bin/python -> python2
92 # virtualenv v -ppython2.7: v/bin/python -> python2.7
93 # virtualenv v -ppypy: v/bin/python -> v/bin/pypy
94 for path in (sys.executable, os.path.realpath(sys.executable)):
95 exe = _norm(path)
96 if exe:
97 return exe
98 return None
99
100
101 @functools.lru_cache(maxsize=1)
102 def get_default_version() -> str: # pragma: no cover (platform dependent)
103 # First attempt from `sys.executable` (or the realpath)
104 exe = _find_by_sys_executable()
105 if exe:
106 return exe
107
108 # Next try the `pythonX.X` executable
109 exe = f'python{sys.version_info[0]}.{sys.version_info[1]}'
110 if find_executable(exe):
111 return exe
112
113 if _find_by_py_launcher(exe):
114 return exe
115
116 # Give a best-effort try for windows
117 default_folder_name = exe.replace('.', '')
118 if os.path.exists(fr'C:\{default_folder_name}\python.exe'):
119 return exe
120
121 # We tried!
122 return C.DEFAULT
123
124
125 def _sys_executable_matches(version: str) -> bool:
126 if version == 'python':
127 return True
128 elif not version.startswith('python'):
129 return False
130
131 try:
132 info = tuple(int(p) for p in version[len('python'):].split('.'))
133 except ValueError:
134 return False
135
136 return sys.version_info[:len(info)] == info
137
138
139 def norm_version(version: str) -> str:
140 if version == C.DEFAULT:
141 return os.path.realpath(sys.executable)
142
143 # first see if our current executable is appropriate
144 if _sys_executable_matches(version):
145 return sys.executable
146
147 if os.name == 'nt': # pragma: no cover (windows)
148 version_exec = _find_by_py_launcher(version)
149 if version_exec:
150 return version_exec
151
152 # Try looking up by name
153 version_exec = find_executable(version)
154 if version_exec and version_exec != version:
155 return version_exec
156
157 # If it is in the form pythonx.x search in the default
158 # place on windows
159 if version.startswith('python'):
160 default_folder_name = version.replace('.', '')
161 return fr'C:\{default_folder_name}\python.exe'
162
163 # Otherwise assume it is a path
164 return os.path.expanduser(version)
165
166
167 @contextlib.contextmanager
168 def in_env(
169 prefix: Prefix,
170 language_version: str,
171 ) -> Generator[None, None, None]:
172 directory = helpers.environment_dir(ENVIRONMENT_DIR, language_version)
173 envdir = prefix.path(directory)
174 with envcontext(get_env_patch(envdir)):
175 yield
176
177
178 def healthy(prefix: Prefix, language_version: str) -> bool:
179 directory = helpers.environment_dir(ENVIRONMENT_DIR, language_version)
180 envdir = prefix.path(directory)
181 pyvenv_cfg = os.path.join(envdir, 'pyvenv.cfg')
182
183 # created with "old" virtualenv
184 if not os.path.exists(pyvenv_cfg):
185 return False
186
187 exe_name = 'python.exe' if sys.platform == 'win32' else 'python'
188 py_exe = prefix.path(bin_dir(envdir), exe_name)
189 cfg = _read_pyvenv_cfg(pyvenv_cfg)
190
191 return (
192 'version_info' in cfg and
193 _version_info(py_exe) == cfg['version_info'] and (
194 'base-executable' not in cfg or
195 _version_info(cfg['base-executable']) == cfg['version_info']
196 )
197 )
198
199
200 def install_environment(
201 prefix: Prefix,
202 version: str,
203 additional_dependencies: Sequence[str],
204 ) -> None:
205 envdir = prefix.path(helpers.environment_dir(ENVIRONMENT_DIR, version))
206 python = norm_version(version)
207 venv_cmd = (sys.executable, '-mvirtualenv', envdir, '-p', python)
208 install_cmd = ('python', '-mpip', 'install', '.', *additional_dependencies)
209
210 with clean_path_on_failure(envdir):
211 cmd_output_b(*venv_cmd, cwd='/')
212 with in_env(prefix, version):
213 helpers.run_setup_cmd(prefix, install_cmd)
214
215
216 def run_hook(
217 hook: Hook,
218 file_args: Sequence[str],
219 color: bool,
220 ) -> Tuple[int, bytes]:
221 with in_env(hook.prefix, hook.language_version):
222 return helpers.run_xargs(hook, hook.cmd, file_args, color=color)
223
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/languages/python.py b/pre_commit/languages/python.py
--- a/pre_commit/languages/python.py
+++ b/pre_commit/languages/python.py
@@ -67,9 +67,10 @@
) -> Optional[str]: # pragma: no cover (windows only)
if version.startswith('python'):
num = version[len('python'):]
+ cmd = ('py', f'-{num}', '-c', 'import sys; print(sys.executable)')
+ env = dict(os.environ, PYTHONIOENCODING='UTF-8')
try:
- cmd = ('py', f'-{num}', '-c', 'import sys; print(sys.executable)')
- return cmd_output(*cmd)[1].strip()
+ return cmd_output(*cmd, env=env)[1].strip()
except CalledProcessError:
pass
return None
| {"golden_diff": "diff --git a/pre_commit/languages/python.py b/pre_commit/languages/python.py\n--- a/pre_commit/languages/python.py\n+++ b/pre_commit/languages/python.py\n@@ -67,9 +67,10 @@\n ) -> Optional[str]: # pragma: no cover (windows only)\n if version.startswith('python'):\n num = version[len('python'):]\n+ cmd = ('py', f'-{num}', '-c', 'import sys; print(sys.executable)')\n+ env = dict(os.environ, PYTHONIOENCODING='UTF-8')\n try:\n- cmd = ('py', f'-{num}', '-c', 'import sys; print(sys.executable)')\n- return cmd_output(*cmd)[1].strip()\n+ return cmd_output(*cmd, env=env)[1].strip()\n except CalledProcessError:\n pass\n return None\n", "issue": "UnicodeDecodeError: 'utf-8' codec can't decode byte 0xc0 in position 9: invalid start byte\nHi, there! Get such error on commit.\r\n\r\nProbably, because of cyrillic symbols in user name: 'C:\\\\Users\\\\\u0410\u0434\u043c\u0438\u043d\u0438\u0441\u0442\u0440\u0430\u0442\u043e\u0440\\...'.\r\n\r\nIs there a way to avoid this problem exept renaming user?\r\n\r\nThanks for your cool product!\r\n\r\n### version information\r\n\r\n```\r\npre-commit version: 2.4.0\r\nsys.version:\r\n 3.6.8 (tags/v3.6.8:3c6b436a57, Dec 24 2018, 00:16:47) [MSC v.1916 64 bit (AMD64)]\r\nsys.executable: c:\\program files\\git\\dev\\core\\venv\\scripts\\python.exe\r\nos.name: nt\r\nsys.platform: win32\r\n```\r\n\r\n### error information\r\n\r\n```\r\nAn unexpected error has occurred: UnicodeDecodeError: 'utf-8' codec can't decode byte 0xc0 in position 9: invalid start byte\r\n```\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"c:\\program files\\git\\dev\\core\\venv\\lib\\site-packages\\pre_commit\\error_handler.py\", line 56, in error_handler\r\n yield\r\n File \"c:\\program files\\git\\dev\\core\\venv\\lib\\site-packages\\pre_commit\\main.py\", line 372, in main\r\n args=args.rest[1:],\r\n File \"c:\\program files\\git\\dev\\core\\venv\\lib\\site-packages\\pre_commit\\commands\\hook_impl.py\", line 217, in hook_impl\r\n return retv | run(config, store, ns)\r\n File \"c:\\program files\\git\\dev\\core\\venv\\lib\\site-packages\\pre_commit\\commands\\run.py\", line 357, in run\r\n for hook in all_hooks(config, store)\r\n File \"c:\\program files\\git\\dev\\core\\venv\\lib\\site-packages\\pre_commit\\repository.py\", line 206, in all_hooks\r\n for repo in root_config['repos']\r\n File \"c:\\program files\\git\\dev\\core\\venv\\lib\\site-packages\\pre_commit\\repository.py\", line 207, in <genexpr>\r\n for hook in _repository_hooks(repo, store, root_config)\r\n File \"c:\\program files\\git\\dev\\core\\venv\\lib\\site-packages\\pre_commit\\repository.py\", line 182, in _repository_hooks\r\n return _cloned_repository_hooks(repo_config, store, root_config)\r\n File \"c:\\program files\\git\\dev\\core\\venv\\lib\\site-packages\\pre_commit\\repository.py\", line 162, in _cloned_repository_hooks\r\n for hook in repo_config['hooks']\r\n File \"c:\\program files\\git\\dev\\core\\venv\\lib\\site-packages\\pre_commit\\repository.py\", line 162, in <listcomp>\r\n for hook in repo_config['hooks']\r\n File \"c:\\program files\\git\\dev\\core\\venv\\lib\\site-packages\\pre_commit\\repository.py\", line 110, in _hook\r\n ret['language_version'] = languages[lang].get_default_version()\r\n File \"c:\\program files\\git\\dev\\core\\venv\\lib\\site-packages\\pre_commit\\languages\\python.py\", line 113, in get_default_version\r\n if _find_by_py_launcher(exe):\r\n File \"c:\\program files\\git\\dev\\core\\venv\\lib\\site-packages\\pre_commit\\languages\\python.py\", line 72, 
in _find_by_py_launcher\r\n return cmd_output(*cmd)[1].strip()\r\n File \"c:\\program files\\git\\dev\\core\\venv\\lib\\site-packages\\pre_commit\\util.py\", line 164, in cmd_output\r\n stdout = stdout_b.decode() if stdout_b is not None else None\r\nUnicodeDecodeError: 'utf-8' codec can't decode byte 0xc0 in position 9: invalid start byte\r\n\r\n```\n", "before_files": [{"content": "import contextlib\nimport functools\nimport os\nimport sys\nfrom typing import Dict\nfrom typing import Generator\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.parse_shebang import find_executable\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\n\nENVIRONMENT_DIR = 'py_env'\n\n\[email protected]_cache(maxsize=None)\ndef _version_info(exe: str) -> str:\n prog = 'import sys;print(\".\".join(str(p) for p in sys.version_info))'\n try:\n return cmd_output(exe, '-S', '-c', prog)[1].strip()\n except CalledProcessError:\n return f'<<error retrieving version from {exe}>>'\n\n\ndef _read_pyvenv_cfg(filename: str) -> Dict[str, str]:\n ret = {}\n with open(filename) as f:\n for line in f:\n try:\n k, v = line.split('=')\n except ValueError: # blank line / comment / etc.\n continue\n else:\n ret[k.strip()] = v.strip()\n return ret\n\n\ndef bin_dir(venv: str) -> str:\n \"\"\"On windows there's a different directory for the virtualenv\"\"\"\n bin_part = 'Scripts' if os.name == 'nt' else 'bin'\n return os.path.join(venv, bin_part)\n\n\ndef get_env_patch(venv: str) -> PatchesT:\n return (\n ('PIP_DISABLE_PIP_VERSION_CHECK', '1'),\n ('PYTHONHOME', UNSET),\n ('VIRTUAL_ENV', venv),\n ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),\n )\n\n\ndef _find_by_py_launcher(\n version: str,\n) -> Optional[str]: # pragma: no cover (windows only)\n if version.startswith('python'):\n num = version[len('python'):]\n try:\n cmd = ('py', f'-{num}', '-c', 'import sys; print(sys.executable)')\n return cmd_output(*cmd)[1].strip()\n except CalledProcessError:\n pass\n return None\n\n\ndef _find_by_sys_executable() -> Optional[str]:\n def _norm(path: str) -> Optional[str]:\n _, exe = os.path.split(path.lower())\n exe, _, _ = exe.partition('.exe')\n if exe not in {'python', 'pythonw'} and find_executable(exe):\n return exe\n return None\n\n # On linux, I see these common sys.executables:\n #\n # system `python`: /usr/bin/python -> python2.7\n # system `python2`: /usr/bin/python2 -> python2.7\n # virtualenv v: v/bin/python (will not return from this loop)\n # virtualenv v -ppython2: v/bin/python -> python2\n # virtualenv v -ppython2.7: v/bin/python -> python2.7\n # virtualenv v -ppypy: v/bin/python -> v/bin/pypy\n for path in (sys.executable, os.path.realpath(sys.executable)):\n exe = _norm(path)\n if exe:\n return exe\n return None\n\n\[email protected]_cache(maxsize=1)\ndef get_default_version() -> str: # pragma: no cover (platform dependent)\n # First attempt from `sys.executable` (or the realpath)\n exe = _find_by_sys_executable()\n if exe:\n return exe\n\n # Next try the `pythonX.X` executable\n exe = 
f'python{sys.version_info[0]}.{sys.version_info[1]}'\n if find_executable(exe):\n return exe\n\n if _find_by_py_launcher(exe):\n return exe\n\n # Give a best-effort try for windows\n default_folder_name = exe.replace('.', '')\n if os.path.exists(fr'C:\\{default_folder_name}\\python.exe'):\n return exe\n\n # We tried!\n return C.DEFAULT\n\n\ndef _sys_executable_matches(version: str) -> bool:\n if version == 'python':\n return True\n elif not version.startswith('python'):\n return False\n\n try:\n info = tuple(int(p) for p in version[len('python'):].split('.'))\n except ValueError:\n return False\n\n return sys.version_info[:len(info)] == info\n\n\ndef norm_version(version: str) -> str:\n if version == C.DEFAULT:\n return os.path.realpath(sys.executable)\n\n # first see if our current executable is appropriate\n if _sys_executable_matches(version):\n return sys.executable\n\n if os.name == 'nt': # pragma: no cover (windows)\n version_exec = _find_by_py_launcher(version)\n if version_exec:\n return version_exec\n\n # Try looking up by name\n version_exec = find_executable(version)\n if version_exec and version_exec != version:\n return version_exec\n\n # If it is in the form pythonx.x search in the default\n # place on windows\n if version.startswith('python'):\n default_folder_name = version.replace('.', '')\n return fr'C:\\{default_folder_name}\\python.exe'\n\n # Otherwise assume it is a path\n return os.path.expanduser(version)\n\n\[email protected]\ndef in_env(\n prefix: Prefix,\n language_version: str,\n) -> Generator[None, None, None]:\n directory = helpers.environment_dir(ENVIRONMENT_DIR, language_version)\n envdir = prefix.path(directory)\n with envcontext(get_env_patch(envdir)):\n yield\n\n\ndef healthy(prefix: Prefix, language_version: str) -> bool:\n directory = helpers.environment_dir(ENVIRONMENT_DIR, language_version)\n envdir = prefix.path(directory)\n pyvenv_cfg = os.path.join(envdir, 'pyvenv.cfg')\n\n # created with \"old\" virtualenv\n if not os.path.exists(pyvenv_cfg):\n return False\n\n exe_name = 'python.exe' if sys.platform == 'win32' else 'python'\n py_exe = prefix.path(bin_dir(envdir), exe_name)\n cfg = _read_pyvenv_cfg(pyvenv_cfg)\n\n return (\n 'version_info' in cfg and\n _version_info(py_exe) == cfg['version_info'] and (\n 'base-executable' not in cfg or\n _version_info(cfg['base-executable']) == cfg['version_info']\n )\n )\n\n\ndef install_environment(\n prefix: Prefix,\n version: str,\n additional_dependencies: Sequence[str],\n) -> None:\n envdir = prefix.path(helpers.environment_dir(ENVIRONMENT_DIR, version))\n python = norm_version(version)\n venv_cmd = (sys.executable, '-mvirtualenv', envdir, '-p', python)\n install_cmd = ('python', '-mpip', 'install', '.', *additional_dependencies)\n\n with clean_path_on_failure(envdir):\n cmd_output_b(*venv_cmd, cwd='/')\n with in_env(prefix, version):\n helpers.run_setup_cmd(prefix, install_cmd)\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> Tuple[int, bytes]:\n with in_env(hook.prefix, hook.language_version):\n return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\n", "path": "pre_commit/languages/python.py"}], "after_files": [{"content": "import contextlib\nimport functools\nimport os\nimport sys\nfrom typing import Dict\nfrom typing import Generator\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom 
pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.parse_shebang import find_executable\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\n\nENVIRONMENT_DIR = 'py_env'\n\n\[email protected]_cache(maxsize=None)\ndef _version_info(exe: str) -> str:\n prog = 'import sys;print(\".\".join(str(p) for p in sys.version_info))'\n try:\n return cmd_output(exe, '-S', '-c', prog)[1].strip()\n except CalledProcessError:\n return f'<<error retrieving version from {exe}>>'\n\n\ndef _read_pyvenv_cfg(filename: str) -> Dict[str, str]:\n ret = {}\n with open(filename) as f:\n for line in f:\n try:\n k, v = line.split('=')\n except ValueError: # blank line / comment / etc.\n continue\n else:\n ret[k.strip()] = v.strip()\n return ret\n\n\ndef bin_dir(venv: str) -> str:\n \"\"\"On windows there's a different directory for the virtualenv\"\"\"\n bin_part = 'Scripts' if os.name == 'nt' else 'bin'\n return os.path.join(venv, bin_part)\n\n\ndef get_env_patch(venv: str) -> PatchesT:\n return (\n ('PIP_DISABLE_PIP_VERSION_CHECK', '1'),\n ('PYTHONHOME', UNSET),\n ('VIRTUAL_ENV', venv),\n ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),\n )\n\n\ndef _find_by_py_launcher(\n version: str,\n) -> Optional[str]: # pragma: no cover (windows only)\n if version.startswith('python'):\n num = version[len('python'):]\n cmd = ('py', f'-{num}', '-c', 'import sys; print(sys.executable)')\n env = dict(os.environ, PYTHONIOENCODING='UTF-8')\n try:\n return cmd_output(*cmd, env=env)[1].strip()\n except CalledProcessError:\n pass\n return None\n\n\ndef _find_by_sys_executable() -> Optional[str]:\n def _norm(path: str) -> Optional[str]:\n _, exe = os.path.split(path.lower())\n exe, _, _ = exe.partition('.exe')\n if exe not in {'python', 'pythonw'} and find_executable(exe):\n return exe\n return None\n\n # On linux, I see these common sys.executables:\n #\n # system `python`: /usr/bin/python -> python2.7\n # system `python2`: /usr/bin/python2 -> python2.7\n # virtualenv v: v/bin/python (will not return from this loop)\n # virtualenv v -ppython2: v/bin/python -> python2\n # virtualenv v -ppython2.7: v/bin/python -> python2.7\n # virtualenv v -ppypy: v/bin/python -> v/bin/pypy\n for path in (sys.executable, os.path.realpath(sys.executable)):\n exe = _norm(path)\n if exe:\n return exe\n return None\n\n\[email protected]_cache(maxsize=1)\ndef get_default_version() -> str: # pragma: no cover (platform dependent)\n # First attempt from `sys.executable` (or the realpath)\n exe = _find_by_sys_executable()\n if exe:\n return exe\n\n # Next try the `pythonX.X` executable\n exe = f'python{sys.version_info[0]}.{sys.version_info[1]}'\n if find_executable(exe):\n return exe\n\n if _find_by_py_launcher(exe):\n return exe\n\n # Give a best-effort try for windows\n default_folder_name = exe.replace('.', '')\n if os.path.exists(fr'C:\\{default_folder_name}\\python.exe'):\n return exe\n\n # We tried!\n return C.DEFAULT\n\n\ndef _sys_executable_matches(version: str) -> bool:\n if version == 'python':\n return True\n elif not version.startswith('python'):\n return False\n\n try:\n info = tuple(int(p) for p in version[len('python'):].split('.'))\n except ValueError:\n return False\n\n return sys.version_info[:len(info)] == info\n\n\ndef norm_version(version: str) -> str:\n if 
version == C.DEFAULT:\n return os.path.realpath(sys.executable)\n\n # first see if our current executable is appropriate\n if _sys_executable_matches(version):\n return sys.executable\n\n if os.name == 'nt': # pragma: no cover (windows)\n version_exec = _find_by_py_launcher(version)\n if version_exec:\n return version_exec\n\n # Try looking up by name\n version_exec = find_executable(version)\n if version_exec and version_exec != version:\n return version_exec\n\n # If it is in the form pythonx.x search in the default\n # place on windows\n if version.startswith('python'):\n default_folder_name = version.replace('.', '')\n return fr'C:\\{default_folder_name}\\python.exe'\n\n # Otherwise assume it is a path\n return os.path.expanduser(version)\n\n\[email protected]\ndef in_env(\n prefix: Prefix,\n language_version: str,\n) -> Generator[None, None, None]:\n directory = helpers.environment_dir(ENVIRONMENT_DIR, language_version)\n envdir = prefix.path(directory)\n with envcontext(get_env_patch(envdir)):\n yield\n\n\ndef healthy(prefix: Prefix, language_version: str) -> bool:\n directory = helpers.environment_dir(ENVIRONMENT_DIR, language_version)\n envdir = prefix.path(directory)\n pyvenv_cfg = os.path.join(envdir, 'pyvenv.cfg')\n\n # created with \"old\" virtualenv\n if not os.path.exists(pyvenv_cfg):\n return False\n\n exe_name = 'python.exe' if sys.platform == 'win32' else 'python'\n py_exe = prefix.path(bin_dir(envdir), exe_name)\n cfg = _read_pyvenv_cfg(pyvenv_cfg)\n\n return (\n 'version_info' in cfg and\n _version_info(py_exe) == cfg['version_info'] and (\n 'base-executable' not in cfg or\n _version_info(cfg['base-executable']) == cfg['version_info']\n )\n )\n\n\ndef install_environment(\n prefix: Prefix,\n version: str,\n additional_dependencies: Sequence[str],\n) -> None:\n envdir = prefix.path(helpers.environment_dir(ENVIRONMENT_DIR, version))\n python = norm_version(version)\n venv_cmd = (sys.executable, '-mvirtualenv', envdir, '-p', python)\n install_cmd = ('python', '-mpip', 'install', '.', *additional_dependencies)\n\n with clean_path_on_failure(envdir):\n cmd_output_b(*venv_cmd, cwd='/')\n with in_env(prefix, version):\n helpers.run_setup_cmd(prefix, install_cmd)\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> Tuple[int, bytes]:\n with in_env(hook.prefix, hook.language_version):\n return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\n", "path": "pre_commit/languages/python.py"}]} | 3,438 | 190 |
gh_patches_debug_1296 | rasdani/github-patches | git_diff | wandb__wandb-424 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Install issue on DLAMI images, conflict with PyYAML
wandb has a dependency conflict when installing on AWS Deep Learning images (DLAMI v23).
You can get around it with 'pip install wandb --ignore-installed', but perhaps wandb could also relax the PyYAML version requirement to make life easier (i.e., I can't put wandb in requirements.txt because of this)
```
(pytorch_p36) ubuntu@ip-172-31-28-233:~$ pip install wandb
Collecting wandb
Using cached https://files.pythonhosted.org/packages/6a/d1/af8371f39d9383f4f1e9ba76c8894f75c01d5eddf4ec57bd45952fefab74/wandb-0.8.3-py2.py3-none-any.whl
Collecting watchdog>=0.8.3 (from wandb)
Requirement already satisfied: psutil>=5.0.0 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from wandb) (5.4.5)
Collecting backports.tempfile>=1.0 (from wandb)
Using cached https://files.pythonhosted.org/packages/b4/5c/077f910632476281428fe254807952eb47ca78e720d059a46178c541e669/backports.tempfile-1.0-py2.py3-none-any.whl
Requirement already satisfied: requests>=2.0.0 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from wandb) (2.20.0)
Requirement already satisfied: sentry-sdk>=0.4.0 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from wandb) (0.9.5)
Requirement already satisfied: six>=1.10.0 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from wandb) (1.11.0)
Collecting shortuuid>=0.5.0 (from wandb)
Collecting gql>=0.1.0 (from wandb)
Requirement already satisfied: subprocess32>=3.5.3 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from wandb) (3.5.4)
Collecting GitPython>=1.0.0 (from wandb)
Using cached https://files.pythonhosted.org/packages/fe/e5/fafe827507644c32d6dc553a1c435cdf882e0c28918a5bab29f7fbebfb70/GitPython-2.1.11-py2.py3-none-any.whl
Requirement already satisfied: docker-pycreds>=0.4.0 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from wandb) (0.4.0)
Requirement already satisfied: nvidia-ml-py3>=7.352.0 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from wandb) (7.352.0)
Requirement already satisfied: Click>=7.0 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from wandb) (7.0)
Requirement already satisfied: python-dateutil>=2.6.1 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from wandb) (2.7.3)
Collecting PyYAML>=4.2b4 (from wandb)
Requirement already satisfied: argh>=0.24.1 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from watchdog>=0.8.3->wandb) (0.26.2)
Collecting pathtools>=0.1.1 (from watchdog>=0.8.3->wandb)
Collecting backports.weakref (from backports.tempfile>=1.0->wandb)
Using cached https://files.pythonhosted.org/packages/88/ec/f598b633c3d5ffe267aaada57d961c94fdfa183c5c3ebda2b6d151943db6/backports.weakref-1.0.post1-py2.py3-none-any.whl
Requirement already satisfied: urllib3<1.25,>=1.21.1 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from requests>=2.0.0->wandb) (1.23)
Requirement already satisfied: certifi>=2017.4.17 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from requests>=2.0.0->wandb) (2019.3.9)
Requirement already satisfied: idna<2.8,>=2.5 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from requests>=2.0.0->wandb) (2.6)
Requirement already satisfied: chardet<3.1.0,>=3.0.2 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from requests>=2.0.0->wandb) (3.0.4)
Collecting graphql-core>=0.5.0 (from gql>=0.1.0->wandb)
Using cached https://files.pythonhosted.org/packages/f1/88/a4a7bf8ab66c35b146e44d77a1f9fd2c36e0ec9fb1a51581608c16deb6e3/graphql_core-2.2-py2.py3-none-any.whl
Collecting promise>=0.4.0 (from gql>=0.1.0->wandb)
Collecting gitdb2>=2.0.0 (from GitPython>=1.0.0->wandb)
Using cached https://files.pythonhosted.org/packages/da/30/a407568aa8d8f25db817cf50121a958722f3fc5f87e3a6fba1f40c0633e3/gitdb2-2.0.5-py2.py3-none-any.whl
Collecting rx>=1.6.0 (from graphql-core>=0.5.0->gql>=0.1.0->wandb)
Using cached https://files.pythonhosted.org/packages/33/0f/5ef4ac78e2a538cc1b054eb86285fe0bf7a5dbaeaac2c584757c300515e2/Rx-1.6.1-py2.py3-none-any.whl
Collecting smmap2>=2.0.0 (from gitdb2>=2.0.0->GitPython>=1.0.0->wandb)
Using cached https://files.pythonhosted.org/packages/55/d2/866d45e3a121ee15a1dc013824d58072fd5c7799c9c34d01378eb262ca8f/smmap2-2.0.5-py2.py3-none-any.whl
thinc 6.12.1 has requirement msgpack<0.6.0,>=0.5.6, but you'll have msgpack 0.6.0 which is incompatible.
tensorflow 1.13.1 has requirement protobuf>=3.6.1, but you'll have protobuf 3.5.2 which is incompatible.
tensorboard 1.13.1 has requirement protobuf>=3.6.0, but you'll have protobuf 3.5.2 which is incompatible.
docker-compose 1.24.0 has requirement PyYAML<4.3,>=3.10, but you'll have pyyaml 5.1.1 which is incompatible.
Installing collected packages: PyYAML, pathtools, watchdog, backports.weakref, backports.tempfile, shortuuid, rx, promise, graphql-core, gql, smmap2, gitdb2, GitPython, wandb
Found existing installation: PyYAML 3.12
Cannot uninstall 'PyYAML'. It is a distutils installed project and thus we cannot accurately determine which files belong to it which would lead to only a partial uninstall.
You are using pip version 10.0.1, however version 19.1.1 is available.
You should consider upgrading via the 'pip install --upgrade pip' command.
(pytorch_p36) ubuntu@ip-172-31-28-233:~$ echo $?
```
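As a hedged sketch of the suggested relaxation (not the maintainers' wording): the simplest option is to stop pinning PyYAML in `setup.py` and let watchdog declare its own PyYAML requirement, so pip never tries to replace the distutils-installed PyYAML that ships with the DLAMI conda environments. The accepted diff below does exactly this; the snippet only illustrates the resulting requirements list.

```python
# Illustrative excerpt of the proposed requirements list in setup.py.
requirements = [
    'watchdog>=0.8.3',
    # 'PyYAML>=4.2b4',  # dropped: watchdog already depends on PyYAML, and an
    #                   # explicit pin forces pip to upgrade the distutils-installed
    #                   # PyYAML on DLAMI, which it cannot uninstall.
    'psutil>=5.0.0',
    'sentry-sdk>=0.4.0',
]
```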
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 from setuptools import setup
5
6 with open('README.md') as readme_file:
7 readme = readme_file.read()
8
9 requirements = [
10 'backports.tempfile>=1.0',
11 'Click>=7.0',
12 'GitPython>=1.0.0',
13 'gql>=0.1.0',
14 'nvidia-ml-py3>=7.352.0',
15 'python-dateutil>=2.6.1',
16 'requests>=2.0.0',
17 'shortuuid>=0.5.0',
18 'six>=1.10.0',
19 'watchdog>=0.8.3',
20 'PyYAML>=4.2b4', # watchdog depends on pyyaml but doesnt specify safe version
21 'psutil>=5.0.0',
22 'sentry-sdk>=0.4.0',
23 'subprocess32>=3.5.3',
24 'docker-pycreds>=0.4.0',
25 # Removed until we bring back the board
26 # 'flask-cors>=3.0.3',
27 # 'flask-graphql>=1.4.0',
28 # 'graphene>=2.0.0',
29 ]
30
31 test_requirements = [
32 'mock>=2.0.0',
33 'tox-pyenv>=1.0.3'
34 ]
35
36 kubeflow_requirements = ['kubernetes', 'minio', 'google-cloud-storage', 'sh']
37
38 setup(
39 name='wandb',
40 version='0.8.4',
41 description="A CLI and library for interacting with the Weights and Biases API.",
42 long_description=readme,
43 long_description_content_type="text/markdown",
44 author="Weights & Biases",
45 author_email='[email protected]',
46 url='https://github.com/wandb/client',
47 packages=[
48 'wandb'
49 ],
50 package_dir={'wandb': 'wandb'},
51 entry_points={
52 'console_scripts': [
53 'wandb=wandb.cli:cli',
54 'wb=wandb.cli:cli',
55 'wanbd=wandb.cli:cli',
56 'wandb-docker-run=wandb.cli:docker_run'
57 ]
58 },
59 include_package_data=True,
60 install_requires=requirements,
61 license="MIT license",
62 zip_safe=False,
63 keywords='wandb',
64 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
65 classifiers=[
66 'Development Status :: 5 - Production/Stable',
67 'Intended Audience :: Developers',
68 'Intended Audience :: Science/Research',
69 'License :: OSI Approved :: MIT License',
70 'Natural Language :: English',
71 'Programming Language :: Python :: 2',
72 'Programming Language :: Python :: 2.7',
73 'Programming Language :: Python :: 3',
74 'Programming Language :: Python :: 3.4',
75 'Programming Language :: Python :: 3.5',
76 'Programming Language :: Python :: 3.6',
77 'Programming Language :: Python :: 3.7',
78 'Topic :: Scientific/Engineering :: Artificial Intelligence',
79 'Topic :: Software Development :: Libraries :: Python Modules',
80 'Topic :: System :: Logging',
81 'Topic :: System :: Monitoring'
82 ],
83 test_suite='tests',
84 tests_require=test_requirements,
85 extras_require={
86 'kubeflow': kubeflow_requirements
87 }
88 )
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -17,7 +17,6 @@
'shortuuid>=0.5.0',
'six>=1.10.0',
'watchdog>=0.8.3',
- 'PyYAML>=4.2b4', # watchdog depends on pyyaml but doesnt specify safe version
'psutil>=5.0.0',
'sentry-sdk>=0.4.0',
'subprocess32>=3.5.3',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -17,7 +17,6 @@\n 'shortuuid>=0.5.0',\n 'six>=1.10.0',\n 'watchdog>=0.8.3',\n- 'PyYAML>=4.2b4', # watchdog depends on pyyaml but doesnt specify safe version\n 'psutil>=5.0.0',\n 'sentry-sdk>=0.4.0',\n 'subprocess32>=3.5.3',\n", "issue": "Install issue on DLAMI images, conflict with PyYAML\nwandb has a dependency conflict when installing on AWS Deep Learning images -- DLAMI v23\r\nYou can get arround it with 'pip install wandb --ignore-installed', but also perhaps wandb could relax PyYAML version requirement to make life easier (ie, I can't put wandb in requirements.txt because of this)\r\n\r\n```\r\n(pytorch_p36) ubuntu@ip-172-31-28-233:~$ pip install wandb\r\nCollecting wandb\r\n Using cached https://files.pythonhosted.org/packages/6a/d1/af8371f39d9383f4f1e9ba76c8894f75c01d5eddf4ec57bd45952fefab74/wandb-0.8.3-py2.py3-none-any.whl\r\nCollecting watchdog>=0.8.3 (from wandb)\r\nRequirement already satisfied: psutil>=5.0.0 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from wandb) (5.4.5)\r\nCollecting backports.tempfile>=1.0 (from wandb)\r\n Using cached https://files.pythonhosted.org/packages/b4/5c/077f910632476281428fe254807952eb47ca78e720d059a46178c541e669/backports.tempfile-1.0-py2.py3-none-any.whl\r\nRequirement already satisfied: requests>=2.0.0 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from wandb) (2.20.0)\r\nRequirement already satisfied: sentry-sdk>=0.4.0 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from wandb) (0.9.5)\r\nRequirement already satisfied: six>=1.10.0 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from wandb) (1.11.0)\r\nCollecting shortuuid>=0.5.0 (from wandb)\r\nCollecting gql>=0.1.0 (from wandb)\r\nRequirement already satisfied: subprocess32>=3.5.3 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from wandb) (3.5.4)\r\nCollecting GitPython>=1.0.0 (from wandb)\r\n Using cached https://files.pythonhosted.org/packages/fe/e5/fafe827507644c32d6dc553a1c435cdf882e0c28918a5bab29f7fbebfb70/GitPython-2.1.11-py2.py3-none-any.whl\r\nRequirement already satisfied: docker-pycreds>=0.4.0 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from wandb) (0.4.0)\r\nRequirement already satisfied: nvidia-ml-py3>=7.352.0 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from wandb) (7.352.0)\r\nRequirement already satisfied: Click>=7.0 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from wandb) (7.0)\r\nRequirement already satisfied: python-dateutil>=2.6.1 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from wandb) (2.7.3)\r\nCollecting PyYAML>=4.2b4 (from wandb)\r\nRequirement already satisfied: argh>=0.24.1 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from watchdog>=0.8.3->wandb) (0.26.2)\r\nCollecting pathtools>=0.1.1 (from watchdog>=0.8.3->wandb)\r\nCollecting backports.weakref (from backports.tempfile>=1.0->wandb)\r\n Using cached https://files.pythonhosted.org/packages/88/ec/f598b633c3d5ffe267aaada57d961c94fdfa183c5c3ebda2b6d151943db6/backports.weakref-1.0.post1-py2.py3-none-any.whl\r\nRequirement already satisfied: urllib3<1.25,>=1.21.1 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from requests>=2.0.0->wandb) (1.23)\r\nRequirement already satisfied: certifi>=2017.4.17 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from requests>=2.0.0->wandb) (2019.3.9)\r\nRequirement already satisfied: idna<2.8,>=2.5 in 
./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from requests>=2.0.0->wandb) (2.6)\r\nRequirement already satisfied: chardet<3.1.0,>=3.0.2 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from requests>=2.0.0->wandb) (3.0.4)\r\nCollecting graphql-core>=0.5.0 (from gql>=0.1.0->wandb)\r\n Using cached https://files.pythonhosted.org/packages/f1/88/a4a7bf8ab66c35b146e44d77a1f9fd2c36e0ec9fb1a51581608c16deb6e3/graphql_core-2.2-py2.py3-none-any.whl\r\nCollecting promise>=0.4.0 (from gql>=0.1.0->wandb)\r\nCollecting gitdb2>=2.0.0 (from GitPython>=1.0.0->wandb)\r\n Using cached https://files.pythonhosted.org/packages/da/30/a407568aa8d8f25db817cf50121a958722f3fc5f87e3a6fba1f40c0633e3/gitdb2-2.0.5-py2.py3-none-any.whl\r\nCollecting rx>=1.6.0 (from graphql-core>=0.5.0->gql>=0.1.0->wandb)\r\n Using cached https://files.pythonhosted.org/packages/33/0f/5ef4ac78e2a538cc1b054eb86285fe0bf7a5dbaeaac2c584757c300515e2/Rx-1.6.1-py2.py3-none-any.whl\r\nCollecting smmap2>=2.0.0 (from gitdb2>=2.0.0->GitPython>=1.0.0->wandb)\r\n Using cached https://files.pythonhosted.org/packages/55/d2/866d45e3a121ee15a1dc013824d58072fd5c7799c9c34d01378eb262ca8f/smmap2-2.0.5-py2.py3-none-any.whl\r\nthinc 6.12.1 has requirement msgpack<0.6.0,>=0.5.6, but you'll have msgpack 0.6.0 which is incompatible.\r\ntensorflow 1.13.1 has requirement protobuf>=3.6.1, but you'll have protobuf 3.5.2 which is incompatible.\r\ntensorboard 1.13.1 has requirement protobuf>=3.6.0, but you'll have protobuf 3.5.2 which is incompatible.\r\ndocker-compose 1.24.0 has requirement PyYAML<4.3,>=3.10, but you'll have pyyaml 5.1.1 which is incompatible.\r\nInstalling collected packages: PyYAML, pathtools, watchdog, backports.weakref, backports.tempfile, shortuuid, rx, promise, graphql-core, gql, smmap2, gitdb2, GitPython, wandb\r\n Found existing installation: PyYAML 3.12\r\nCannot uninstall 'PyYAML'. 
It is a distutils installed project and thus we cannot accurately determine which files belong to it which would lead to only a partial uninstall.\r\nYou are using pip version 10.0.1, however version 19.1.1 is available.\r\nYou should consider upgrading via the 'pip install --upgrade pip' command.\r\n(pytorch_p36) ubuntu@ip-172-31-28-233:~$ echo $?\r\n\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom setuptools import setup\n\nwith open('README.md') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'backports.tempfile>=1.0',\n 'Click>=7.0',\n 'GitPython>=1.0.0',\n 'gql>=0.1.0',\n 'nvidia-ml-py3>=7.352.0',\n 'python-dateutil>=2.6.1',\n 'requests>=2.0.0',\n 'shortuuid>=0.5.0',\n 'six>=1.10.0',\n 'watchdog>=0.8.3',\n 'PyYAML>=4.2b4', # watchdog depends on pyyaml but doesnt specify safe version\n 'psutil>=5.0.0',\n 'sentry-sdk>=0.4.0',\n 'subprocess32>=3.5.3',\n 'docker-pycreds>=0.4.0',\n # Removed until we bring back the board\n # 'flask-cors>=3.0.3',\n # 'flask-graphql>=1.4.0',\n # 'graphene>=2.0.0',\n]\n\ntest_requirements = [\n 'mock>=2.0.0',\n 'tox-pyenv>=1.0.3'\n]\n\nkubeflow_requirements = ['kubernetes', 'minio', 'google-cloud-storage', 'sh']\n\nsetup(\n name='wandb',\n version='0.8.4',\n description=\"A CLI and library for interacting with the Weights and Biases API.\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n author=\"Weights & Biases\",\n author_email='[email protected]',\n url='https://github.com/wandb/client',\n packages=[\n 'wandb'\n ],\n package_dir={'wandb': 'wandb'},\n entry_points={\n 'console_scripts': [\n 'wandb=wandb.cli:cli',\n 'wb=wandb.cli:cli',\n 'wanbd=wandb.cli:cli',\n 'wandb-docker-run=wandb.cli:docker_run'\n ]\n },\n include_package_data=True,\n install_requires=requirements,\n license=\"MIT license\",\n zip_safe=False,\n keywords='wandb',\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: System :: Logging',\n 'Topic :: System :: Monitoring'\n ],\n test_suite='tests',\n tests_require=test_requirements,\n extras_require={\n 'kubeflow': kubeflow_requirements\n }\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom setuptools import setup\n\nwith open('README.md') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'backports.tempfile>=1.0',\n 'Click>=7.0',\n 'GitPython>=1.0.0',\n 'gql>=0.1.0',\n 'nvidia-ml-py3>=7.352.0',\n 'python-dateutil>=2.6.1',\n 'requests>=2.0.0',\n 'shortuuid>=0.5.0',\n 'six>=1.10.0',\n 'watchdog>=0.8.3',\n 'psutil>=5.0.0',\n 'sentry-sdk>=0.4.0',\n 'subprocess32>=3.5.3',\n 'docker-pycreds>=0.4.0',\n # Removed until we bring back the board\n # 'flask-cors>=3.0.3',\n # 'flask-graphql>=1.4.0',\n # 'graphene>=2.0.0',\n]\n\ntest_requirements = [\n 'mock>=2.0.0',\n 'tox-pyenv>=1.0.3'\n]\n\nkubeflow_requirements = 
['kubernetes', 'minio', 'google-cloud-storage', 'sh']\n\nsetup(\n name='wandb',\n version='0.8.4',\n description=\"A CLI and library for interacting with the Weights and Biases API.\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n author=\"Weights & Biases\",\n author_email='[email protected]',\n url='https://github.com/wandb/client',\n packages=[\n 'wandb'\n ],\n package_dir={'wandb': 'wandb'},\n entry_points={\n 'console_scripts': [\n 'wandb=wandb.cli:cli',\n 'wb=wandb.cli:cli',\n 'wanbd=wandb.cli:cli',\n 'wandb-docker-run=wandb.cli:docker_run'\n ]\n },\n include_package_data=True,\n install_requires=requirements,\n license=\"MIT license\",\n zip_safe=False,\n keywords='wandb',\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: System :: Logging',\n 'Topic :: System :: Monitoring'\n ],\n test_suite='tests',\n tests_require=test_requirements,\n extras_require={\n 'kubeflow': kubeflow_requirements\n }\n)\n", "path": "setup.py"}]} | 3,276 | 127 |
gh_patches_debug_28242 | rasdani/github-patches | git_diff | kivy__kivy-4127 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Crash when Slider is imported before forking process on Mac OS X
Consider
```
#!/usr/bin/env python2
import multiprocessing
from kivy.app import App
from kivy.uix.slider import Slider
class Test(App):
def build(self):
return Slider()
def run_app():
app = Test()
app.run()
running_app = multiprocessing.Process(target=run_app)
running_app.daemon = True
running_app.start()
running_app.join()
```
This currently crashes on Mac OS X:
> *** multi-threaded process forked ***
> crashed on child side of fork pre-exec
> USING_FORK_WITHOUT_EXEC_IS_NOT_SUPPORTED_BY_FILE_MANAGER
This happens because the property `padding` is initialized with `NumericProperty(sp(16))`: the call to `sp` runs as soon as the module is imported and attempts to initialize SDL, which does not survive the subsequent `fork()` on Mac OS X. Cf. [this question on StackOverflow](http://stackoverflow.com/questions/8106002/using-the-python-multiprocessing-module-for-io-with-pygame-on-mac-os-10-7)
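A minimal sketch of the fix direction (mirroring the accepted diff below): pass the size as a density-independent unit string so that nothing touches `kivy.metrics` — and therefore SDL — at module import time; the unit string is resolved later, when the property is initialized on an actual widget in the child process.

```python
from kivy.uix.widget import Widget
from kivy.properties import NumericProperty

class Slider(Widget):
    # '16sp' is converted lazily by the property machinery, so importing this
    # module no longer calls sp() and does not initialize SDL in the parent
    # process before the fork.
    padding = NumericProperty('16sp')
```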
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kivy/uix/slider.py`
Content:
```
1 """
2 Slider
3 ======
4
5 .. image:: images/slider.jpg
6
7 The :class:`Slider` widget looks like a scrollbar. It supports horizontal and
8 vertical orientations, min/max values and a default value.
9
10 To create a slider from -100 to 100 starting from 25::
11
12 from kivy.uix.slider import Slider
13 s = Slider(min=-100, max=100, value=25)
14
15 To create a vertical slider::
16
17 from kivy.uix.slider import Slider
18 s = Slider(orientation='vertical')
19
20 """
21 __all__ = ('Slider', )
22
23 from kivy.uix.widget import Widget
24 from kivy.properties import (NumericProperty, AliasProperty, OptionProperty,
25 ReferenceListProperty, BoundedNumericProperty)
26 from kivy.metrics import sp
27
28
29 class Slider(Widget):
30 """Class for creating a Slider widget.
31
32 Check module documentation for more details.
33 """
34
35 value = NumericProperty(0.)
36 '''Current value used for the slider.
37
38 :attr:`value` is a :class:`~kivy.properties.NumericProperty` and defaults
39 to 0.'''
40
41 min = NumericProperty(0.)
42 '''Minimum value allowed for :attr:`value`.
43
44 :attr:`min` is a :class:`~kivy.properties.NumericProperty` and defaults to
45 0.'''
46
47 max = NumericProperty(100.)
48 '''Maximum value allowed for :attr:`value`.
49
50 :attr:`max` is a :class:`~kivy.properties.NumericProperty` and defaults to
51 100.'''
52
53 padding = NumericProperty(sp(16))
54 '''Padding of the slider. The padding is used for graphical representation
55 and interaction. It prevents the cursor from going out of the bounds of the
56 slider bounding box.
57
58 By default, padding is sp(16). The range of the slider is reduced from
59 padding \*2 on the screen. It allows drawing the default cursor of sp(32)
60 width without having the cursor go out of the widget.
61
62 :attr:`padding` is a :class:`~kivy.properties.NumericProperty` and defaults
63 to sp(16).'''
64
65 orientation = OptionProperty('horizontal', options=(
66 'vertical', 'horizontal'))
67 '''Orientation of the slider.
68
69 :attr:`orientation` is an :class:`~kivy.properties.OptionProperty` and
70 defaults to 'horizontal'. Can take a value of 'vertical' or 'horizontal'.
71 '''
72
73 range = ReferenceListProperty(min, max)
74 '''Range of the slider in the format (minimum value, maximum value)::
75
76 >>> slider = Slider(min=10, max=80)
77 >>> slider.range
78 [10, 80]
79 >>> slider.range = (20, 100)
80 >>> slider.min
81 20
82 >>> slider.max
83 100
84
85 :attr:`range` is a :class:`~kivy.properties.ReferenceListProperty` of
86 (:attr:`min`, :attr:`max`) properties.
87 '''
88
89 step = BoundedNumericProperty(0, min=0)
90 '''Step size of the slider.
91
92 .. versionadded:: 1.4.0
93
94 Determines the size of each interval or step the slider takes between
95 min and max. If the value range can't be evenly divisible by step the
96 last step will be capped by slider.max
97
98 :attr:`step` is a :class:`~kivy.properties.NumericProperty` and defaults
99 to 1.'''
100
101 # The following two methods constrain the slider's value
102 # to range(min,max). Otherwise it may happen that self.value < self.min
103 # at init.
104
105 def on_min(self, *largs):
106 self.value = min(self.max, max(self.min, self.value))
107
108 def on_max(self, *largs):
109 self.value = min(self.max, max(self.min, self.value))
110
111 def get_norm_value(self):
112 vmin = self.min
113 d = self.max - vmin
114 if d == 0:
115 return 0
116 return (self.value - vmin) / float(d)
117
118 def set_norm_value(self, value):
119 vmin = self.min
120 vmax = self.max
121 step = self.step
122 val = min(value * (vmax - vmin) + vmin, vmax)
123 if step == 0:
124 self.value = val
125 else:
126 self.value = min(round((val - vmin) / step) * step + vmin,
127 vmax)
128 value_normalized = AliasProperty(get_norm_value, set_norm_value,
129 bind=('value', 'min', 'max', 'step'))
130 '''Normalized value inside the :attr:`range` (min/max) to 0-1 range::
131
132 >>> slider = Slider(value=50, min=0, max=100)
133 >>> slider.value
134 50
135 >>> slider.value_normalized
136 0.5
137 >>> slider.value = 0
138 >>> slider.value_normalized
139 0
140 >>> slider.value = 100
141 >>> slider.value_normalized
142 1
143
144 You can also use it for setting the real value without knowing the minimum
145 and maximum::
146
147 >>> slider = Slider(min=0, max=200)
148 >>> slider.value_normalized = .5
149 >>> slider.value
150 100
151 >>> slider.value_normalized = 1.
152 >>> slider.value
153 200
154
155 :attr:`value_normalized` is an :class:`~kivy.properties.AliasProperty`.
156 '''
157
158 def get_value_pos(self):
159 padding = self.padding
160 x = self.x
161 y = self.y
162 nval = self.value_normalized
163 if self.orientation == 'horizontal':
164 return (x + padding + nval * (self.width - 2 * padding), y)
165 else:
166 return (x, y + padding + nval * (self.height - 2 * padding))
167
168 def set_value_pos(self, pos):
169 padding = self.padding
170 x = min(self.right - padding, max(pos[0], self.x + padding))
171 y = min(self.top - padding, max(pos[1], self.y + padding))
172 if self.orientation == 'horizontal':
173 if self.width == 0:
174 self.value_normalized = 0
175 else:
176 self.value_normalized = (x - self.x - padding
177 ) / float(self.width - 2 * padding)
178 else:
179 if self.height == 0:
180 self.value_normalized = 0
181 else:
182 self.value_normalized = (y - self.y - padding
183 ) / float(self.height - 2 * padding)
184 value_pos = AliasProperty(get_value_pos, set_value_pos,
185 bind=('x', 'y', 'width', 'height', 'min',
186 'max', 'value_normalized', 'orientation'))
187 '''Position of the internal cursor, based on the normalized value.
188
189 :attr:`value_pos` is an :class:`~kivy.properties.AliasProperty`.
190 '''
191
192 def on_touch_down(self, touch):
193 if self.disabled or not self.collide_point(*touch.pos):
194 return
195 if touch.is_mouse_scrolling:
196 if 'down' in touch.button or 'left' in touch.button:
197 if self.step:
198 self.value = min(self.max, self.value + self.step)
199 else:
200 self.value = min(
201 self.max,
202 self.value + (self.max - self.min) / 20)
203 if 'up' in touch.button or 'right' in touch.button:
204 if self.step:
205 self.value = max(self.min, self.value - self.step)
206 else:
207 self.value = max(
208 self.min,
209 self.value - (self.max - self.min) / 20)
210 else:
211 touch.grab(self)
212 self.value_pos = touch.pos
213 return True
214
215 def on_touch_move(self, touch):
216 if touch.grab_current == self:
217 self.value_pos = touch.pos
218 return True
219
220 def on_touch_up(self, touch):
221 if touch.grab_current == self:
222 self.value_pos = touch.pos
223 return True
224
225 if __name__ == '__main__':
226 from kivy.app import App
227
228 class SliderApp(App):
229 def build(self):
230 return Slider(padding=25)
231
232 SliderApp().run()
233
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kivy/uix/slider.py b/kivy/uix/slider.py
--- a/kivy/uix/slider.py
+++ b/kivy/uix/slider.py
@@ -23,7 +23,6 @@
from kivy.uix.widget import Widget
from kivy.properties import (NumericProperty, AliasProperty, OptionProperty,
ReferenceListProperty, BoundedNumericProperty)
-from kivy.metrics import sp
class Slider(Widget):
@@ -50,17 +49,17 @@
:attr:`max` is a :class:`~kivy.properties.NumericProperty` and defaults to
100.'''
- padding = NumericProperty(sp(16))
+ padding = NumericProperty('16sp')
'''Padding of the slider. The padding is used for graphical representation
and interaction. It prevents the cursor from going out of the bounds of the
slider bounding box.
- By default, padding is sp(16). The range of the slider is reduced from
- padding \*2 on the screen. It allows drawing the default cursor of sp(32)
+ By default, padding is 16sp. The range of the slider is reduced from
+ padding \*2 on the screen. It allows drawing the default cursor of 32sp
width without having the cursor go out of the widget.
:attr:`padding` is a :class:`~kivy.properties.NumericProperty` and defaults
- to sp(16).'''
+ to 16sp.'''
orientation = OptionProperty('horizontal', options=(
'vertical', 'horizontal'))
| {"golden_diff": "diff --git a/kivy/uix/slider.py b/kivy/uix/slider.py\n--- a/kivy/uix/slider.py\n+++ b/kivy/uix/slider.py\n@@ -23,7 +23,6 @@\n from kivy.uix.widget import Widget\n from kivy.properties import (NumericProperty, AliasProperty, OptionProperty,\n ReferenceListProperty, BoundedNumericProperty)\n-from kivy.metrics import sp\n \n \n class Slider(Widget):\n@@ -50,17 +49,17 @@\n :attr:`max` is a :class:`~kivy.properties.NumericProperty` and defaults to\n 100.'''\n \n- padding = NumericProperty(sp(16))\n+ padding = NumericProperty('16sp')\n '''Padding of the slider. The padding is used for graphical representation\n and interaction. It prevents the cursor from going out of the bounds of the\n slider bounding box.\n \n- By default, padding is sp(16). The range of the slider is reduced from\n- padding \\*2 on the screen. It allows drawing the default cursor of sp(32)\n+ By default, padding is 16sp. The range of the slider is reduced from\n+ padding \\*2 on the screen. It allows drawing the default cursor of 32sp\n width without having the cursor go out of the widget.\n \n :attr:`padding` is a :class:`~kivy.properties.NumericProperty` and defaults\n- to sp(16).'''\n+ to 16sp.'''\n \n orientation = OptionProperty('horizontal', options=(\n 'vertical', 'horizontal'))\n", "issue": "Crash when Slider is imported before forking process on Mac OS X\nConsider\n\n```\n#!/usr/bin/env python2\nimport multiprocessing\nfrom kivy.app import App\n\nfrom kivy.uix.slider import Slider\n\nclass Test(App):\n def build(self):\n return Slider()\n\ndef run_app():\n app = Test()\n app.run()\n\nrunning_app = multiprocessing.Process(target=run_app)\nrunning_app.daemon = True\nrunning_app.start()\nrunning_app.join()\n```\n\nThis currently crashes on Mac OS X:\n\n> **\\* multi-threaded process forked ***\n> crashed on child side of fork pre-exec\n> USING_FORK_WITHOUT_EXEC_IS_NOT_SUPPORTED_BY_FILE_MANAGER\n\nThis is because the property `padding` is initialized with `NumericProperty(sp(16))`. This call to `sp` will attempt to initialize SDL. Cf. [this question on StackOverflow](http://stackoverflow.com/questions/8106002/using-the-python-multiprocessing-module-for-io-with-pygame-on-mac-os-10-7)\n\n", "before_files": [{"content": "\"\"\"\nSlider\n======\n\n.. image:: images/slider.jpg\n\nThe :class:`Slider` widget looks like a scrollbar. 
It supports horizontal and\nvertical orientations, min/max values and a default value.\n\nTo create a slider from -100 to 100 starting from 25::\n\n from kivy.uix.slider import Slider\n s = Slider(min=-100, max=100, value=25)\n\nTo create a vertical slider::\n\n from kivy.uix.slider import Slider\n s = Slider(orientation='vertical')\n\n\"\"\"\n__all__ = ('Slider', )\n\nfrom kivy.uix.widget import Widget\nfrom kivy.properties import (NumericProperty, AliasProperty, OptionProperty,\n ReferenceListProperty, BoundedNumericProperty)\nfrom kivy.metrics import sp\n\n\nclass Slider(Widget):\n \"\"\"Class for creating a Slider widget.\n\n Check module documentation for more details.\n \"\"\"\n\n value = NumericProperty(0.)\n '''Current value used for the slider.\n\n :attr:`value` is a :class:`~kivy.properties.NumericProperty` and defaults\n to 0.'''\n\n min = NumericProperty(0.)\n '''Minimum value allowed for :attr:`value`.\n\n :attr:`min` is a :class:`~kivy.properties.NumericProperty` and defaults to\n 0.'''\n\n max = NumericProperty(100.)\n '''Maximum value allowed for :attr:`value`.\n\n :attr:`max` is a :class:`~kivy.properties.NumericProperty` and defaults to\n 100.'''\n\n padding = NumericProperty(sp(16))\n '''Padding of the slider. The padding is used for graphical representation\n and interaction. It prevents the cursor from going out of the bounds of the\n slider bounding box.\n\n By default, padding is sp(16). The range of the slider is reduced from\n padding \\*2 on the screen. It allows drawing the default cursor of sp(32)\n width without having the cursor go out of the widget.\n\n :attr:`padding` is a :class:`~kivy.properties.NumericProperty` and defaults\n to sp(16).'''\n\n orientation = OptionProperty('horizontal', options=(\n 'vertical', 'horizontal'))\n '''Orientation of the slider.\n\n :attr:`orientation` is an :class:`~kivy.properties.OptionProperty` and\n defaults to 'horizontal'. Can take a value of 'vertical' or 'horizontal'.\n '''\n\n range = ReferenceListProperty(min, max)\n '''Range of the slider in the format (minimum value, maximum value)::\n\n >>> slider = Slider(min=10, max=80)\n >>> slider.range\n [10, 80]\n >>> slider.range = (20, 100)\n >>> slider.min\n 20\n >>> slider.max\n 100\n\n :attr:`range` is a :class:`~kivy.properties.ReferenceListProperty` of\n (:attr:`min`, :attr:`max`) properties.\n '''\n\n step = BoundedNumericProperty(0, min=0)\n '''Step size of the slider.\n\n .. versionadded:: 1.4.0\n\n Determines the size of each interval or step the slider takes between\n min and max. If the value range can't be evenly divisible by step the\n last step will be capped by slider.max\n\n :attr:`step` is a :class:`~kivy.properties.NumericProperty` and defaults\n to 1.'''\n\n # The following two methods constrain the slider's value\n # to range(min,max). 
Otherwise it may happen that self.value < self.min\n # at init.\n\n def on_min(self, *largs):\n self.value = min(self.max, max(self.min, self.value))\n\n def on_max(self, *largs):\n self.value = min(self.max, max(self.min, self.value))\n\n def get_norm_value(self):\n vmin = self.min\n d = self.max - vmin\n if d == 0:\n return 0\n return (self.value - vmin) / float(d)\n\n def set_norm_value(self, value):\n vmin = self.min\n vmax = self.max\n step = self.step\n val = min(value * (vmax - vmin) + vmin, vmax)\n if step == 0:\n self.value = val\n else:\n self.value = min(round((val - vmin) / step) * step + vmin,\n vmax)\n value_normalized = AliasProperty(get_norm_value, set_norm_value,\n bind=('value', 'min', 'max', 'step'))\n '''Normalized value inside the :attr:`range` (min/max) to 0-1 range::\n\n >>> slider = Slider(value=50, min=0, max=100)\n >>> slider.value\n 50\n >>> slider.value_normalized\n 0.5\n >>> slider.value = 0\n >>> slider.value_normalized\n 0\n >>> slider.value = 100\n >>> slider.value_normalized\n 1\n\n You can also use it for setting the real value without knowing the minimum\n and maximum::\n\n >>> slider = Slider(min=0, max=200)\n >>> slider.value_normalized = .5\n >>> slider.value\n 100\n >>> slider.value_normalized = 1.\n >>> slider.value\n 200\n\n :attr:`value_normalized` is an :class:`~kivy.properties.AliasProperty`.\n '''\n\n def get_value_pos(self):\n padding = self.padding\n x = self.x\n y = self.y\n nval = self.value_normalized\n if self.orientation == 'horizontal':\n return (x + padding + nval * (self.width - 2 * padding), y)\n else:\n return (x, y + padding + nval * (self.height - 2 * padding))\n\n def set_value_pos(self, pos):\n padding = self.padding\n x = min(self.right - padding, max(pos[0], self.x + padding))\n y = min(self.top - padding, max(pos[1], self.y + padding))\n if self.orientation == 'horizontal':\n if self.width == 0:\n self.value_normalized = 0\n else:\n self.value_normalized = (x - self.x - padding\n ) / float(self.width - 2 * padding)\n else:\n if self.height == 0:\n self.value_normalized = 0\n else:\n self.value_normalized = (y - self.y - padding\n ) / float(self.height - 2 * padding)\n value_pos = AliasProperty(get_value_pos, set_value_pos,\n bind=('x', 'y', 'width', 'height', 'min',\n 'max', 'value_normalized', 'orientation'))\n '''Position of the internal cursor, based on the normalized value.\n\n :attr:`value_pos` is an :class:`~kivy.properties.AliasProperty`.\n '''\n\n def on_touch_down(self, touch):\n if self.disabled or not self.collide_point(*touch.pos):\n return\n if touch.is_mouse_scrolling:\n if 'down' in touch.button or 'left' in touch.button:\n if self.step:\n self.value = min(self.max, self.value + self.step)\n else:\n self.value = min(\n self.max,\n self.value + (self.max - self.min) / 20)\n if 'up' in touch.button or 'right' in touch.button:\n if self.step:\n self.value = max(self.min, self.value - self.step)\n else:\n self.value = max(\n self.min,\n self.value - (self.max - self.min) / 20)\n else:\n touch.grab(self)\n self.value_pos = touch.pos\n return True\n\n def on_touch_move(self, touch):\n if touch.grab_current == self:\n self.value_pos = touch.pos\n return True\n\n def on_touch_up(self, touch):\n if touch.grab_current == self:\n self.value_pos = touch.pos\n return True\n\nif __name__ == '__main__':\n from kivy.app import App\n\n class SliderApp(App):\n def build(self):\n return Slider(padding=25)\n\n SliderApp().run()\n", "path": "kivy/uix/slider.py"}], "after_files": [{"content": "\"\"\"\nSlider\n======\n\n.. 
image:: images/slider.jpg\n\nThe :class:`Slider` widget looks like a scrollbar. It supports horizontal and\nvertical orientations, min/max values and a default value.\n\nTo create a slider from -100 to 100 starting from 25::\n\n from kivy.uix.slider import Slider\n s = Slider(min=-100, max=100, value=25)\n\nTo create a vertical slider::\n\n from kivy.uix.slider import Slider\n s = Slider(orientation='vertical')\n\n\"\"\"\n__all__ = ('Slider', )\n\nfrom kivy.uix.widget import Widget\nfrom kivy.properties import (NumericProperty, AliasProperty, OptionProperty,\n ReferenceListProperty, BoundedNumericProperty)\n\n\nclass Slider(Widget):\n \"\"\"Class for creating a Slider widget.\n\n Check module documentation for more details.\n \"\"\"\n\n value = NumericProperty(0.)\n '''Current value used for the slider.\n\n :attr:`value` is a :class:`~kivy.properties.NumericProperty` and defaults\n to 0.'''\n\n min = NumericProperty(0.)\n '''Minimum value allowed for :attr:`value`.\n\n :attr:`min` is a :class:`~kivy.properties.NumericProperty` and defaults to\n 0.'''\n\n max = NumericProperty(100.)\n '''Maximum value allowed for :attr:`value`.\n\n :attr:`max` is a :class:`~kivy.properties.NumericProperty` and defaults to\n 100.'''\n\n padding = NumericProperty('16sp')\n '''Padding of the slider. The padding is used for graphical representation\n and interaction. It prevents the cursor from going out of the bounds of the\n slider bounding box.\n\n By default, padding is 16sp. The range of the slider is reduced from\n padding \\*2 on the screen. It allows drawing the default cursor of 32sp\n width without having the cursor go out of the widget.\n\n :attr:`padding` is a :class:`~kivy.properties.NumericProperty` and defaults\n to 16sp.'''\n\n orientation = OptionProperty('horizontal', options=(\n 'vertical', 'horizontal'))\n '''Orientation of the slider.\n\n :attr:`orientation` is an :class:`~kivy.properties.OptionProperty` and\n defaults to 'horizontal'. Can take a value of 'vertical' or 'horizontal'.\n '''\n\n range = ReferenceListProperty(min, max)\n '''Range of the slider in the format (minimum value, maximum value)::\n\n >>> slider = Slider(min=10, max=80)\n >>> slider.range\n [10, 80]\n >>> slider.range = (20, 100)\n >>> slider.min\n 20\n >>> slider.max\n 100\n\n :attr:`range` is a :class:`~kivy.properties.ReferenceListProperty` of\n (:attr:`min`, :attr:`max`) properties.\n '''\n\n step = BoundedNumericProperty(0, min=0)\n '''Step size of the slider.\n\n .. versionadded:: 1.4.0\n\n Determines the size of each interval or step the slider takes between\n min and max. If the value range can't be evenly divisible by step the\n last step will be capped by slider.max\n\n :attr:`step` is a :class:`~kivy.properties.NumericProperty` and defaults\n to 1.'''\n\n # The following two methods constrain the slider's value\n # to range(min,max). 
Otherwise it may happen that self.value < self.min\n # at init.\n\n def on_min(self, *largs):\n self.value = min(self.max, max(self.min, self.value))\n\n def on_max(self, *largs):\n self.value = min(self.max, max(self.min, self.value))\n\n def get_norm_value(self):\n vmin = self.min\n d = self.max - vmin\n if d == 0:\n return 0\n return (self.value - vmin) / float(d)\n\n def set_norm_value(self, value):\n vmin = self.min\n vmax = self.max\n step = self.step\n val = min(value * (vmax - vmin) + vmin, vmax)\n if step == 0:\n self.value = val\n else:\n self.value = min(round((val - vmin) / step) * step + vmin,\n vmax)\n value_normalized = AliasProperty(get_norm_value, set_norm_value,\n bind=('value', 'min', 'max', 'step'))\n '''Normalized value inside the :attr:`range` (min/max) to 0-1 range::\n\n >>> slider = Slider(value=50, min=0, max=100)\n >>> slider.value\n 50\n >>> slider.value_normalized\n 0.5\n >>> slider.value = 0\n >>> slider.value_normalized\n 0\n >>> slider.value = 100\n >>> slider.value_normalized\n 1\n\n You can also use it for setting the real value without knowing the minimum\n and maximum::\n\n >>> slider = Slider(min=0, max=200)\n >>> slider.value_normalized = .5\n >>> slider.value\n 100\n >>> slider.value_normalized = 1.\n >>> slider.value\n 200\n\n :attr:`value_normalized` is an :class:`~kivy.properties.AliasProperty`.\n '''\n\n def get_value_pos(self):\n padding = self.padding\n x = self.x\n y = self.y\n nval = self.value_normalized\n if self.orientation == 'horizontal':\n return (x + padding + nval * (self.width - 2 * padding), y)\n else:\n return (x, y + padding + nval * (self.height - 2 * padding))\n\n def set_value_pos(self, pos):\n padding = self.padding\n x = min(self.right - padding, max(pos[0], self.x + padding))\n y = min(self.top - padding, max(pos[1], self.y + padding))\n if self.orientation == 'horizontal':\n if self.width == 0:\n self.value_normalized = 0\n else:\n self.value_normalized = (x - self.x - padding\n ) / float(self.width - 2 * padding)\n else:\n if self.height == 0:\n self.value_normalized = 0\n else:\n self.value_normalized = (y - self.y - padding\n ) / float(self.height - 2 * padding)\n value_pos = AliasProperty(get_value_pos, set_value_pos,\n bind=('x', 'y', 'width', 'height', 'min',\n 'max', 'value_normalized', 'orientation'))\n '''Position of the internal cursor, based on the normalized value.\n\n :attr:`value_pos` is an :class:`~kivy.properties.AliasProperty`.\n '''\n\n def on_touch_down(self, touch):\n if self.disabled or not self.collide_point(*touch.pos):\n return\n if touch.is_mouse_scrolling:\n if 'down' in touch.button or 'left' in touch.button:\n if self.step:\n self.value = min(self.max, self.value + self.step)\n else:\n self.value = min(\n self.max,\n self.value + (self.max - self.min) / 20)\n if 'up' in touch.button or 'right' in touch.button:\n if self.step:\n self.value = max(self.min, self.value - self.step)\n else:\n self.value = max(\n self.min,\n self.value - (self.max - self.min) / 20)\n else:\n touch.grab(self)\n self.value_pos = touch.pos\n return True\n\n def on_touch_move(self, touch):\n if touch.grab_current == self:\n self.value_pos = touch.pos\n return True\n\n def on_touch_up(self, touch):\n if touch.grab_current == self:\n self.value_pos = touch.pos\n return True\n\nif __name__ == '__main__':\n from kivy.app import App\n\n class SliderApp(App):\n def build(self):\n return Slider(padding=25)\n\n SliderApp().run()\n", "path": "kivy/uix/slider.py"}]} | 2,901 | 353 |
gh_patches_debug_22351 | rasdani/github-patches | git_diff | scalableminds__webknossos-libs-225 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow explicitly setting a layer's bounding box
For example, when one cuts data away and knows that the bounding box needs to be shrunk, there is currently no way to set the box explicitly through the API.
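A sketch of what the requested API could look like from the caller's side (method names follow the accepted diff below; the helper function and the concrete offset/size values are made-up placeholders for illustration):

```python
from typing import Tuple
from wkcuber.api.Layer import Layer  # the Layer class shown in the file below

def set_layer_bbox(layer: Layer,
                   offset: Tuple[int, int, int],
                   size: Tuple[int, int, int]) -> None:
    """Explicitly overwrite a layer's bounding box, e.g. after cutting data away."""
    # Set offset and size in one call...
    layer.set_bounding_box(offset, size)
    # ...or adjust them independently:
    # layer.set_bounding_box_offset(offset)
    # layer.set_bounding_box_size(size)
```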
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wkcuber/api/Layer.py`
Content:
```
1 from shutil import rmtree
2 from os.path import join
3 from os import makedirs
4 from wkw import wkw
5
6 from wkcuber.api.MagDataset import (
7 MagDataset,
8 WKMagDataset,
9 TiffMagDataset,
10 TiledTiffMagDataset,
11 )
12 from wkcuber.mag import Mag
13 from wkcuber.utils import DEFAULT_WKW_FILE_LEN
14
15
16 class Layer:
17
18 COLOR_TYPE = "color"
19 SEGMENTATION_TYPE = "segmentation"
20
21 def __init__(self, name, dataset, dtype, num_channels):
22 self.name = name
23 self.dataset = dataset
24 self.dtype = dtype
25 self.num_channels = num_channels
26 self.mags = {}
27
28 full_path = join(dataset.path, name)
29 makedirs(full_path, exist_ok=True)
30
31 def get_mag(self, mag) -> MagDataset:
32 mag = Mag(mag).to_layer_name()
33 if mag not in self.mags.keys():
34 raise IndexError("The mag {} is not a mag of this layer".format(mag))
35 return self.mags[mag]
36
37 def delete_mag(self, mag):
38 mag = Mag(mag).to_layer_name()
39 if mag not in self.mags.keys():
40 raise IndexError(
41 "Deleting mag {} failed. There is no mag with this name".format(mag)
42 )
43
44 del self.mags[mag]
45 self.dataset.properties._delete_mag(self.name, mag)
46 # delete files on disk
47 full_path = join(self.dataset.path, self.name, mag)
48 rmtree(full_path)
49
50 def _create_dir_for_mag(self, mag):
51 mag = Mag(mag).to_layer_name()
52 full_path = join(self.dataset.path, self.name, mag)
53 makedirs(full_path, exist_ok=True)
54
55 def _assert_mag_does_not_exist_yet(self, mag):
56 mag = Mag(mag).to_layer_name()
57 if mag in self.mags.keys():
58 raise IndexError(
59 "Adding mag {} failed. There is already a mag with this name".format(
60 mag
61 )
62 )
63
64
65 class WKLayer(Layer):
66 def add_mag(
67 self, mag, block_len=None, file_len=None, block_type=None
68 ) -> WKMagDataset:
69 if block_len is None:
70 block_len = 32
71 if file_len is None:
72 file_len = DEFAULT_WKW_FILE_LEN
73 if block_type is None:
74 block_type = wkw.Header.BLOCK_TYPE_RAW
75
76 # normalize the name of the mag
77 mag = Mag(mag).to_layer_name()
78
79 self._assert_mag_does_not_exist_yet(mag)
80 self._create_dir_for_mag(mag)
81
82 self.mags[mag] = WKMagDataset.create(self, mag, block_len, file_len, block_type)
83 self.dataset.properties._add_mag(self.name, mag, block_len * file_len)
84
85 return self.mags[mag]
86
87 def get_or_add_mag(
88 self, mag, block_len=None, file_len=None, block_type=None
89 ) -> WKMagDataset:
90 # normalize the name of the mag
91 mag = Mag(mag).to_layer_name()
92
93 if mag in self.mags.keys():
94 assert (
95 block_len is None or self.mags[mag].header.block_len == block_len
96 ), f"Cannot get_or_add_mag: The mag {mag} already exists, but the block lengths do not match"
97 assert (
98 file_len is None or self.mags[mag].header.file_len == file_len
99 ), f"Cannot get_or_add_mag: The mag {mag} already exists, but the file lengths do not match"
100 assert (
101 block_type is None or self.mags[mag].header.block_type == block_type
102 ), f"Cannot get_or_add_mag: The mag {mag} already exists, but the block types do not match"
103 return self.get_mag(mag)
104 else:
105 return self.add_mag(mag, block_len, file_len, block_type)
106
107 def setup_mag(self, mag):
108 # This method is used to initialize the mag when opening the Dataset. This does not create e.g. the wk_header.
109
110 # normalize the name of the mag
111 mag = Mag(mag).to_layer_name()
112
113 self._assert_mag_does_not_exist_yet(mag)
114
115 with wkw.Dataset.open(join(self.dataset.path, self.name, mag)) as wkw_dataset:
116 wk_header = wkw_dataset.header
117
118 self.mags[mag] = WKMagDataset(
119 self, mag, wk_header.block_len, wk_header.file_len, wk_header.block_type
120 )
121 self.dataset.properties._add_mag(
122 self.name, mag, wk_header.block_len * wk_header.file_len
123 )
124
125
126 class TiffLayer(Layer):
127 def add_mag(self, mag) -> MagDataset:
128 # normalize the name of the mag
129 mag = Mag(mag).to_layer_name()
130
131 self._assert_mag_does_not_exist_yet(mag)
132 self._create_dir_for_mag(mag)
133
134 self.mags[mag] = self._get_mag_dataset_class().create(
135 self, mag, self.dataset.properties.pattern
136 )
137 self.dataset.properties._add_mag(self.name, mag)
138
139 return self.mags[mag]
140
141 def get_or_add_mag(self, mag) -> MagDataset:
142 # normalize the name of the mag
143 mag = Mag(mag).to_layer_name()
144
145 if mag in self.mags.keys():
146 return self.get_mag(mag)
147 else:
148 return self.add_mag(mag)
149
150 def setup_mag(self, mag):
151 # This method is used to initialize the mag when opening the Dataset. This does not create e.g. folders.
152
153 # normalize the name of the mag
154 mag = Mag(mag).to_layer_name()
155
156 self._assert_mag_does_not_exist_yet(mag)
157
158 self.mags[mag] = self._get_mag_dataset_class()(
159 self, mag, self.dataset.properties.pattern
160 )
161 self.dataset.properties._add_mag(self.name, mag)
162
163 def _get_mag_dataset_class(self):
164 return TiffMagDataset
165
166
167 class TiledTiffLayer(TiffLayer):
168 def _get_mag_dataset_class(self):
169 return TiledTiffMagDataset
170
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wkcuber/api/Layer.py b/wkcuber/api/Layer.py
--- a/wkcuber/api/Layer.py
+++ b/wkcuber/api/Layer.py
@@ -1,6 +1,10 @@
from shutil import rmtree
from os.path import join
from os import makedirs
+from typing import Tuple
+
+import numpy as np
+
from wkw import wkw
from wkcuber.api.MagDataset import (
@@ -61,6 +65,28 @@
)
)
+ def set_bounding_box(
+ self, offset: Tuple[int, int, int], size: Tuple[int, int, int]
+ ):
+ self.set_bounding_box_offset(offset)
+ self.set_bounding_box_size(size)
+
+ def set_bounding_box_offset(self, offset: Tuple[int, int, int]):
+ size = self.dataset.properties.data_layers["color"].get_bounding_box_size()
+ self.dataset.properties._set_bounding_box_of_layer(
+ self.name, tuple(offset), tuple(size)
+ )
+ for _, mag in self.mags.items():
+ mag.view.global_offset = offset
+
+ def set_bounding_box_size(self, size: Tuple[int, int, int]):
+ offset = self.dataset.properties.data_layers["color"].get_bounding_box_offset()
+ self.dataset.properties._set_bounding_box_of_layer(
+ self.name, tuple(offset), tuple(size)
+ )
+ for _, mag in self.mags.items():
+ mag.view.size = size
+
class WKLayer(Layer):
def add_mag(
| {"golden_diff": "diff --git a/wkcuber/api/Layer.py b/wkcuber/api/Layer.py\n--- a/wkcuber/api/Layer.py\n+++ b/wkcuber/api/Layer.py\n@@ -1,6 +1,10 @@\n from shutil import rmtree\n from os.path import join\n from os import makedirs\n+from typing import Tuple\n+\n+import numpy as np\n+\n from wkw import wkw\n \n from wkcuber.api.MagDataset import (\n@@ -61,6 +65,28 @@\n )\n )\n \n+ def set_bounding_box(\n+ self, offset: Tuple[int, int, int], size: Tuple[int, int, int]\n+ ):\n+ self.set_bounding_box_offset(offset)\n+ self.set_bounding_box_size(size)\n+\n+ def set_bounding_box_offset(self, offset: Tuple[int, int, int]):\n+ size = self.dataset.properties.data_layers[\"color\"].get_bounding_box_size()\n+ self.dataset.properties._set_bounding_box_of_layer(\n+ self.name, tuple(offset), tuple(size)\n+ )\n+ for _, mag in self.mags.items():\n+ mag.view.global_offset = offset\n+\n+ def set_bounding_box_size(self, size: Tuple[int, int, int]):\n+ offset = self.dataset.properties.data_layers[\"color\"].get_bounding_box_offset()\n+ self.dataset.properties._set_bounding_box_of_layer(\n+ self.name, tuple(offset), tuple(size)\n+ )\n+ for _, mag in self.mags.items():\n+ mag.view.size = size\n+\n \n class WKLayer(Layer):\n def add_mag(\n", "issue": "Allow explicitly setting a layer's bounding box\nFor example, when one cuts data away and knows that the bounding box needs to be shrunken, there is no way of setting the box explicitly with the api.\n", "before_files": [{"content": "from shutil import rmtree\nfrom os.path import join\nfrom os import makedirs\nfrom wkw import wkw\n\nfrom wkcuber.api.MagDataset import (\n MagDataset,\n WKMagDataset,\n TiffMagDataset,\n TiledTiffMagDataset,\n)\nfrom wkcuber.mag import Mag\nfrom wkcuber.utils import DEFAULT_WKW_FILE_LEN\n\n\nclass Layer:\n\n COLOR_TYPE = \"color\"\n SEGMENTATION_TYPE = \"segmentation\"\n\n def __init__(self, name, dataset, dtype, num_channels):\n self.name = name\n self.dataset = dataset\n self.dtype = dtype\n self.num_channels = num_channels\n self.mags = {}\n\n full_path = join(dataset.path, name)\n makedirs(full_path, exist_ok=True)\n\n def get_mag(self, mag) -> MagDataset:\n mag = Mag(mag).to_layer_name()\n if mag not in self.mags.keys():\n raise IndexError(\"The mag {} is not a mag of this layer\".format(mag))\n return self.mags[mag]\n\n def delete_mag(self, mag):\n mag = Mag(mag).to_layer_name()\n if mag not in self.mags.keys():\n raise IndexError(\n \"Deleting mag {} failed. There is no mag with this name\".format(mag)\n )\n\n del self.mags[mag]\n self.dataset.properties._delete_mag(self.name, mag)\n # delete files on disk\n full_path = join(self.dataset.path, self.name, mag)\n rmtree(full_path)\n\n def _create_dir_for_mag(self, mag):\n mag = Mag(mag).to_layer_name()\n full_path = join(self.dataset.path, self.name, mag)\n makedirs(full_path, exist_ok=True)\n\n def _assert_mag_does_not_exist_yet(self, mag):\n mag = Mag(mag).to_layer_name()\n if mag in self.mags.keys():\n raise IndexError(\n \"Adding mag {} failed. 
There is already a mag with this name\".format(\n mag\n )\n )\n\n\nclass WKLayer(Layer):\n def add_mag(\n self, mag, block_len=None, file_len=None, block_type=None\n ) -> WKMagDataset:\n if block_len is None:\n block_len = 32\n if file_len is None:\n file_len = DEFAULT_WKW_FILE_LEN\n if block_type is None:\n block_type = wkw.Header.BLOCK_TYPE_RAW\n\n # normalize the name of the mag\n mag = Mag(mag).to_layer_name()\n\n self._assert_mag_does_not_exist_yet(mag)\n self._create_dir_for_mag(mag)\n\n self.mags[mag] = WKMagDataset.create(self, mag, block_len, file_len, block_type)\n self.dataset.properties._add_mag(self.name, mag, block_len * file_len)\n\n return self.mags[mag]\n\n def get_or_add_mag(\n self, mag, block_len=None, file_len=None, block_type=None\n ) -> WKMagDataset:\n # normalize the name of the mag\n mag = Mag(mag).to_layer_name()\n\n if mag in self.mags.keys():\n assert (\n block_len is None or self.mags[mag].header.block_len == block_len\n ), f\"Cannot get_or_add_mag: The mag {mag} already exists, but the block lengths do not match\"\n assert (\n file_len is None or self.mags[mag].header.file_len == file_len\n ), f\"Cannot get_or_add_mag: The mag {mag} already exists, but the file lengths do not match\"\n assert (\n block_type is None or self.mags[mag].header.block_type == block_type\n ), f\"Cannot get_or_add_mag: The mag {mag} already exists, but the block types do not match\"\n return self.get_mag(mag)\n else:\n return self.add_mag(mag, block_len, file_len, block_type)\n\n def setup_mag(self, mag):\n # This method is used to initialize the mag when opening the Dataset. This does not create e.g. the wk_header.\n\n # normalize the name of the mag\n mag = Mag(mag).to_layer_name()\n\n self._assert_mag_does_not_exist_yet(mag)\n\n with wkw.Dataset.open(join(self.dataset.path, self.name, mag)) as wkw_dataset:\n wk_header = wkw_dataset.header\n\n self.mags[mag] = WKMagDataset(\n self, mag, wk_header.block_len, wk_header.file_len, wk_header.block_type\n )\n self.dataset.properties._add_mag(\n self.name, mag, wk_header.block_len * wk_header.file_len\n )\n\n\nclass TiffLayer(Layer):\n def add_mag(self, mag) -> MagDataset:\n # normalize the name of the mag\n mag = Mag(mag).to_layer_name()\n\n self._assert_mag_does_not_exist_yet(mag)\n self._create_dir_for_mag(mag)\n\n self.mags[mag] = self._get_mag_dataset_class().create(\n self, mag, self.dataset.properties.pattern\n )\n self.dataset.properties._add_mag(self.name, mag)\n\n return self.mags[mag]\n\n def get_or_add_mag(self, mag) -> MagDataset:\n # normalize the name of the mag\n mag = Mag(mag).to_layer_name()\n\n if mag in self.mags.keys():\n return self.get_mag(mag)\n else:\n return self.add_mag(mag)\n\n def setup_mag(self, mag):\n # This method is used to initialize the mag when opening the Dataset. This does not create e.g. 
folders.\n\n # normalize the name of the mag\n mag = Mag(mag).to_layer_name()\n\n self._assert_mag_does_not_exist_yet(mag)\n\n self.mags[mag] = self._get_mag_dataset_class()(\n self, mag, self.dataset.properties.pattern\n )\n self.dataset.properties._add_mag(self.name, mag)\n\n def _get_mag_dataset_class(self):\n return TiffMagDataset\n\n\nclass TiledTiffLayer(TiffLayer):\n def _get_mag_dataset_class(self):\n return TiledTiffMagDataset\n", "path": "wkcuber/api/Layer.py"}], "after_files": [{"content": "from shutil import rmtree\nfrom os.path import join\nfrom os import makedirs\nfrom typing import Tuple\n\nimport numpy as np\n\nfrom wkw import wkw\n\nfrom wkcuber.api.MagDataset import (\n MagDataset,\n WKMagDataset,\n TiffMagDataset,\n TiledTiffMagDataset,\n)\nfrom wkcuber.mag import Mag\nfrom wkcuber.utils import DEFAULT_WKW_FILE_LEN\n\n\nclass Layer:\n\n COLOR_TYPE = \"color\"\n SEGMENTATION_TYPE = \"segmentation\"\n\n def __init__(self, name, dataset, dtype, num_channels):\n self.name = name\n self.dataset = dataset\n self.dtype = dtype\n self.num_channels = num_channels\n self.mags = {}\n\n full_path = join(dataset.path, name)\n makedirs(full_path, exist_ok=True)\n\n def get_mag(self, mag) -> MagDataset:\n mag = Mag(mag).to_layer_name()\n if mag not in self.mags.keys():\n raise IndexError(\"The mag {} is not a mag of this layer\".format(mag))\n return self.mags[mag]\n\n def delete_mag(self, mag):\n mag = Mag(mag).to_layer_name()\n if mag not in self.mags.keys():\n raise IndexError(\n \"Deleting mag {} failed. There is no mag with this name\".format(mag)\n )\n\n del self.mags[mag]\n self.dataset.properties._delete_mag(self.name, mag)\n # delete files on disk\n full_path = join(self.dataset.path, self.name, mag)\n rmtree(full_path)\n\n def _create_dir_for_mag(self, mag):\n mag = Mag(mag).to_layer_name()\n full_path = join(self.dataset.path, self.name, mag)\n makedirs(full_path, exist_ok=True)\n\n def _assert_mag_does_not_exist_yet(self, mag):\n mag = Mag(mag).to_layer_name()\n if mag in self.mags.keys():\n raise IndexError(\n \"Adding mag {} failed. 
There is already a mag with this name\".format(\n mag\n )\n )\n\n def set_bounding_box(\n self, offset: Tuple[int, int, int], size: Tuple[int, int, int]\n ):\n self.set_bounding_box_offset(offset)\n self.set_bounding_box_size(size)\n\n def set_bounding_box_offset(self, offset: Tuple[int, int, int]):\n size = self.dataset.properties.data_layers[\"color\"].get_bounding_box_size()\n self.dataset.properties._set_bounding_box_of_layer(\n self.name, tuple(offset), tuple(size)\n )\n for _, mag in self.mags.items():\n mag.view.global_offset = offset\n\n def set_bounding_box_size(self, size: Tuple[int, int, int]):\n offset = self.dataset.properties.data_layers[\"color\"].get_bounding_box_offset()\n self.dataset.properties._set_bounding_box_of_layer(\n self.name, tuple(offset), tuple(size)\n )\n for _, mag in self.mags.items():\n mag.view.size = size\n\n\nclass WKLayer(Layer):\n def add_mag(\n self, mag, block_len=None, file_len=None, block_type=None\n ) -> WKMagDataset:\n if block_len is None:\n block_len = 32\n if file_len is None:\n file_len = DEFAULT_WKW_FILE_LEN\n if block_type is None:\n block_type = wkw.Header.BLOCK_TYPE_RAW\n\n # normalize the name of the mag\n mag = Mag(mag).to_layer_name()\n\n self._assert_mag_does_not_exist_yet(mag)\n self._create_dir_for_mag(mag)\n\n self.mags[mag] = WKMagDataset.create(self, mag, block_len, file_len, block_type)\n self.dataset.properties._add_mag(self.name, mag, block_len * file_len)\n\n return self.mags[mag]\n\n def get_or_add_mag(\n self, mag, block_len=None, file_len=None, block_type=None\n ) -> WKMagDataset:\n # normalize the name of the mag\n mag = Mag(mag).to_layer_name()\n\n if mag in self.mags.keys():\n assert (\n block_len is None or self.mags[mag].header.block_len == block_len\n ), f\"Cannot get_or_add_mag: The mag {mag} already exists, but the block lengths do not match\"\n assert (\n file_len is None or self.mags[mag].header.file_len == file_len\n ), f\"Cannot get_or_add_mag: The mag {mag} already exists, but the file lengths do not match\"\n assert (\n block_type is None or self.mags[mag].header.block_type == block_type\n ), f\"Cannot get_or_add_mag: The mag {mag} already exists, but the block types do not match\"\n return self.get_mag(mag)\n else:\n return self.add_mag(mag, block_len, file_len, block_type)\n\n def setup_mag(self, mag):\n # This method is used to initialize the mag when opening the Dataset. This does not create e.g. 
the wk_header.\n\n # normalize the name of the mag\n mag = Mag(mag).to_layer_name()\n\n self._assert_mag_does_not_exist_yet(mag)\n\n with wkw.Dataset.open(join(self.dataset.path, self.name, mag)) as wkw_dataset:\n wk_header = wkw_dataset.header\n\n self.mags[mag] = WKMagDataset(\n self, mag, wk_header.block_len, wk_header.file_len, wk_header.block_type\n )\n self.dataset.properties._add_mag(\n self.name, mag, wk_header.block_len * wk_header.file_len\n )\n\n\nclass TiffLayer(Layer):\n def add_mag(self, mag) -> MagDataset:\n # normalize the name of the mag\n mag = Mag(mag).to_layer_name()\n\n self._assert_mag_does_not_exist_yet(mag)\n self._create_dir_for_mag(mag)\n\n self.mags[mag] = self._get_mag_dataset_class().create(\n self, mag, self.dataset.properties.pattern\n )\n self.dataset.properties._add_mag(self.name, mag)\n\n return self.mags[mag]\n\n def get_or_add_mag(self, mag) -> MagDataset:\n # normalize the name of the mag\n mag = Mag(mag).to_layer_name()\n\n if mag in self.mags.keys():\n return self.get_mag(mag)\n else:\n return self.add_mag(mag)\n\n def setup_mag(self, mag):\n # This method is used to initialize the mag when opening the Dataset. This does not create e.g. folders.\n\n # normalize the name of the mag\n mag = Mag(mag).to_layer_name()\n\n self._assert_mag_does_not_exist_yet(mag)\n\n self.mags[mag] = self._get_mag_dataset_class()(\n self, mag, self.dataset.properties.pattern\n )\n self.dataset.properties._add_mag(self.name, mag)\n\n def _get_mag_dataset_class(self):\n return TiffMagDataset\n\n\nclass TiledTiffLayer(TiffLayer):\n def _get_mag_dataset_class(self):\n return TiledTiffMagDataset\n", "path": "wkcuber/api/Layer.py"}]} | 2,072 | 353 |
gh_patches_debug_61488 | rasdani/github-patches | git_diff | Chia-Network__chia-blockchain-13373 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Finalize default amount for CLI when adding a mirror (chia data add_mirror)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chia/cmds/data.py`
Content:
```
1 import json
2 import logging
3 from pathlib import Path
4 from typing import Any, Coroutine, Dict, List, Optional, TypeVar
5
6 import click
7 from typing_extensions import Protocol
8
9 _T = TypeVar("_T")
10
11
12 class IdentityFunction(Protocol):
13 def __call__(self, __x: _T) -> _T:
14 ...
15
16
17 logger = logging.getLogger(__name__)
18
19
20 # TODO: this is more general and should be part of refactoring the overall CLI code duplication
21 def run(coro: Coroutine[Any, Any, Optional[Dict[str, Any]]]) -> None:
22 import asyncio
23
24 response = asyncio.run(coro)
25
26 success = response is not None and response.get("success", False)
27 logger.info(f"data layer cli call response:{success}")
28 # todo make sure all cli methods follow this pattern, uncomment
29 # if not success:
30 # raise click.ClickException(message=f"query unsuccessful, response: {response}")
31
32
33 @click.group("data", short_help="Manage your data")
34 def data_cmd() -> None:
35 pass
36
37
38 # TODO: maybe use more helpful `type=`s to get click to handle error reporting of
39 # malformed inputs.
40
41
42 def create_changelist_option() -> IdentityFunction:
43 return click.option(
44 "-d",
45 "--changelist",
46 "changelist_string",
47 help="str representing the changelist",
48 type=str,
49 required=True,
50 )
51
52
53 def create_key_option() -> IdentityFunction:
54 return click.option(
55 "-h",
56 "--key",
57 "key_string",
58 help="str representing the key",
59 type=str,
60 required=True,
61 )
62
63
64 def create_data_store_id_option() -> "IdentityFunction":
65 return click.option(
66 "-store",
67 "--id",
68 help="The hexadecimal store id.",
69 type=str,
70 required=True,
71 )
72
73
74 def create_data_store_name_option() -> "IdentityFunction":
75 return click.option(
76 "-n",
77 "--table_name",
78 "table_name",
79 help="The name of the table.",
80 type=str,
81 required=True,
82 )
83
84
85 def create_rpc_port_option() -> "IdentityFunction":
86 return click.option(
87 "-dp",
88 "--data-rpc-port",
89 help="Set the port where the data layer is hosting the RPC interface. See rpc_port under wallet in config.yaml",
90 type=int,
91 default=None,
92 show_default=True,
93 )
94
95
96 def create_fee_option() -> "IdentityFunction":
97 return click.option(
98 "-m",
99 "--fee",
100 help="Set the fees for the transaction, in XCH",
101 type=str,
102 default=None,
103 show_default=True,
104 required=False,
105 )
106
107
108 @data_cmd.command("create_data_store", short_help="Create a new data store")
109 @create_rpc_port_option()
110 @create_fee_option()
111 def create_data_store(
112 data_rpc_port: int,
113 fee: Optional[str],
114 ) -> None:
115 from chia.cmds.data_funcs import create_data_store_cmd
116
117 run(create_data_store_cmd(data_rpc_port, fee))
118
119
120 @data_cmd.command("get_value", short_help="Get the value for a given key and store")
121 @create_data_store_id_option()
122 @create_key_option()
123 @create_rpc_port_option()
124 def get_value(
125 id: str,
126 key_string: str,
127 data_rpc_port: int,
128 ) -> None:
129 from chia.cmds.data_funcs import get_value_cmd
130
131 run(get_value_cmd(data_rpc_port, id, key_string))
132
133
134 @data_cmd.command("update_data_store", short_help="Update a store by providing the changelist operations")
135 @create_data_store_id_option()
136 @create_changelist_option()
137 @create_rpc_port_option()
138 @create_fee_option()
139 def update_data_store(
140 id: str,
141 changelist_string: str,
142 data_rpc_port: int,
143 fee: str,
144 ) -> None:
145 from chia.cmds.data_funcs import update_data_store_cmd
146
147 run(update_data_store_cmd(rpc_port=data_rpc_port, store_id=id, changelist=json.loads(changelist_string), fee=fee))
148
149
150 @data_cmd.command("get_keys", short_help="Get all keys for a given store")
151 @create_data_store_id_option()
152 @create_rpc_port_option()
153 def get_keys(
154 id: str,
155 data_rpc_port: int,
156 ) -> None:
157 from chia.cmds.data_funcs import get_keys_cmd
158
159 run(get_keys_cmd(data_rpc_port, id))
160
161
162 @data_cmd.command("get_keys_values", short_help="Get all keys and values for a given store")
163 @create_data_store_id_option()
164 @create_rpc_port_option()
165 def get_keys_values(
166 id: str,
167 data_rpc_port: int,
168 ) -> None:
169 from chia.cmds.data_funcs import get_keys_values_cmd
170
171 run(get_keys_values_cmd(data_rpc_port, id))
172
173
174 @data_cmd.command("get_root", short_help="Get the published root hash value for a given store")
175 @create_data_store_id_option()
176 @create_rpc_port_option()
177 def get_root(
178 id: str,
179 data_rpc_port: int,
180 ) -> None:
181 from chia.cmds.data_funcs import get_root_cmd
182
183 run(get_root_cmd(rpc_port=data_rpc_port, store_id=id))
184
185
186 @data_cmd.command("subscribe", short_help="Subscribe to a store")
187 @create_data_store_id_option()
188 @click.option(
189 "-u",
190 "--url",
191 "urls",
192 help="Manually provide a list of servers urls for downloading the data",
193 type=str,
194 multiple=True,
195 )
196 @create_rpc_port_option()
197 def subscribe(
198 id: str,
199 urls: List[str],
200 data_rpc_port: int,
201 ) -> None:
202 from chia.cmds.data_funcs import subscribe_cmd
203
204 run(subscribe_cmd(rpc_port=data_rpc_port, store_id=id, urls=urls))
205
206
207 @data_cmd.command("remove_subscription", short_help="Remove server urls that are added via subscribing to urls")
208 @create_data_store_id_option()
209 @click.option("-u", "--url", "urls", help="Server urls to remove", type=str, multiple=True)
210 @create_rpc_port_option()
211 def remove_subscription(
212 id: str,
213 urls: List[str],
214 data_rpc_port: int,
215 ) -> None:
216 from chia.cmds.data_funcs import remove_subscriptions_cmd
217
218 run(remove_subscriptions_cmd(rpc_port=data_rpc_port, store_id=id, urls=urls))
219
220
221 @data_cmd.command("unsubscribe", short_help="Completely untrack a store")
222 @create_data_store_id_option()
223 @create_rpc_port_option()
224 def unsubscribe(
225 id: str,
226 data_rpc_port: int,
227 ) -> None:
228 from chia.cmds.data_funcs import unsubscribe_cmd
229
230 run(unsubscribe_cmd(rpc_port=data_rpc_port, store_id=id))
231
232
233 @data_cmd.command(
234 "get_kv_diff", short_help="Get the inserted and deleted keys and values between an initial and a final hash"
235 )
236 @create_data_store_id_option()
237 @click.option("-hash_1", "--hash_1", help="Initial hash", type=str)
238 @click.option("-hash_2", "--hash_2", help="Final hash", type=str)
239 @create_rpc_port_option()
240 def get_kv_diff(
241 id: str,
242 hash_1: str,
243 hash_2: str,
244 data_rpc_port: int,
245 ) -> None:
246 from chia.cmds.data_funcs import get_kv_diff_cmd
247
248 run(get_kv_diff_cmd(rpc_port=data_rpc_port, store_id=id, hash_1=hash_1, hash_2=hash_2))
249
250
251 @data_cmd.command("get_root_history", short_help="Get all changes of a singleton")
252 @create_data_store_id_option()
253 @create_rpc_port_option()
254 def get_root_history(
255 id: str,
256 data_rpc_port: int,
257 ) -> None:
258 from chia.cmds.data_funcs import get_root_history_cmd
259
260 run(get_root_history_cmd(rpc_port=data_rpc_port, store_id=id))
261
262
263 @data_cmd.command("add_missing_files", short_help="Manually reconstruct server files from the data layer database")
264 @click.option(
265 "-i",
266 "--ids",
267 help="List of stores to reconstruct. If not specified, all stores will be reconstructed",
268 type=str,
269 required=False,
270 )
271 @click.option(
272 "-o/-n",
273 "--overwrite/--no-overwrite",
274 help="Specify if already existing files need to be overwritten by this command",
275 )
276 @click.option(
277 "-f", "--foldername", type=str, help="If specified, use a non-default folder to write the files", required=False
278 )
279 @create_rpc_port_option()
280 def add_missing_files(ids: Optional[str], overwrite: bool, foldername: Optional[str], data_rpc_port: int) -> None:
281 from chia.cmds.data_funcs import add_missing_files_cmd
282
283 run(
284 add_missing_files_cmd(
285 rpc_port=data_rpc_port,
286 ids=None if ids is None else json.loads(ids),
287 overwrite=overwrite,
288 foldername=None if foldername is None else Path(foldername),
289 )
290 )
291
292
293 @data_cmd.command("add_mirror", short_help="Publish mirror urls on chain")
294 @click.option("-i", "--id", help="Store id", type=str, required=True)
295 @click.option("-a", "--amount", help="Amount to spend for this mirror, in mojos", type=int, required=True)
296 @click.option(
297 "-u",
298 "--url",
299 "urls",
300 help="URL to publish on the new coin, multiple accepted and will be published to a single coin.",
301 type=str,
302 multiple=True,
303 )
304 @create_fee_option()
305 @create_rpc_port_option()
306 def add_mirror(id: str, amount: int, urls: List[str], fee: Optional[str], data_rpc_port: int) -> None:
307 from chia.cmds.data_funcs import add_mirror_cmd
308
309 run(
310 add_mirror_cmd(
311 rpc_port=data_rpc_port,
312 store_id=id,
313 urls=urls,
314 amount=amount,
315 fee=fee,
316 )
317 )
318
319
320 @data_cmd.command("delete_mirror", short_help="Delete an owned mirror by its coin id")
321 @click.option("-c", "--coin_id", help="Coin id", type=str, required=True)
322 @create_fee_option()
323 @create_rpc_port_option()
324 def delete_mirror(coin_id: str, fee: Optional[str], data_rpc_port: int) -> None:
325 from chia.cmds.data_funcs import delete_mirror_cmd
326
327 run(
328 delete_mirror_cmd(
329 rpc_port=data_rpc_port,
330 coin_id=coin_id,
331 fee=fee,
332 )
333 )
334
335
336 @data_cmd.command("get_mirrors", short_help="Get a list of all mirrors for a given store")
337 @click.option("-i", "--id", help="Store id", type=str, required=True)
338 @create_rpc_port_option()
339 def get_mirrors(id: str, data_rpc_port: int) -> None:
340 from chia.cmds.data_funcs import get_mirrors_cmd
341
342 run(
343 get_mirrors_cmd(
344 rpc_port=data_rpc_port,
345 store_id=id,
346 )
347 )
348
349
350 @data_cmd.command("get_subscriptions", short_help="Get subscribed stores, including the owned stores")
351 @create_rpc_port_option()
352 def get_subscriptions(data_rpc_port: int) -> None:
353 from chia.cmds.data_funcs import get_subscriptions_cmd
354
355 run(
356 get_subscriptions_cmd(
357 rpc_port=data_rpc_port,
358 )
359 )
360
361
362 @data_cmd.command("get_owned_stores", short_help="Get owned stores")
363 @create_rpc_port_option()
364 def get_owned_stores(data_rpc_port: int) -> None:
365 from chia.cmds.data_funcs import get_owned_stores_cmd
366
367 run(
368 get_owned_stores_cmd(
369 rpc_port=data_rpc_port,
370 )
371 )
372
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chia/cmds/data.py b/chia/cmds/data.py
--- a/chia/cmds/data.py
+++ b/chia/cmds/data.py
@@ -292,7 +292,9 @@
 
@data_cmd.command("add_mirror", short_help="Publish mirror urls on chain")
@click.option("-i", "--id", help="Store id", type=str, required=True)
[email protected]("-a", "--amount", help="Amount to spend for this mirror, in mojos", type=int, required=True)
[email protected](
+ "-a", "--amount", help="Amount to spend for this mirror, in mojos", type=int, default=0, show_default=True
+)
@click.option(
"-u",
"--url",
| {"golden_diff": "diff --git a/chia/cmds/data.py b/chia/cmds/data.py\n--- a/chia/cmds/data.py\n+++ b/chia/cmds/data.py\n@@ -292,7 +292,9 @@\n \n @data_cmd.command(\"add_mirror\", short_help=\"Publish mirror urls on chain\")\n @click.option(\"-i\", \"--id\", help=\"Store id\", type=str, required=True)\[email protected](\"-a\", \"--amount\", help=\"Amount to spend for this mirror, in mojos\", type=int, required=True)\[email protected](\n+ \"-a\", \"--amount\", help=\"Amount to spend for this mirror, in mojos\", type=int, default=0, show_default=True\n+)\n @click.option(\n \"-u\",\n \"--url\",\n", "issue": "Finalize default amount for CLI when adding a mirror (chia data add_mirror)\n\n", "before_files": [{"content": "import json\nimport logging\nfrom pathlib import Path\nfrom typing import Any, Coroutine, Dict, List, Optional, TypeVar\n\nimport click\nfrom typing_extensions import Protocol\n\n_T = TypeVar(\"_T\")\n\n\nclass IdentityFunction(Protocol):\n def __call__(self, __x: _T) -> _T:\n ...\n\n\nlogger = logging.getLogger(__name__)\n\n\n# TODO: this is more general and should be part of refactoring the overall CLI code duplication\ndef run(coro: Coroutine[Any, Any, Optional[Dict[str, Any]]]) -> None:\n import asyncio\n\n response = asyncio.run(coro)\n\n success = response is not None and response.get(\"success\", False)\n logger.info(f\"data layer cli call response:{success}\")\n # todo make sure all cli methods follow this pattern, uncomment\n # if not success:\n # raise click.ClickException(message=f\"query unsuccessful, response: {response}\")\n\n\[email protected](\"data\", short_help=\"Manage your data\")\ndef data_cmd() -> None:\n pass\n\n\n# TODO: maybe use more helpful `type=`s to get click to handle error reporting of\n# malformed inputs.\n\n\ndef create_changelist_option() -> IdentityFunction:\n return click.option(\n \"-d\",\n \"--changelist\",\n \"changelist_string\",\n help=\"str representing the changelist\",\n type=str,\n required=True,\n )\n\n\ndef create_key_option() -> IdentityFunction:\n return click.option(\n \"-h\",\n \"--key\",\n \"key_string\",\n help=\"str representing the key\",\n type=str,\n required=True,\n )\n\n\ndef create_data_store_id_option() -> \"IdentityFunction\":\n return click.option(\n \"-store\",\n \"--id\",\n help=\"The hexadecimal store id.\",\n type=str,\n required=True,\n )\n\n\ndef create_data_store_name_option() -> \"IdentityFunction\":\n return click.option(\n \"-n\",\n \"--table_name\",\n \"table_name\",\n help=\"The name of the table.\",\n type=str,\n required=True,\n )\n\n\ndef create_rpc_port_option() -> \"IdentityFunction\":\n return click.option(\n \"-dp\",\n \"--data-rpc-port\",\n help=\"Set the port where the data layer is hosting the RPC interface. 
See rpc_port under wallet in config.yaml\",\n type=int,\n default=None,\n show_default=True,\n )\n\n\ndef create_fee_option() -> \"IdentityFunction\":\n return click.option(\n \"-m\",\n \"--fee\",\n help=\"Set the fees for the transaction, in XCH\",\n type=str,\n default=None,\n show_default=True,\n required=False,\n )\n\n\n@data_cmd.command(\"create_data_store\", short_help=\"Create a new data store\")\n@create_rpc_port_option()\n@create_fee_option()\ndef create_data_store(\n data_rpc_port: int,\n fee: Optional[str],\n) -> None:\n from chia.cmds.data_funcs import create_data_store_cmd\n\n run(create_data_store_cmd(data_rpc_port, fee))\n\n\n@data_cmd.command(\"get_value\", short_help=\"Get the value for a given key and store\")\n@create_data_store_id_option()\n@create_key_option()\n@create_rpc_port_option()\ndef get_value(\n id: str,\n key_string: str,\n data_rpc_port: int,\n) -> None:\n from chia.cmds.data_funcs import get_value_cmd\n\n run(get_value_cmd(data_rpc_port, id, key_string))\n\n\n@data_cmd.command(\"update_data_store\", short_help=\"Update a store by providing the changelist operations\")\n@create_data_store_id_option()\n@create_changelist_option()\n@create_rpc_port_option()\n@create_fee_option()\ndef update_data_store(\n id: str,\n changelist_string: str,\n data_rpc_port: int,\n fee: str,\n) -> None:\n from chia.cmds.data_funcs import update_data_store_cmd\n\n run(update_data_store_cmd(rpc_port=data_rpc_port, store_id=id, changelist=json.loads(changelist_string), fee=fee))\n\n\n@data_cmd.command(\"get_keys\", short_help=\"Get all keys for a given store\")\n@create_data_store_id_option()\n@create_rpc_port_option()\ndef get_keys(\n id: str,\n data_rpc_port: int,\n) -> None:\n from chia.cmds.data_funcs import get_keys_cmd\n\n run(get_keys_cmd(data_rpc_port, id))\n\n\n@data_cmd.command(\"get_keys_values\", short_help=\"Get all keys and values for a given store\")\n@create_data_store_id_option()\n@create_rpc_port_option()\ndef get_keys_values(\n id: str,\n data_rpc_port: int,\n) -> None:\n from chia.cmds.data_funcs import get_keys_values_cmd\n\n run(get_keys_values_cmd(data_rpc_port, id))\n\n\n@data_cmd.command(\"get_root\", short_help=\"Get the published root hash value for a given store\")\n@create_data_store_id_option()\n@create_rpc_port_option()\ndef get_root(\n id: str,\n data_rpc_port: int,\n) -> None:\n from chia.cmds.data_funcs import get_root_cmd\n\n run(get_root_cmd(rpc_port=data_rpc_port, store_id=id))\n\n\n@data_cmd.command(\"subscribe\", short_help=\"Subscribe to a store\")\n@create_data_store_id_option()\[email protected](\n \"-u\",\n \"--url\",\n \"urls\",\n help=\"Manually provide a list of servers urls for downloading the data\",\n type=str,\n multiple=True,\n)\n@create_rpc_port_option()\ndef subscribe(\n id: str,\n urls: List[str],\n data_rpc_port: int,\n) -> None:\n from chia.cmds.data_funcs import subscribe_cmd\n\n run(subscribe_cmd(rpc_port=data_rpc_port, store_id=id, urls=urls))\n\n\n@data_cmd.command(\"remove_subscription\", short_help=\"Remove server urls that are added via subscribing to urls\")\n@create_data_store_id_option()\[email protected](\"-u\", \"--url\", \"urls\", help=\"Server urls to remove\", type=str, multiple=True)\n@create_rpc_port_option()\ndef remove_subscription(\n id: str,\n urls: List[str],\n data_rpc_port: int,\n) -> None:\n from chia.cmds.data_funcs import remove_subscriptions_cmd\n\n run(remove_subscriptions_cmd(rpc_port=data_rpc_port, store_id=id, urls=urls))\n\n\n@data_cmd.command(\"unsubscribe\", short_help=\"Completely untrack a 
store\")\n@create_data_store_id_option()\n@create_rpc_port_option()\ndef unsubscribe(\n id: str,\n data_rpc_port: int,\n) -> None:\n from chia.cmds.data_funcs import unsubscribe_cmd\n\n run(unsubscribe_cmd(rpc_port=data_rpc_port, store_id=id))\n\n\n@data_cmd.command(\n \"get_kv_diff\", short_help=\"Get the inserted and deleted keys and values between an initial and a final hash\"\n)\n@create_data_store_id_option()\[email protected](\"-hash_1\", \"--hash_1\", help=\"Initial hash\", type=str)\[email protected](\"-hash_2\", \"--hash_2\", help=\"Final hash\", type=str)\n@create_rpc_port_option()\ndef get_kv_diff(\n id: str,\n hash_1: str,\n hash_2: str,\n data_rpc_port: int,\n) -> None:\n from chia.cmds.data_funcs import get_kv_diff_cmd\n\n run(get_kv_diff_cmd(rpc_port=data_rpc_port, store_id=id, hash_1=hash_1, hash_2=hash_2))\n\n\n@data_cmd.command(\"get_root_history\", short_help=\"Get all changes of a singleton\")\n@create_data_store_id_option()\n@create_rpc_port_option()\ndef get_root_history(\n id: str,\n data_rpc_port: int,\n) -> None:\n from chia.cmds.data_funcs import get_root_history_cmd\n\n run(get_root_history_cmd(rpc_port=data_rpc_port, store_id=id))\n\n\n@data_cmd.command(\"add_missing_files\", short_help=\"Manually reconstruct server files from the data layer database\")\[email protected](\n \"-i\",\n \"--ids\",\n help=\"List of stores to reconstruct. If not specified, all stores will be reconstructed\",\n type=str,\n required=False,\n)\[email protected](\n \"-o/-n\",\n \"--overwrite/--no-overwrite\",\n help=\"Specify if already existing files need to be overwritten by this command\",\n)\[email protected](\n \"-f\", \"--foldername\", type=str, help=\"If specified, use a non-default folder to write the files\", required=False\n)\n@create_rpc_port_option()\ndef add_missing_files(ids: Optional[str], overwrite: bool, foldername: Optional[str], data_rpc_port: int) -> None:\n from chia.cmds.data_funcs import add_missing_files_cmd\n\n run(\n add_missing_files_cmd(\n rpc_port=data_rpc_port,\n ids=None if ids is None else json.loads(ids),\n overwrite=overwrite,\n foldername=None if foldername is None else Path(foldername),\n )\n )\n\n\n@data_cmd.command(\"add_mirror\", short_help=\"Publish mirror urls on chain\")\[email protected](\"-i\", \"--id\", help=\"Store id\", type=str, required=True)\[email protected](\"-a\", \"--amount\", help=\"Amount to spend for this mirror, in mojos\", type=int, required=True)\[email protected](\n \"-u\",\n \"--url\",\n \"urls\",\n help=\"URL to publish on the new coin, multiple accepted and will be published to a single coin.\",\n type=str,\n multiple=True,\n)\n@create_fee_option()\n@create_rpc_port_option()\ndef add_mirror(id: str, amount: int, urls: List[str], fee: Optional[str], data_rpc_port: int) -> None:\n from chia.cmds.data_funcs import add_mirror_cmd\n\n run(\n add_mirror_cmd(\n rpc_port=data_rpc_port,\n store_id=id,\n urls=urls,\n amount=amount,\n fee=fee,\n )\n )\n\n\n@data_cmd.command(\"delete_mirror\", short_help=\"Delete an owned mirror by its coin id\")\[email protected](\"-c\", \"--coin_id\", help=\"Coin id\", type=str, required=True)\n@create_fee_option()\n@create_rpc_port_option()\ndef delete_mirror(coin_id: str, fee: Optional[str], data_rpc_port: int) -> None:\n from chia.cmds.data_funcs import delete_mirror_cmd\n\n run(\n delete_mirror_cmd(\n rpc_port=data_rpc_port,\n coin_id=coin_id,\n fee=fee,\n )\n )\n\n\n@data_cmd.command(\"get_mirrors\", short_help=\"Get a list of all mirrors for a given store\")\[email protected](\"-i\", \"--id\", 
help=\"Store id\", type=str, required=True)\n@create_rpc_port_option()\ndef get_mirrors(id: str, data_rpc_port: int) -> None:\n from chia.cmds.data_funcs import get_mirrors_cmd\n\n run(\n get_mirrors_cmd(\n rpc_port=data_rpc_port,\n store_id=id,\n )\n )\n\n\n@data_cmd.command(\"get_subscriptions\", short_help=\"Get subscribed stores, including the owned stores\")\n@create_rpc_port_option()\ndef get_subscriptions(data_rpc_port: int) -> None:\n from chia.cmds.data_funcs import get_subscriptions_cmd\n\n run(\n get_subscriptions_cmd(\n rpc_port=data_rpc_port,\n )\n )\n\n\n@data_cmd.command(\"get_owned_stores\", short_help=\"Get owned stores\")\n@create_rpc_port_option()\ndef get_owned_stores(data_rpc_port: int) -> None:\n from chia.cmds.data_funcs import get_owned_stores_cmd\n\n run(\n get_owned_stores_cmd(\n rpc_port=data_rpc_port,\n )\n )\n", "path": "chia/cmds/data.py"}], "after_files": [{"content": "import json\nimport logging\nfrom pathlib import Path\nfrom typing import Any, Coroutine, Dict, List, Optional, TypeVar\n\nimport click\nfrom typing_extensions import Protocol\n\n_T = TypeVar(\"_T\")\n\n\nclass IdentityFunction(Protocol):\n def __call__(self, __x: _T) -> _T:\n ...\n\n\nlogger = logging.getLogger(__name__)\n\n\n# TODO: this is more general and should be part of refactoring the overall CLI code duplication\ndef run(coro: Coroutine[Any, Any, Optional[Dict[str, Any]]]) -> None:\n import asyncio\n\n response = asyncio.run(coro)\n\n success = response is not None and response.get(\"success\", False)\n logger.info(f\"data layer cli call response:{success}\")\n # todo make sure all cli methods follow this pattern, uncomment\n # if not success:\n # raise click.ClickException(message=f\"query unsuccessful, response: {response}\")\n\n\[email protected](\"data\", short_help=\"Manage your data\")\ndef data_cmd() -> None:\n pass\n\n\n# TODO: maybe use more helpful `type=`s to get click to handle error reporting of\n# malformed inputs.\n\n\ndef create_changelist_option() -> IdentityFunction:\n return click.option(\n \"-d\",\n \"--changelist\",\n \"changelist_string\",\n help=\"str representing the changelist\",\n type=str,\n required=True,\n )\n\n\ndef create_key_option() -> IdentityFunction:\n return click.option(\n \"-h\",\n \"--key\",\n \"key_string\",\n help=\"str representing the key\",\n type=str,\n required=True,\n )\n\n\ndef create_data_store_id_option() -> \"IdentityFunction\":\n return click.option(\n \"-store\",\n \"--id\",\n help=\"The hexadecimal store id.\",\n type=str,\n required=True,\n )\n\n\ndef create_data_store_name_option() -> \"IdentityFunction\":\n return click.option(\n \"-n\",\n \"--table_name\",\n \"table_name\",\n help=\"The name of the table.\",\n type=str,\n required=True,\n )\n\n\ndef create_rpc_port_option() -> \"IdentityFunction\":\n return click.option(\n \"-dp\",\n \"--data-rpc-port\",\n help=\"Set the port where the data layer is hosting the RPC interface. 
See rpc_port under wallet in config.yaml\",\n type=int,\n default=None,\n show_default=True,\n )\n\n\ndef create_fee_option() -> \"IdentityFunction\":\n return click.option(\n \"-m\",\n \"--fee\",\n help=\"Set the fees for the transaction, in XCH\",\n type=str,\n default=None,\n show_default=True,\n required=False,\n )\n\n\n@data_cmd.command(\"create_data_store\", short_help=\"Create a new data store\")\n@create_rpc_port_option()\n@create_fee_option()\ndef create_data_store(\n data_rpc_port: int,\n fee: Optional[str],\n) -> None:\n from chia.cmds.data_funcs import create_data_store_cmd\n\n run(create_data_store_cmd(data_rpc_port, fee))\n\n\n@data_cmd.command(\"get_value\", short_help=\"Get the value for a given key and store\")\n@create_data_store_id_option()\n@create_key_option()\n@create_rpc_port_option()\ndef get_value(\n id: str,\n key_string: str,\n data_rpc_port: int,\n) -> None:\n from chia.cmds.data_funcs import get_value_cmd\n\n run(get_value_cmd(data_rpc_port, id, key_string))\n\n\n@data_cmd.command(\"update_data_store\", short_help=\"Update a store by providing the changelist operations\")\n@create_data_store_id_option()\n@create_changelist_option()\n@create_rpc_port_option()\n@create_fee_option()\ndef update_data_store(\n id: str,\n changelist_string: str,\n data_rpc_port: int,\n fee: str,\n) -> None:\n from chia.cmds.data_funcs import update_data_store_cmd\n\n run(update_data_store_cmd(rpc_port=data_rpc_port, store_id=id, changelist=json.loads(changelist_string), fee=fee))\n\n\n@data_cmd.command(\"get_keys\", short_help=\"Get all keys for a given store\")\n@create_data_store_id_option()\n@create_rpc_port_option()\ndef get_keys(\n id: str,\n data_rpc_port: int,\n) -> None:\n from chia.cmds.data_funcs import get_keys_cmd\n\n run(get_keys_cmd(data_rpc_port, id))\n\n\n@data_cmd.command(\"get_keys_values\", short_help=\"Get all keys and values for a given store\")\n@create_data_store_id_option()\n@create_rpc_port_option()\ndef get_keys_values(\n id: str,\n data_rpc_port: int,\n) -> None:\n from chia.cmds.data_funcs import get_keys_values_cmd\n\n run(get_keys_values_cmd(data_rpc_port, id))\n\n\n@data_cmd.command(\"get_root\", short_help=\"Get the published root hash value for a given store\")\n@create_data_store_id_option()\n@create_rpc_port_option()\ndef get_root(\n id: str,\n data_rpc_port: int,\n) -> None:\n from chia.cmds.data_funcs import get_root_cmd\n\n run(get_root_cmd(rpc_port=data_rpc_port, store_id=id))\n\n\n@data_cmd.command(\"subscribe\", short_help=\"Subscribe to a store\")\n@create_data_store_id_option()\[email protected](\n \"-u\",\n \"--url\",\n \"urls\",\n help=\"Manually provide a list of servers urls for downloading the data\",\n type=str,\n multiple=True,\n)\n@create_rpc_port_option()\ndef subscribe(\n id: str,\n urls: List[str],\n data_rpc_port: int,\n) -> None:\n from chia.cmds.data_funcs import subscribe_cmd\n\n run(subscribe_cmd(rpc_port=data_rpc_port, store_id=id, urls=urls))\n\n\n@data_cmd.command(\"remove_subscription\", short_help=\"Remove server urls that are added via subscribing to urls\")\n@create_data_store_id_option()\[email protected](\"-u\", \"--url\", \"urls\", help=\"Server urls to remove\", type=str, multiple=True)\n@create_rpc_port_option()\ndef remove_subscription(\n id: str,\n urls: List[str],\n data_rpc_port: int,\n) -> None:\n from chia.cmds.data_funcs import remove_subscriptions_cmd\n\n run(remove_subscriptions_cmd(rpc_port=data_rpc_port, store_id=id, urls=urls))\n\n\n@data_cmd.command(\"unsubscribe\", short_help=\"Completely untrack a 
store\")\n@create_data_store_id_option()\n@create_rpc_port_option()\ndef unsubscribe(\n id: str,\n data_rpc_port: int,\n) -> None:\n from chia.cmds.data_funcs import unsubscribe_cmd\n\n run(unsubscribe_cmd(rpc_port=data_rpc_port, store_id=id))\n\n\n@data_cmd.command(\n \"get_kv_diff\", short_help=\"Get the inserted and deleted keys and values between an initial and a final hash\"\n)\n@create_data_store_id_option()\[email protected](\"-hash_1\", \"--hash_1\", help=\"Initial hash\", type=str)\[email protected](\"-hash_2\", \"--hash_2\", help=\"Final hash\", type=str)\n@create_rpc_port_option()\ndef get_kv_diff(\n id: str,\n hash_1: str,\n hash_2: str,\n data_rpc_port: int,\n) -> None:\n from chia.cmds.data_funcs import get_kv_diff_cmd\n\n run(get_kv_diff_cmd(rpc_port=data_rpc_port, store_id=id, hash_1=hash_1, hash_2=hash_2))\n\n\n@data_cmd.command(\"get_root_history\", short_help=\"Get all changes of a singleton\")\n@create_data_store_id_option()\n@create_rpc_port_option()\ndef get_root_history(\n id: str,\n data_rpc_port: int,\n) -> None:\n from chia.cmds.data_funcs import get_root_history_cmd\n\n run(get_root_history_cmd(rpc_port=data_rpc_port, store_id=id))\n\n\n@data_cmd.command(\"add_missing_files\", short_help=\"Manually reconstruct server files from the data layer database\")\[email protected](\n \"-i\",\n \"--ids\",\n help=\"List of stores to reconstruct. If not specified, all stores will be reconstructed\",\n type=str,\n required=False,\n)\[email protected](\n \"-o/-n\",\n \"--overwrite/--no-overwrite\",\n help=\"Specify if already existing files need to be overwritten by this command\",\n)\[email protected](\n \"-f\", \"--foldername\", type=str, help=\"If specified, use a non-default folder to write the files\", required=False\n)\n@create_rpc_port_option()\ndef add_missing_files(ids: Optional[str], overwrite: bool, foldername: Optional[str], data_rpc_port: int) -> None:\n from chia.cmds.data_funcs import add_missing_files_cmd\n\n run(\n add_missing_files_cmd(\n rpc_port=data_rpc_port,\n ids=None if ids is None else json.loads(ids),\n overwrite=overwrite,\n foldername=None if foldername is None else Path(foldername),\n )\n )\n\n\n@data_cmd.command(\"add_mirror\", short_help=\"Publish mirror urls on chain\")\[email protected](\"-i\", \"--id\", help=\"Store id\", type=str, required=True)\[email protected](\n \"-a\", \"--amount\", help=\"Amount to spend for this mirror, in mojos\", type=int, default=0, show_default=True\n)\[email protected](\n \"-u\",\n \"--url\",\n \"urls\",\n help=\"URL to publish on the new coin, multiple accepted and will be published to a single coin.\",\n type=str,\n multiple=True,\n)\n@create_fee_option()\n@create_rpc_port_option()\ndef add_mirror(id: str, amount: int, urls: List[str], fee: Optional[str], data_rpc_port: int) -> None:\n from chia.cmds.data_funcs import add_mirror_cmd\n\n run(\n add_mirror_cmd(\n rpc_port=data_rpc_port,\n store_id=id,\n urls=urls,\n amount=amount,\n fee=fee,\n )\n )\n\n\n@data_cmd.command(\"delete_mirror\", short_help=\"Delete an owned mirror by its coin id\")\[email protected](\"-c\", \"--coin_id\", help=\"Coin id\", type=str, required=True)\n@create_fee_option()\n@create_rpc_port_option()\ndef delete_mirror(coin_id: str, fee: Optional[str], data_rpc_port: int) -> None:\n from chia.cmds.data_funcs import delete_mirror_cmd\n\n run(\n delete_mirror_cmd(\n rpc_port=data_rpc_port,\n coin_id=coin_id,\n fee=fee,\n )\n )\n\n\n@data_cmd.command(\"get_mirrors\", short_help=\"Get a list of all mirrors for a given store\")\[email 
protected](\"-i\", \"--id\", help=\"Store id\", type=str, required=True)\n@create_rpc_port_option()\ndef get_mirrors(id: str, data_rpc_port: int) -> None:\n from chia.cmds.data_funcs import get_mirrors_cmd\n\n run(\n get_mirrors_cmd(\n rpc_port=data_rpc_port,\n store_id=id,\n )\n )\n\n\n@data_cmd.command(\"get_subscriptions\", short_help=\"Get subscribed stores, including the owned stores\")\n@create_rpc_port_option()\ndef get_subscriptions(data_rpc_port: int) -> None:\n from chia.cmds.data_funcs import get_subscriptions_cmd\n\n run(\n get_subscriptions_cmd(\n rpc_port=data_rpc_port,\n )\n )\n\n\n@data_cmd.command(\"get_owned_stores\", short_help=\"Get owned stores\")\n@create_rpc_port_option()\ndef get_owned_stores(data_rpc_port: int) -> None:\n from chia.cmds.data_funcs import get_owned_stores_cmd\n\n run(\n get_owned_stores_cmd(\n rpc_port=data_rpc_port,\n )\n )\n", "path": "chia/cmds/data.py"}]} | 3,836 | 168 |