| problem_id (stringlengths 18-22) | source (stringclasses 1 value) | task_type (stringclasses 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.1k-25.4k) | golden_diff (stringlengths 145-5.13k) | verification_info (stringlengths 582-39.1k) | num_tokens (int64 271-4.1k) | num_tokens_diff (int64 47-1.02k) |
|---|---|---|---|---|---|---|---|---|
gh_patches_debug_28938 | rasdani/github-patches | git_diff | jupyterhub__jupyterhub-1346 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Exception in /admin on sorting by running servers
**How to reproduce the issue**
On current master, visit /admin and click any sort arrow next to "Running".
**What you expected to happen**
Servers are sorted.
**What actually happens**
An error is produced, `500 : Internal Server Error`.
The hub logs show
```
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/tornado/web.py", line 1509, in _execute
result = method(*self.path_args, **self.path_kwargs)
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/utils.py", line 193, in decorated
return method(self, *args, **kwargs)
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/handlers/pages.py", line 179, in get
cols = [ getattr(orm.User, mapping.get(c, c)) for c in sorts ]
File "/usr/local/lib/python3.5/dist-packages/jupyterhub/handlers/pages.py", line 179, in <listcomp>
cols = [ getattr(orm.User, mapping.get(c, c)) for c in sorts ]
AttributeError: type object 'User' has no attribute '_server_id'
```
**Share what version of JupyterHub you are using**
Current master.
Looks like this is related to https://github.com/jupyterhub/jupyterhub/issues/766#issuecomment-267297617.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `jupyterhub/handlers/pages.py`
Content:
```
1 """Basic html-rendering handlers."""
2
3 # Copyright (c) Jupyter Development Team.
4 # Distributed under the terms of the Modified BSD License.
5
6 from http.client import responses
7
8 from jinja2 import TemplateNotFound
9 from tornado import web, gen
10 from tornado.httputil import url_concat
11
12 from .. import orm
13 from ..utils import admin_only, url_path_join
14 from .base import BaseHandler
15
16
17 class RootHandler(BaseHandler):
18 """Render the Hub root page.
19
20 If next argument is passed by single-user server,
21 redirect to base_url + single-user page.
22
23 If logged in, redirects to:
24
25 - single-user server if running
26 - hub home, otherwise
27
28 Otherwise, renders login page.
29 """
30 def get(self):
31 next_url = self.get_argument('next', '')
32 if next_url and not next_url.startswith('/'):
33 self.log.warning("Disallowing redirect outside JupyterHub: %r", next_url)
34 next_url = ''
35 if next_url and next_url.startswith(url_path_join(self.base_url, 'user/')):
36 # add /hub/ prefix, to ensure we redirect to the right user's server.
37 # The next request will be handled by UserSpawnHandler,
38 # ultimately redirecting to the logged-in user's server.
39 without_prefix = next_url[len(self.base_url):]
40 next_url = url_path_join(self.hub.base_url, without_prefix)
41 self.log.warning("Redirecting %s to %s. For sharing public links, use /user-redirect/",
42 self.request.uri, next_url,
43 )
44 self.redirect(next_url)
45 return
46 user = self.get_current_user()
47 if user:
48 if user.running:
49 url = user.url
50 self.log.debug("User is running: %s", url)
51 self.set_login_cookie(user) # set cookie
52 else:
53 url = url_path_join(self.hub.base_url, 'home')
54 self.log.debug("User is not running: %s", url)
55 else:
56 url = self.settings['login_url']
57 self.redirect(url)
58
59
60 class HomeHandler(BaseHandler):
61 """Render the user's home page."""
62
63 @web.authenticated
64 @gen.coroutine
65 def get(self):
66 user = self.get_current_user()
67 if user.running:
68 # trigger poll_and_notify event in case of a server that died
69 yield user.spawner.poll_and_notify()
70 html = self.render_template('home.html',
71 user=user,
72 url=user.url,
73 )
74 self.finish(html)
75
76
77 class SpawnHandler(BaseHandler):
78 """Handle spawning of single-user servers via form.
79
80 GET renders the form, POST handles form submission.
81
82 Only enabled when Spawner.options_form is defined.
83 """
84 def _render_form(self, message=''):
85 user = self.get_current_user()
86 return self.render_template('spawn.html',
87 user=user,
88 spawner_options_form=user.spawner.options_form,
89 error_message=message,
90 url=self.request.uri,
91 )
92
93 @web.authenticated
94 def get(self):
95 """GET renders form for spawning with user-specified options"""
96 user = self.get_current_user()
97 if not self.allow_named_servers and user.running:
98 url = user.url
99 self.log.debug("User is running: %s", url)
100 self.redirect(url)
101 return
102 if user.spawner.options_form:
103 self.finish(self._render_form())
104 else:
105 # not running, no form. Trigger spawn.
106 self.redirect(user.url)
107
108 @web.authenticated
109 @gen.coroutine
110 def post(self):
111 """POST spawns with user-specified options"""
112 user = self.get_current_user()
113 if not self.allow_named_servers and user.running:
114 url = user.url
115 self.log.warning("User is already running: %s", url)
116 self.redirect(url)
117 return
118 form_options = {}
119 for key, byte_list in self.request.body_arguments.items():
120 form_options[key] = [ bs.decode('utf8') for bs in byte_list ]
121 for key, byte_list in self.request.files.items():
122 form_options["%s_file"%key] = byte_list
123 try:
124 options = user.spawner.options_from_form(form_options)
125 yield self.spawn_single_user(user, options=options)
126 except Exception as e:
127 self.log.error("Failed to spawn single-user server with form", exc_info=True)
128 self.finish(self._render_form(str(e)))
129 return
130 self.set_login_cookie(user)
131 url = user.url
132
133 next_url = self.get_argument('next', '')
134 if next_url and not next_url.startswith('/'):
135 self.log.warning("Disallowing redirect outside JupyterHub: %r", next_url)
136 elif next_url:
137 url = next_url
138
139 self.redirect(url)
140
141 class AdminHandler(BaseHandler):
142 """Render the admin page."""
143
144 @admin_only
145 def get(self):
146 available = {'name', 'admin', 'running', 'last_activity'}
147 default_sort = ['admin', 'name']
148 mapping = {
149 'running': '_server_id'
150 }
151 default_order = {
152 'name': 'asc',
153 'last_activity': 'desc',
154 'admin': 'desc',
155 'running': 'desc',
156 }
157 sorts = self.get_arguments('sort') or default_sort
158 orders = self.get_arguments('order')
159
160 for bad in set(sorts).difference(available):
161 self.log.warning("ignoring invalid sort: %r", bad)
162 sorts.remove(bad)
163 for bad in set(orders).difference({'asc', 'desc'}):
164 self.log.warning("ignoring invalid order: %r", bad)
165 orders.remove(bad)
166
167 # add default sort as secondary
168 for s in default_sort:
169 if s not in sorts:
170 sorts.append(s)
171 if len(orders) < len(sorts):
172 for col in sorts[len(orders):]:
173 orders.append(default_order[col])
174 else:
175 orders = orders[:len(sorts)]
176
177 # this could be one incomprehensible nested list comprehension
178 # get User columns
179 cols = [ getattr(orm.User, mapping.get(c, c)) for c in sorts ]
180 # get User.col.desc() order objects
181 ordered = [ getattr(c, o)() for c, o in zip(cols, orders) ]
182
183 users = self.db.query(orm.User).order_by(*ordered)
184 users = [ self._user_from_orm(u) for u in users ]
185 running = [ u for u in users if u.running ]
186
187 html = self.render_template('admin.html',
188 user=self.get_current_user(),
189 admin_access=self.settings.get('admin_access', False),
190 users=users,
191 running=running,
192 sort={s:o for s,o in zip(sorts, orders)},
193 )
194 self.finish(html)
195
196
197 class TokenPageHandler(BaseHandler):
198 """Handler for page requesting new API tokens"""
199
200 @web.authenticated
201 def get(self):
202 html = self.render_template('token.html')
203 self.finish(html)
204
205
206 class ProxyErrorHandler(BaseHandler):
207 """Handler for rendering proxy error pages"""
208
209 def get(self, status_code_s):
210 status_code = int(status_code_s)
211 status_message = responses.get(status_code, 'Unknown HTTP Error')
212 # build template namespace
213
214 hub_home = url_path_join(self.hub.base_url, 'home')
215 message_html = ''
216 if status_code == 503:
217 message_html = ' '.join([
218 "Your server appears to be down.",
219 "Try restarting it <a href='%s'>from the hub</a>" % hub_home
220 ])
221 ns = dict(
222 status_code=status_code,
223 status_message=status_message,
224 message_html=message_html,
225 logo_url=hub_home,
226 )
227
228 self.set_header('Content-Type', 'text/html')
229 # render the template
230 try:
231 html = self.render_template('%s.html' % status_code, **ns)
232 except TemplateNotFound:
233 self.log.debug("No template for %d", status_code)
234 html = self.render_template('error.html', **ns)
235
236 self.write(html)
237
238
239 default_handlers = [
240 (r'/?', RootHandler),
241 (r'/home', HomeHandler),
242 (r'/admin', AdminHandler),
243 (r'/spawn', SpawnHandler),
244 (r'/token', TokenPageHandler),
245 (r'/error/(\d+)', ProxyErrorHandler),
246 ]
247
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/jupyterhub/handlers/pages.py b/jupyterhub/handlers/pages.py
--- a/jupyterhub/handlers/pages.py
+++ b/jupyterhub/handlers/pages.py
@@ -146,14 +146,19 @@
available = {'name', 'admin', 'running', 'last_activity'}
default_sort = ['admin', 'name']
mapping = {
- 'running': '_server_id'
+ 'running': orm.Spawner.server_id,
}
+ for name in available:
+ if name not in mapping:
+ mapping[name] = getattr(orm.User, name)
+
default_order = {
'name': 'asc',
'last_activity': 'desc',
'admin': 'desc',
'running': 'desc',
}
+
sorts = self.get_arguments('sort') or default_sort
orders = self.get_arguments('order')
@@ -176,11 +181,11 @@
# this could be one incomprehensible nested list comprehension
# get User columns
- cols = [ getattr(orm.User, mapping.get(c, c)) for c in sorts ]
+ cols = [ mapping[c] for c in sorts ]
# get User.col.desc() order objects
ordered = [ getattr(c, o)() for c, o in zip(cols, orders) ]
- users = self.db.query(orm.User).order_by(*ordered)
+ users = self.db.query(orm.User).join(orm.Spawner).order_by(*ordered)
users = [ self._user_from_orm(u) for u in users ]
running = [ u for u in users if u.running ]
| {"golden_diff": "diff --git a/jupyterhub/handlers/pages.py b/jupyterhub/handlers/pages.py\n--- a/jupyterhub/handlers/pages.py\n+++ b/jupyterhub/handlers/pages.py\n@@ -146,14 +146,19 @@\n available = {'name', 'admin', 'running', 'last_activity'}\n default_sort = ['admin', 'name']\n mapping = {\n- 'running': '_server_id'\n+ 'running': orm.Spawner.server_id,\n }\n+ for name in available:\n+ if name not in mapping:\n+ mapping[name] = getattr(orm.User, name)\n+\n default_order = {\n 'name': 'asc',\n 'last_activity': 'desc',\n 'admin': 'desc',\n 'running': 'desc',\n }\n+\n sorts = self.get_arguments('sort') or default_sort\n orders = self.get_arguments('order')\n \n@@ -176,11 +181,11 @@\n \n # this could be one incomprehensible nested list comprehension\n # get User columns\n- cols = [ getattr(orm.User, mapping.get(c, c)) for c in sorts ]\n+ cols = [ mapping[c] for c in sorts ]\n # get User.col.desc() order objects\n ordered = [ getattr(c, o)() for c, o in zip(cols, orders) ]\n \n- users = self.db.query(orm.User).order_by(*ordered)\n+ users = self.db.query(orm.User).join(orm.Spawner).order_by(*ordered)\n users = [ self._user_from_orm(u) for u in users ]\n running = [ u for u in users if u.running ]\n", "issue": "Exception in /admin on sorting by running servers\n**How to reproduce the issue**\r\nOn current master, visit /admin and click any sort arrow next to \"Running\".\r\n \r\n**What you expected to happen**\r\nServers are sorted.\r\n\r\n**What actually happens**\r\nAn error is produced, `500 : Internal Server Error`.\r\nThe hub logs show\r\n```\r\n Traceback (most recent call last):\r\n File \"/usr/local/lib/python3.5/dist-packages/tornado/web.py\", line 1509, in _execute\r\n result = method(*self.path_args, **self.path_kwargs)\r\n File \"/usr/local/lib/python3.5/dist-packages/jupyterhub/utils.py\", line 193, in decorated\r\n return method(self, *args, **kwargs)\r\n File \"/usr/local/lib/python3.5/dist-packages/jupyterhub/handlers/pages.py\", line 179, in get\r\n cols = [ getattr(orm.User, mapping.get(c, c)) for c in sorts ]\r\n File \"/usr/local/lib/python3.5/dist-packages/jupyterhub/handlers/pages.py\", line 179, in <listcomp>\r\n cols = [ getattr(orm.User, mapping.get(c, c)) for c in sorts ]\r\n AttributeError: type object 'User' has no attribute '_server_id'\r\n```\r\n**Share what version of JupyterHub you are using**\r\nCurrent master.\r\n\r\nLooks like this is related to https://github.com/jupyterhub/jupyterhub/issues/766#issuecomment-267297617.\r\n\n", "before_files": [{"content": "\"\"\"Basic html-rendering handlers.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom http.client import responses\n\nfrom jinja2 import TemplateNotFound\nfrom tornado import web, gen\nfrom tornado.httputil import url_concat\n\nfrom .. 
import orm\nfrom ..utils import admin_only, url_path_join\nfrom .base import BaseHandler\n\n\nclass RootHandler(BaseHandler):\n \"\"\"Render the Hub root page.\n\n If next argument is passed by single-user server,\n redirect to base_url + single-user page.\n\n If logged in, redirects to:\n\n - single-user server if running\n - hub home, otherwise\n\n Otherwise, renders login page.\n \"\"\"\n def get(self):\n next_url = self.get_argument('next', '')\n if next_url and not next_url.startswith('/'):\n self.log.warning(\"Disallowing redirect outside JupyterHub: %r\", next_url)\n next_url = ''\n if next_url and next_url.startswith(url_path_join(self.base_url, 'user/')):\n # add /hub/ prefix, to ensure we redirect to the right user's server.\n # The next request will be handled by UserSpawnHandler,\n # ultimately redirecting to the logged-in user's server.\n without_prefix = next_url[len(self.base_url):]\n next_url = url_path_join(self.hub.base_url, without_prefix)\n self.log.warning(\"Redirecting %s to %s. For sharing public links, use /user-redirect/\",\n self.request.uri, next_url,\n )\n self.redirect(next_url)\n return\n user = self.get_current_user()\n if user:\n if user.running:\n url = user.url\n self.log.debug(\"User is running: %s\", url)\n self.set_login_cookie(user) # set cookie\n else:\n url = url_path_join(self.hub.base_url, 'home')\n self.log.debug(\"User is not running: %s\", url)\n else:\n url = self.settings['login_url']\n self.redirect(url)\n\n\nclass HomeHandler(BaseHandler):\n \"\"\"Render the user's home page.\"\"\"\n\n @web.authenticated\n @gen.coroutine\n def get(self):\n user = self.get_current_user()\n if user.running:\n # trigger poll_and_notify event in case of a server that died\n yield user.spawner.poll_and_notify()\n html = self.render_template('home.html',\n user=user,\n url=user.url,\n )\n self.finish(html)\n\n\nclass SpawnHandler(BaseHandler):\n \"\"\"Handle spawning of single-user servers via form.\n\n GET renders the form, POST handles form submission.\n\n Only enabled when Spawner.options_form is defined.\n \"\"\"\n def _render_form(self, message=''):\n user = self.get_current_user()\n return self.render_template('spawn.html',\n user=user,\n spawner_options_form=user.spawner.options_form,\n error_message=message,\n url=self.request.uri,\n )\n\n @web.authenticated\n def get(self):\n \"\"\"GET renders form for spawning with user-specified options\"\"\"\n user = self.get_current_user()\n if not self.allow_named_servers and user.running:\n url = user.url\n self.log.debug(\"User is running: %s\", url)\n self.redirect(url)\n return\n if user.spawner.options_form:\n self.finish(self._render_form())\n else:\n # not running, no form. 
Trigger spawn.\n self.redirect(user.url)\n\n @web.authenticated\n @gen.coroutine\n def post(self):\n \"\"\"POST spawns with user-specified options\"\"\"\n user = self.get_current_user()\n if not self.allow_named_servers and user.running:\n url = user.url\n self.log.warning(\"User is already running: %s\", url)\n self.redirect(url)\n return\n form_options = {}\n for key, byte_list in self.request.body_arguments.items():\n form_options[key] = [ bs.decode('utf8') for bs in byte_list ]\n for key, byte_list in self.request.files.items():\n form_options[\"%s_file\"%key] = byte_list\n try:\n options = user.spawner.options_from_form(form_options)\n yield self.spawn_single_user(user, options=options)\n except Exception as e:\n self.log.error(\"Failed to spawn single-user server with form\", exc_info=True)\n self.finish(self._render_form(str(e)))\n return\n self.set_login_cookie(user)\n url = user.url\n\n next_url = self.get_argument('next', '')\n if next_url and not next_url.startswith('/'):\n self.log.warning(\"Disallowing redirect outside JupyterHub: %r\", next_url)\n elif next_url:\n url = next_url\n\n self.redirect(url)\n\nclass AdminHandler(BaseHandler):\n \"\"\"Render the admin page.\"\"\"\n\n @admin_only\n def get(self):\n available = {'name', 'admin', 'running', 'last_activity'}\n default_sort = ['admin', 'name']\n mapping = {\n 'running': '_server_id'\n }\n default_order = {\n 'name': 'asc',\n 'last_activity': 'desc',\n 'admin': 'desc',\n 'running': 'desc',\n }\n sorts = self.get_arguments('sort') or default_sort\n orders = self.get_arguments('order')\n\n for bad in set(sorts).difference(available):\n self.log.warning(\"ignoring invalid sort: %r\", bad)\n sorts.remove(bad)\n for bad in set(orders).difference({'asc', 'desc'}):\n self.log.warning(\"ignoring invalid order: %r\", bad)\n orders.remove(bad)\n\n # add default sort as secondary\n for s in default_sort:\n if s not in sorts:\n sorts.append(s)\n if len(orders) < len(sorts):\n for col in sorts[len(orders):]:\n orders.append(default_order[col])\n else:\n orders = orders[:len(sorts)]\n\n # this could be one incomprehensible nested list comprehension\n # get User columns\n cols = [ getattr(orm.User, mapping.get(c, c)) for c in sorts ]\n # get User.col.desc() order objects\n ordered = [ getattr(c, o)() for c, o in zip(cols, orders) ]\n\n users = self.db.query(orm.User).order_by(*ordered)\n users = [ self._user_from_orm(u) for u in users ]\n running = [ u for u in users if u.running ]\n\n html = self.render_template('admin.html',\n user=self.get_current_user(),\n admin_access=self.settings.get('admin_access', False),\n users=users,\n running=running,\n sort={s:o for s,o in zip(sorts, orders)},\n )\n self.finish(html)\n\n\nclass TokenPageHandler(BaseHandler):\n \"\"\"Handler for page requesting new API tokens\"\"\"\n\n @web.authenticated\n def get(self):\n html = self.render_template('token.html')\n self.finish(html)\n\n\nclass ProxyErrorHandler(BaseHandler):\n \"\"\"Handler for rendering proxy error pages\"\"\"\n \n def get(self, status_code_s):\n status_code = int(status_code_s)\n status_message = responses.get(status_code, 'Unknown HTTP Error')\n # build template namespace\n \n hub_home = url_path_join(self.hub.base_url, 'home')\n message_html = ''\n if status_code == 503:\n message_html = ' '.join([\n \"Your server appears to be down.\",\n \"Try restarting it <a href='%s'>from the hub</a>\" % hub_home\n ])\n ns = dict(\n status_code=status_code,\n status_message=status_message,\n message_html=message_html,\n logo_url=hub_home,\n )\n\n 
self.set_header('Content-Type', 'text/html')\n # render the template\n try:\n html = self.render_template('%s.html' % status_code, **ns)\n except TemplateNotFound:\n self.log.debug(\"No template for %d\", status_code)\n html = self.render_template('error.html', **ns)\n\n self.write(html)\n\n\ndefault_handlers = [\n (r'/?', RootHandler),\n (r'/home', HomeHandler),\n (r'/admin', AdminHandler),\n (r'/spawn', SpawnHandler),\n (r'/token', TokenPageHandler),\n (r'/error/(\\d+)', ProxyErrorHandler),\n]\n", "path": "jupyterhub/handlers/pages.py"}], "after_files": [{"content": "\"\"\"Basic html-rendering handlers.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom http.client import responses\n\nfrom jinja2 import TemplateNotFound\nfrom tornado import web, gen\nfrom tornado.httputil import url_concat\n\nfrom .. import orm\nfrom ..utils import admin_only, url_path_join\nfrom .base import BaseHandler\n\n\nclass RootHandler(BaseHandler):\n \"\"\"Render the Hub root page.\n\n If next argument is passed by single-user server,\n redirect to base_url + single-user page.\n\n If logged in, redirects to:\n\n - single-user server if running\n - hub home, otherwise\n\n Otherwise, renders login page.\n \"\"\"\n def get(self):\n next_url = self.get_argument('next', '')\n if next_url and not next_url.startswith('/'):\n self.log.warning(\"Disallowing redirect outside JupyterHub: %r\", next_url)\n next_url = ''\n if next_url and next_url.startswith(url_path_join(self.base_url, 'user/')):\n # add /hub/ prefix, to ensure we redirect to the right user's server.\n # The next request will be handled by UserSpawnHandler,\n # ultimately redirecting to the logged-in user's server.\n without_prefix = next_url[len(self.base_url):]\n next_url = url_path_join(self.hub.base_url, without_prefix)\n self.log.warning(\"Redirecting %s to %s. 
For sharing public links, use /user-redirect/\",\n self.request.uri, next_url,\n )\n self.redirect(next_url)\n return\n user = self.get_current_user()\n if user:\n if user.running:\n url = user.url\n self.log.debug(\"User is running: %s\", url)\n self.set_login_cookie(user) # set cookie\n else:\n url = url_path_join(self.hub.base_url, 'home')\n self.log.debug(\"User is not running: %s\", url)\n else:\n url = self.settings['login_url']\n self.redirect(url)\n\n\nclass HomeHandler(BaseHandler):\n \"\"\"Render the user's home page.\"\"\"\n\n @web.authenticated\n @gen.coroutine\n def get(self):\n user = self.get_current_user()\n if user.running:\n # trigger poll_and_notify event in case of a server that died\n yield user.spawner.poll_and_notify()\n html = self.render_template('home.html',\n user=user,\n url=user.url,\n )\n self.finish(html)\n\n\nclass SpawnHandler(BaseHandler):\n \"\"\"Handle spawning of single-user servers via form.\n\n GET renders the form, POST handles form submission.\n\n Only enabled when Spawner.options_form is defined.\n \"\"\"\n def _render_form(self, message=''):\n user = self.get_current_user()\n return self.render_template('spawn.html',\n user=user,\n spawner_options_form=user.spawner.options_form,\n error_message=message,\n url=self.request.uri,\n )\n\n @web.authenticated\n def get(self):\n \"\"\"GET renders form for spawning with user-specified options\"\"\"\n user = self.get_current_user()\n if not self.allow_named_servers and user.running:\n url = user.url\n self.log.debug(\"User is running: %s\", url)\n self.redirect(url)\n return\n if user.spawner.options_form:\n self.finish(self._render_form())\n else:\n # not running, no form. Trigger spawn.\n self.redirect(user.url)\n\n @web.authenticated\n @gen.coroutine\n def post(self):\n \"\"\"POST spawns with user-specified options\"\"\"\n user = self.get_current_user()\n if not self.allow_named_servers and user.running:\n url = user.url\n self.log.warning(\"User is already running: %s\", url)\n self.redirect(url)\n return\n form_options = {}\n for key, byte_list in self.request.body_arguments.items():\n form_options[key] = [ bs.decode('utf8') for bs in byte_list ]\n for key, byte_list in self.request.files.items():\n form_options[\"%s_file\"%key] = byte_list\n try:\n options = user.spawner.options_from_form(form_options)\n yield self.spawn_single_user(user, options=options)\n except Exception as e:\n self.log.error(\"Failed to spawn single-user server with form\", exc_info=True)\n self.finish(self._render_form(str(e)))\n return\n self.set_login_cookie(user)\n url = user.url\n\n next_url = self.get_argument('next', '')\n if next_url and not next_url.startswith('/'):\n self.log.warning(\"Disallowing redirect outside JupyterHub: %r\", next_url)\n elif next_url:\n url = next_url\n\n self.redirect(url)\n\nclass AdminHandler(BaseHandler):\n \"\"\"Render the admin page.\"\"\"\n\n @admin_only\n def get(self):\n available = {'name', 'admin', 'running', 'last_activity'}\n default_sort = ['admin', 'name']\n mapping = {\n 'running': orm.Spawner.server_id,\n }\n for name in available:\n if name not in mapping:\n mapping[name] = getattr(orm.User, name)\n\n default_order = {\n 'name': 'asc',\n 'last_activity': 'desc',\n 'admin': 'desc',\n 'running': 'desc',\n }\n\n sorts = self.get_arguments('sort') or default_sort\n orders = self.get_arguments('order')\n\n for bad in set(sorts).difference(available):\n self.log.warning(\"ignoring invalid sort: %r\", bad)\n sorts.remove(bad)\n for bad in set(orders).difference({'asc', 'desc'}):\n 
self.log.warning(\"ignoring invalid order: %r\", bad)\n orders.remove(bad)\n\n # add default sort as secondary\n for s in default_sort:\n if s not in sorts:\n sorts.append(s)\n if len(orders) < len(sorts):\n for col in sorts[len(orders):]:\n orders.append(default_order[col])\n else:\n orders = orders[:len(sorts)]\n\n # this could be one incomprehensible nested list comprehension\n # get User columns\n cols = [ mapping[c] for c in sorts ]\n # get User.col.desc() order objects\n ordered = [ getattr(c, o)() for c, o in zip(cols, orders) ]\n\n users = self.db.query(orm.User).join(orm.Spawner).order_by(*ordered)\n users = [ self._user_from_orm(u) for u in users ]\n running = [ u for u in users if u.running ]\n\n html = self.render_template('admin.html',\n user=self.get_current_user(),\n admin_access=self.settings.get('admin_access', False),\n users=users,\n running=running,\n sort={s:o for s,o in zip(sorts, orders)},\n )\n self.finish(html)\n\n\nclass TokenPageHandler(BaseHandler):\n \"\"\"Handler for page requesting new API tokens\"\"\"\n\n @web.authenticated\n def get(self):\n html = self.render_template('token.html')\n self.finish(html)\n\n\nclass ProxyErrorHandler(BaseHandler):\n \"\"\"Handler for rendering proxy error pages\"\"\"\n \n def get(self, status_code_s):\n status_code = int(status_code_s)\n status_message = responses.get(status_code, 'Unknown HTTP Error')\n # build template namespace\n \n hub_home = url_path_join(self.hub.base_url, 'home')\n message_html = ''\n if status_code == 503:\n message_html = ' '.join([\n \"Your server appears to be down.\",\n \"Try restarting it <a href='%s'>from the hub</a>\" % hub_home\n ])\n ns = dict(\n status_code=status_code,\n status_message=status_message,\n message_html=message_html,\n logo_url=hub_home,\n )\n\n self.set_header('Content-Type', 'text/html')\n # render the template\n try:\n html = self.render_template('%s.html' % status_code, **ns)\n except TemplateNotFound:\n self.log.debug(\"No template for %d\", status_code)\n html = self.render_template('error.html', **ns)\n\n self.write(html)\n\n\ndefault_handlers = [\n (r'/?', RootHandler),\n (r'/home', HomeHandler),\n (r'/admin', AdminHandler),\n (r'/spawn', SpawnHandler),\n (r'/token', TokenPageHandler),\n (r'/error/(\\d+)', ProxyErrorHandler),\n]\n", "path": "jupyterhub/handlers/pages.py"}]} | 3,041 | 371 |
gh_patches_debug_44703 | rasdani/github-patches | git_diff | Lightning-AI__pytorch-lightning-649 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
num_training_batches rounds down, causing 0 batches count
## 🐛 Bug
self.num_training_batches is defined using int [here](https://github.com/williamFalcon/pytorch-lightning/blob/ca73b70d15bc8db3f57c1fd2d3bf152e6e1d7c4e/pytorch_lightning/trainer/data_loading.py#L52), which rounds it down to 0 when a small training_percent_check or overfit_pct is used, even though at least 1 batch is still processed.
This does not cause any errors in "vanilla" lightning, but crashes any user code that uses the number of batches in a division (for example to get an average of some quantity over batches).
### To Reproduce
Steps to reproduce the behavior:
Set the training percentage to a small enough percentage that the number of examples is smaller than the batch size for a given dataset.
This would require a very simple fix, either to use `math.ceil()` or `max(1, self.num_training_batches)`, depending of how the quantity is expected to behave in the rest of the code.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pytorch_lightning/trainer/data_loading.py`
Content:
```
1 import warnings
2 from abc import ABC
3
4 import torch.distributed as dist
5 try:
6 # loading for pyTorch 1.3
7 from torch.utils.data import IterableDataset
8 except ImportError:
9 # loading for pyTorch 1.1
10 import torch
11 warnings.warn('Your version of pyTorch %s does not support `IterableDataset`,'
12 ' please upgrade to 1.2+' % torch.__version__, ImportWarning)
13 EXIST_ITER_DATASET = False
14 else:
15 EXIST_ITER_DATASET = True
16 from torch.utils.data.distributed import DistributedSampler
17
18 from pytorch_lightning.utilities.debugging import MisconfigurationException
19
20 try:
21 from apex import amp
22
23 APEX_AVAILABLE = True
24 except ImportError:
25 APEX_AVAILABLE = False
26
27
28 class TrainerDataLoadingMixin(ABC):
29
30 def __init__(self):
31 # this is just a summary on variables used in this abstract class,
32 # the proper values/initialisation should be done in child class
33 self.proc_rank = None
34 self.use_ddp = None
35 self.use_ddp2 = None
36 self.shown_warnings = None
37 self.val_check_interval = None
38
39 def init_train_dataloader(self, model):
40 """
41 Dataloaders are provided by the model
42 :param model:
43 :return:
44 """
45 self.get_train_dataloader = model.train_dataloader
46
47 # determine number of training batches
48 if EXIST_ITER_DATASET and isinstance(self.get_train_dataloader().dataset, IterableDataset):
49 self.num_training_batches = float('inf')
50 else:
51 self.num_training_batches = len(self.get_train_dataloader())
52 self.num_training_batches = int(self.num_training_batches * self.train_percent_check)
53
54 # determine when to check validation
55 # if int passed in, val checks that often
56 # otherwise, it checks in [0, 1.0] % range of a training epoch
57 if isinstance(self.val_check_interval, int):
58 self.val_check_batch = self.val_check_interval
59 else:
60 self.val_check_batch = int(self.num_training_batches * self.val_check_interval)
61 self.val_check_batch = max(1, self.val_check_batch)
62
63 on_ddp = self.use_ddp or self.use_ddp2
64 if on_ddp and not isinstance(self.get_train_dataloader().sampler, DistributedSampler):
65 msg = """
66 You're using multiple gpus and multiple nodes without using a DistributedSampler
67 to assign a subset of your data to each process. To silence this warning, pass a
68 DistributedSampler to your DataLoader.
69
70 ie: this:
71 dataset = myDataset()
72 dataloader = Dataloader(dataset)
73
74 becomes:
75 dataset = myDataset()
76 dist_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
77 dataloader = Dataloader(dataset, sampler=dist_sampler)
78
79 If you want each process to load the full dataset, ignore this warning.
80 """
81 if msg not in self.shown_warnings and self.proc_rank == 0:
82 self.shown_warnings.add(msg)
83 warnings.warn(msg)
84
85 def init_val_dataloader(self, model):
86 """
87 Dataloaders are provided by the model
88 :param model:
89 :return:
90 """
91 self.get_val_dataloaders = model.val_dataloader
92
93 # determine number of validation batches
94 # val datasets could be none, 1 or 2+
95 if self.get_val_dataloaders() is not None:
96 self.num_val_batches = sum(len(dataloader) for dataloader in self.get_val_dataloaders())
97 self.num_val_batches = int(self.num_val_batches * self.val_percent_check)
98 self.num_val_batches = max(1, self.num_val_batches)
99
100 on_ddp = self.use_ddp or self.use_ddp2
101 if on_ddp and self.get_val_dataloaders() is not None:
102 for dataloader in self.get_val_dataloaders():
103 if not isinstance(dataloader.sampler, DistributedSampler):
104 msg = """
105 Your val_dataloader(s) don't use DistributedSampler.
106
107 You're using multiple gpus and multiple nodes without using a
108 DistributedSampler to assign a subset of your data to each process.
109 To silence this warning, pass a DistributedSampler to your DataLoader.
110
111 ie: this:
112 dataset = myDataset()
113 dataloader = Dataloader(dataset)
114
115 becomes:
116 dataset = myDataset()
117 dist_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
118 dataloader = Dataloader(dataset, sampler=dist_sampler)
119
120 If you want each process to load the full dataset, ignore this warning.
121 """
122 if msg not in self.shown_warnings and self.proc_rank == 0:
123 self.shown_warnings.add(msg)
124 warnings.warn(msg)
125 break
126
127 def init_test_dataloader(self, model):
128 """Dataloaders are provided by the model.
129
130 :param model:
131 """
132
133 self.get_test_dataloaders = model.test_dataloader
134
135 # determine number of test batches
136 if self.get_test_dataloaders() is not None:
137 len_sum = sum(len(dataloader) for dataloader in self.get_test_dataloaders())
138 self.num_test_batches = len_sum
139 self.num_test_batches = int(self.num_test_batches * self.test_percent_check)
140 self.num_test_batches = max(1, self.num_test_batches)
141
142 on_ddp = self.use_ddp or self.use_ddp2
143 if on_ddp and self.get_test_dataloaders() is not None:
144 for dataloader in self.get_test_dataloaders():
145 if not isinstance(dataloader.sampler, DistributedSampler):
146 msg = """
147 Your `test_dataloader(s)` don't use DistributedSampler.
148
149 You're using multiple gpus and multiple nodes without using a
150 DistributedSampler to assign a subset of your data to each process.
151 To silence this warning, pass a DistributedSampler to your DataLoader.
152
153 ie: this::
154
155 dataset = myDataset()
156 dataloader = Dataloader(dataset)
157
158 becomes::
159
160 dataset = myDataset()
161 dist_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
162 dataloader = Dataloader(dataset, sampler=dist_sampler)
163
164 If you want each process to load the full dataset, ignore this warning.
165 """
166 if msg not in self.shown_warnings and self.proc_rank == 0:
167 self.shown_warnings.add(msg)
168 warnings.warn(msg)
169 break
170
171 def get_dataloaders(self, model):
172 """
173 Dataloaders are provided by the model
174 :param model:
175 :return:
176 """
177
178 self.init_train_dataloader(model)
179 self.init_test_dataloader(model)
180 self.init_val_dataloader(model)
181
182 if self.use_ddp or self.use_ddp2:
183 # wait for all processes to catch up
184 dist.barrier()
185
186 # load each dataloader
187 self.get_train_dataloader()
188 self.get_test_dataloaders()
189 self.get_val_dataloaders()
190
191 # support IterableDataset for train data
192 self.is_iterable_train_dataloader = (
193 EXIST_ITER_DATASET and isinstance(self.get_train_dataloader().dataset, IterableDataset))
194 if self.is_iterable_train_dataloader and not isinstance(self.val_check_interval, int):
195 m = '''
196 When using an iterableDataset for `train_dataloader`,
197 `Trainer(val_check_interval)` must be an int.
198 An int k specifies checking validation every k training batches
199 '''
200 raise MisconfigurationException(m)
201
202 def determine_data_use_amount(self, train_percent_check, val_percent_check,
203 test_percent_check, overfit_pct):
204 """
205 Use less data for debugging purposes
206 """
207 self.train_percent_check = train_percent_check
208 self.val_percent_check = val_percent_check
209 self.test_percent_check = test_percent_check
210 if overfit_pct > 0:
211 self.train_percent_check = overfit_pct
212 self.val_percent_check = overfit_pct
213 self.test_percent_check = overfit_pct
214
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pytorch_lightning/trainer/data_loading.py b/pytorch_lightning/trainer/data_loading.py
--- a/pytorch_lightning/trainer/data_loading.py
+++ b/pytorch_lightning/trainer/data_loading.py
@@ -36,6 +36,15 @@
self.shown_warnings = None
self.val_check_interval = None
+ def _percent_range_check(self, name):
+ value = getattr(self, name)
+ msg = f"`{name}` must lie in the range [0.0, 1.0], but got {value:.3f}."
+ if name == "val_check_interval":
+ msg += " If you want to disable validation set `val_percent_check` to 0.0 instead."
+
+ if not 0. <= value <= 1.:
+ raise ValueError(msg)
+
def init_train_dataloader(self, model):
"""
Dataloaders are provided by the model
@@ -48,6 +57,8 @@
if EXIST_ITER_DATASET and isinstance(self.get_train_dataloader().dataset, IterableDataset):
self.num_training_batches = float('inf')
else:
+ self._percent_range_check('train_percent_check')
+
self.num_training_batches = len(self.get_train_dataloader())
self.num_training_batches = int(self.num_training_batches * self.train_percent_check)
@@ -56,7 +67,14 @@
# otherwise, it checks in [0, 1.0] % range of a training epoch
if isinstance(self.val_check_interval, int):
self.val_check_batch = self.val_check_interval
+ if self.val_check_batch > self.num_training_batches:
+ raise ValueError(
+ f"`val_check_interval` ({self.val_check_interval}) must be less than or equal "
+ f"to the number of the training batches ({self.num_training_batches}). "
+ f"If you want to disable validation set `val_percent_check` to 0.0 instead.")
else:
+ self._percent_range_check('val_check_interval')
+
self.val_check_batch = int(self.num_training_batches * self.val_check_interval)
self.val_check_batch = max(1, self.val_check_batch)
@@ -89,13 +107,15 @@
:return:
"""
self.get_val_dataloaders = model.val_dataloader
+ self.num_val_batches = 0
# determine number of validation batches
# val datasets could be none, 1 or 2+
if self.get_val_dataloaders() is not None:
+ self._percent_range_check('val_percent_check')
+
self.num_val_batches = sum(len(dataloader) for dataloader in self.get_val_dataloaders())
self.num_val_batches = int(self.num_val_batches * self.val_percent_check)
- self.num_val_batches = max(1, self.num_val_batches)
on_ddp = self.use_ddp or self.use_ddp2
if on_ddp and self.get_val_dataloaders() is not None:
@@ -134,10 +154,11 @@
# determine number of test batches
if self.get_test_dataloaders() is not None:
+ self._percent_range_check('test_percent_check')
+
len_sum = sum(len(dataloader) for dataloader in self.get_test_dataloaders())
self.num_test_batches = len_sum
self.num_test_batches = int(self.num_test_batches * self.test_percent_check)
- self.num_test_batches = max(1, self.num_test_batches)
on_ddp = self.use_ddp or self.use_ddp2
if on_ddp and self.get_test_dataloaders() is not None:
@@ -208,6 +229,10 @@
self.val_percent_check = val_percent_check
self.test_percent_check = test_percent_check
if overfit_pct > 0:
+ if overfit_pct > 1:
+ raise ValueError(f"`overfit_pct` must be not greater than 1.0, but got "
+ f"{overfit_pct:.3f}.")
+
self.train_percent_check = overfit_pct
self.val_percent_check = overfit_pct
self.test_percent_check = overfit_pct
| {"golden_diff": "diff --git a/pytorch_lightning/trainer/data_loading.py b/pytorch_lightning/trainer/data_loading.py\n--- a/pytorch_lightning/trainer/data_loading.py\n+++ b/pytorch_lightning/trainer/data_loading.py\n@@ -36,6 +36,15 @@\n self.shown_warnings = None\n self.val_check_interval = None\n \n+ def _percent_range_check(self, name):\n+ value = getattr(self, name)\n+ msg = f\"`{name}` must lie in the range [0.0, 1.0], but got {value:.3f}.\"\n+ if name == \"val_check_interval\":\n+ msg += \" If you want to disable validation set `val_percent_check` to 0.0 instead.\"\n+\n+ if not 0. <= value <= 1.:\n+ raise ValueError(msg)\n+\n def init_train_dataloader(self, model):\n \"\"\"\n Dataloaders are provided by the model\n@@ -48,6 +57,8 @@\n if EXIST_ITER_DATASET and isinstance(self.get_train_dataloader().dataset, IterableDataset):\n self.num_training_batches = float('inf')\n else:\n+ self._percent_range_check('train_percent_check')\n+\n self.num_training_batches = len(self.get_train_dataloader())\n self.num_training_batches = int(self.num_training_batches * self.train_percent_check)\n \n@@ -56,7 +67,14 @@\n # otherwise, it checks in [0, 1.0] % range of a training epoch\n if isinstance(self.val_check_interval, int):\n self.val_check_batch = self.val_check_interval\n+ if self.val_check_batch > self.num_training_batches:\n+ raise ValueError(\n+ f\"`val_check_interval` ({self.val_check_interval}) must be less than or equal \"\n+ f\"to the number of the training batches ({self.num_training_batches}). \"\n+ f\"If you want to disable validation set `val_percent_check` to 0.0 instead.\")\n else:\n+ self._percent_range_check('val_check_interval')\n+\n self.val_check_batch = int(self.num_training_batches * self.val_check_interval)\n self.val_check_batch = max(1, self.val_check_batch)\n \n@@ -89,13 +107,15 @@\n :return:\n \"\"\"\n self.get_val_dataloaders = model.val_dataloader\n+ self.num_val_batches = 0\n \n # determine number of validation batches\n # val datasets could be none, 1 or 2+\n if self.get_val_dataloaders() is not None:\n+ self._percent_range_check('val_percent_check')\n+\n self.num_val_batches = sum(len(dataloader) for dataloader in self.get_val_dataloaders())\n self.num_val_batches = int(self.num_val_batches * self.val_percent_check)\n- self.num_val_batches = max(1, self.num_val_batches)\n \n on_ddp = self.use_ddp or self.use_ddp2\n if on_ddp and self.get_val_dataloaders() is not None:\n@@ -134,10 +154,11 @@\n \n # determine number of test batches\n if self.get_test_dataloaders() is not None:\n+ self._percent_range_check('test_percent_check')\n+\n len_sum = sum(len(dataloader) for dataloader in self.get_test_dataloaders())\n self.num_test_batches = len_sum\n self.num_test_batches = int(self.num_test_batches * self.test_percent_check)\n- self.num_test_batches = max(1, self.num_test_batches)\n \n on_ddp = self.use_ddp or self.use_ddp2\n if on_ddp and self.get_test_dataloaders() is not None:\n@@ -208,6 +229,10 @@\n self.val_percent_check = val_percent_check\n self.test_percent_check = test_percent_check\n if overfit_pct > 0:\n+ if overfit_pct > 1:\n+ raise ValueError(f\"`overfit_pct` must be not greater than 1.0, but got \"\n+ f\"{overfit_pct:.3f}.\")\n+\n self.train_percent_check = overfit_pct\n self.val_percent_check = overfit_pct\n self.test_percent_check = overfit_pct\n", "issue": "num_training_batches rounds down, causing 0 batches count\n## \ud83d\udc1b Bug\r\n\r\nself.num_training_batches is defined using int 
[here](https://github.com/williamFalcon/pytorch-lightning/blob/ca73b70d15bc8db3f57c1fd2d3bf152e6e1d7c4e/pytorch_lightning/trainer/data_loading.py#L52), which rounds it down to 0 when a small training_percent_check or overfit_pct is used, even though at least 1 batch is still processed. \r\n\r\nThis does not cause any errors in \"vanilla\" lightning, but crashes any user code that uses the number of batches in a division (for example to get an average of some quantity over batches).\r\n\r\n### To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\nSet the training percentage to a small enough percentage that the number of examples is smaller than the batch size for a given dataset.\r\n\r\nThis would require a very simple fix, either to use `math.ceil()` or `max(1, self.num_training_batches)`, depending of how the quantity is expected to behave in the rest of the code.\r\n\n", "before_files": [{"content": "import warnings\nfrom abc import ABC\n\nimport torch.distributed as dist\ntry:\n # loading for pyTorch 1.3\n from torch.utils.data import IterableDataset\nexcept ImportError:\n # loading for pyTorch 1.1\n import torch\n warnings.warn('Your version of pyTorch %s does not support `IterableDataset`,'\n ' please upgrade to 1.2+' % torch.__version__, ImportWarning)\n EXIST_ITER_DATASET = False\nelse:\n EXIST_ITER_DATASET = True\nfrom torch.utils.data.distributed import DistributedSampler\n\nfrom pytorch_lightning.utilities.debugging import MisconfigurationException\n\ntry:\n from apex import amp\n\n APEX_AVAILABLE = True\nexcept ImportError:\n APEX_AVAILABLE = False\n\n\nclass TrainerDataLoadingMixin(ABC):\n\n def __init__(self):\n # this is just a summary on variables used in this abstract class,\n # the proper values/initialisation should be done in child class\n self.proc_rank = None\n self.use_ddp = None\n self.use_ddp2 = None\n self.shown_warnings = None\n self.val_check_interval = None\n\n def init_train_dataloader(self, model):\n \"\"\"\n Dataloaders are provided by the model\n :param model:\n :return:\n \"\"\"\n self.get_train_dataloader = model.train_dataloader\n\n # determine number of training batches\n if EXIST_ITER_DATASET and isinstance(self.get_train_dataloader().dataset, IterableDataset):\n self.num_training_batches = float('inf')\n else:\n self.num_training_batches = len(self.get_train_dataloader())\n self.num_training_batches = int(self.num_training_batches * self.train_percent_check)\n\n # determine when to check validation\n # if int passed in, val checks that often\n # otherwise, it checks in [0, 1.0] % range of a training epoch\n if isinstance(self.val_check_interval, int):\n self.val_check_batch = self.val_check_interval\n else:\n self.val_check_batch = int(self.num_training_batches * self.val_check_interval)\n self.val_check_batch = max(1, self.val_check_batch)\n\n on_ddp = self.use_ddp or self.use_ddp2\n if on_ddp and not isinstance(self.get_train_dataloader().sampler, DistributedSampler):\n msg = \"\"\"\n You're using multiple gpus and multiple nodes without using a DistributedSampler\n to assign a subset of your data to each process. 
To silence this warning, pass a\n DistributedSampler to your DataLoader.\n\n ie: this:\n dataset = myDataset()\n dataloader = Dataloader(dataset)\n\n becomes:\n dataset = myDataset()\n dist_sampler = torch.utils.data.distributed.DistributedSampler(dataset)\n dataloader = Dataloader(dataset, sampler=dist_sampler)\n\n If you want each process to load the full dataset, ignore this warning.\n \"\"\"\n if msg not in self.shown_warnings and self.proc_rank == 0:\n self.shown_warnings.add(msg)\n warnings.warn(msg)\n\n def init_val_dataloader(self, model):\n \"\"\"\n Dataloaders are provided by the model\n :param model:\n :return:\n \"\"\"\n self.get_val_dataloaders = model.val_dataloader\n\n # determine number of validation batches\n # val datasets could be none, 1 or 2+\n if self.get_val_dataloaders() is not None:\n self.num_val_batches = sum(len(dataloader) for dataloader in self.get_val_dataloaders())\n self.num_val_batches = int(self.num_val_batches * self.val_percent_check)\n self.num_val_batches = max(1, self.num_val_batches)\n\n on_ddp = self.use_ddp or self.use_ddp2\n if on_ddp and self.get_val_dataloaders() is not None:\n for dataloader in self.get_val_dataloaders():\n if not isinstance(dataloader.sampler, DistributedSampler):\n msg = \"\"\"\n Your val_dataloader(s) don't use DistributedSampler.\n\n You're using multiple gpus and multiple nodes without using a\n DistributedSampler to assign a subset of your data to each process.\n To silence this warning, pass a DistributedSampler to your DataLoader.\n\n ie: this:\n dataset = myDataset()\n dataloader = Dataloader(dataset)\n\n becomes:\n dataset = myDataset()\n dist_sampler = torch.utils.data.distributed.DistributedSampler(dataset)\n dataloader = Dataloader(dataset, sampler=dist_sampler)\n\n If you want each process to load the full dataset, ignore this warning.\n \"\"\"\n if msg not in self.shown_warnings and self.proc_rank == 0:\n self.shown_warnings.add(msg)\n warnings.warn(msg)\n break\n\n def init_test_dataloader(self, model):\n \"\"\"Dataloaders are provided by the model.\n\n :param model:\n \"\"\"\n\n self.get_test_dataloaders = model.test_dataloader\n\n # determine number of test batches\n if self.get_test_dataloaders() is not None:\n len_sum = sum(len(dataloader) for dataloader in self.get_test_dataloaders())\n self.num_test_batches = len_sum\n self.num_test_batches = int(self.num_test_batches * self.test_percent_check)\n self.num_test_batches = max(1, self.num_test_batches)\n\n on_ddp = self.use_ddp or self.use_ddp2\n if on_ddp and self.get_test_dataloaders() is not None:\n for dataloader in self.get_test_dataloaders():\n if not isinstance(dataloader.sampler, DistributedSampler):\n msg = \"\"\"\n Your `test_dataloader(s)` don't use DistributedSampler.\n\n You're using multiple gpus and multiple nodes without using a\n DistributedSampler to assign a subset of your data to each process.\n To silence this warning, pass a DistributedSampler to your DataLoader.\n\n ie: this::\n\n dataset = myDataset()\n dataloader = Dataloader(dataset)\n\n becomes::\n\n dataset = myDataset()\n dist_sampler = torch.utils.data.distributed.DistributedSampler(dataset)\n dataloader = Dataloader(dataset, sampler=dist_sampler)\n\n If you want each process to load the full dataset, ignore this warning.\n \"\"\"\n if msg not in self.shown_warnings and self.proc_rank == 0:\n self.shown_warnings.add(msg)\n warnings.warn(msg)\n break\n\n def get_dataloaders(self, model):\n \"\"\"\n Dataloaders are provided by the model\n :param model:\n :return:\n \"\"\"\n\n 
self.init_train_dataloader(model)\n self.init_test_dataloader(model)\n self.init_val_dataloader(model)\n\n if self.use_ddp or self.use_ddp2:\n # wait for all processes to catch up\n dist.barrier()\n\n # load each dataloader\n self.get_train_dataloader()\n self.get_test_dataloaders()\n self.get_val_dataloaders()\n\n # support IterableDataset for train data\n self.is_iterable_train_dataloader = (\n EXIST_ITER_DATASET and isinstance(self.get_train_dataloader().dataset, IterableDataset))\n if self.is_iterable_train_dataloader and not isinstance(self.val_check_interval, int):\n m = '''\n When using an iterableDataset for `train_dataloader`,\n `Trainer(val_check_interval)` must be an int.\n An int k specifies checking validation every k training batches\n '''\n raise MisconfigurationException(m)\n\n def determine_data_use_amount(self, train_percent_check, val_percent_check,\n test_percent_check, overfit_pct):\n \"\"\"\n Use less data for debugging purposes\n \"\"\"\n self.train_percent_check = train_percent_check\n self.val_percent_check = val_percent_check\n self.test_percent_check = test_percent_check\n if overfit_pct > 0:\n self.train_percent_check = overfit_pct\n self.val_percent_check = overfit_pct\n self.test_percent_check = overfit_pct\n", "path": "pytorch_lightning/trainer/data_loading.py"}], "after_files": [{"content": "import warnings\nfrom abc import ABC\n\nimport torch.distributed as dist\ntry:\n # loading for pyTorch 1.3\n from torch.utils.data import IterableDataset\nexcept ImportError:\n # loading for pyTorch 1.1\n import torch\n warnings.warn('Your version of pyTorch %s does not support `IterableDataset`,'\n ' please upgrade to 1.2+' % torch.__version__, ImportWarning)\n EXIST_ITER_DATASET = False\nelse:\n EXIST_ITER_DATASET = True\nfrom torch.utils.data.distributed import DistributedSampler\n\nfrom pytorch_lightning.utilities.debugging import MisconfigurationException\n\ntry:\n from apex import amp\n\n APEX_AVAILABLE = True\nexcept ImportError:\n APEX_AVAILABLE = False\n\n\nclass TrainerDataLoadingMixin(ABC):\n\n def __init__(self):\n # this is just a summary on variables used in this abstract class,\n # the proper values/initialisation should be done in child class\n self.proc_rank = None\n self.use_ddp = None\n self.use_ddp2 = None\n self.shown_warnings = None\n self.val_check_interval = None\n\n def _percent_range_check(self, name):\n value = getattr(self, name)\n msg = f\"`{name}` must lie in the range [0.0, 1.0], but got {value:.3f}.\"\n if name == \"val_check_interval\":\n msg += \" If you want to disable validation set `val_percent_check` to 0.0 instead.\"\n\n if not 0. 
<= value <= 1.:\n raise ValueError(msg)\n\n def init_train_dataloader(self, model):\n \"\"\"\n Dataloaders are provided by the model\n :param model:\n :return:\n \"\"\"\n self.get_train_dataloader = model.train_dataloader\n\n # determine number of training batches\n if EXIST_ITER_DATASET and isinstance(self.get_train_dataloader().dataset, IterableDataset):\n self.num_training_batches = float('inf')\n else:\n self._percent_range_check('train_percent_check')\n\n self.num_training_batches = len(self.get_train_dataloader())\n self.num_training_batches = int(self.num_training_batches * self.train_percent_check)\n\n # determine when to check validation\n # if int passed in, val checks that often\n # otherwise, it checks in [0, 1.0] % range of a training epoch\n if isinstance(self.val_check_interval, int):\n self.val_check_batch = self.val_check_interval\n if self.val_check_batch > self.num_training_batches:\n raise ValueError(\n f\"`val_check_interval` ({self.val_check_interval}) must be less than or equal \"\n f\"to the number of the training batches ({self.num_training_batches}). \"\n f\"If you want to disable validation set `val_percent_check` to 0.0 instead.\")\n else:\n self._percent_range_check('val_check_interval')\n\n self.val_check_batch = int(self.num_training_batches * self.val_check_interval)\n self.val_check_batch = max(1, self.val_check_batch)\n\n on_ddp = self.use_ddp or self.use_ddp2\n if on_ddp and not isinstance(self.get_train_dataloader().sampler, DistributedSampler):\n msg = \"\"\"\n You're using multiple gpus and multiple nodes without using a DistributedSampler\n to assign a subset of your data to each process. To silence this warning, pass a\n DistributedSampler to your DataLoader.\n\n ie: this:\n dataset = myDataset()\n dataloader = Dataloader(dataset)\n\n becomes:\n dataset = myDataset()\n dist_sampler = torch.utils.data.distributed.DistributedSampler(dataset)\n dataloader = Dataloader(dataset, sampler=dist_sampler)\n\n If you want each process to load the full dataset, ignore this warning.\n \"\"\"\n if msg not in self.shown_warnings and self.proc_rank == 0:\n self.shown_warnings.add(msg)\n warnings.warn(msg)\n\n def init_val_dataloader(self, model):\n \"\"\"\n Dataloaders are provided by the model\n :param model:\n :return:\n \"\"\"\n self.get_val_dataloaders = model.val_dataloader\n self.num_val_batches = 0\n\n # determine number of validation batches\n # val datasets could be none, 1 or 2+\n if self.get_val_dataloaders() is not None:\n self._percent_range_check('val_percent_check')\n\n self.num_val_batches = sum(len(dataloader) for dataloader in self.get_val_dataloaders())\n self.num_val_batches = int(self.num_val_batches * self.val_percent_check)\n\n on_ddp = self.use_ddp or self.use_ddp2\n if on_ddp and self.get_val_dataloaders() is not None:\n for dataloader in self.get_val_dataloaders():\n if not isinstance(dataloader.sampler, DistributedSampler):\n msg = \"\"\"\n Your val_dataloader(s) don't use DistributedSampler.\n\n You're using multiple gpus and multiple nodes without using a\n DistributedSampler to assign a subset of your data to each process.\n To silence this warning, pass a DistributedSampler to your DataLoader.\n\n ie: this:\n dataset = myDataset()\n dataloader = Dataloader(dataset)\n\n becomes:\n dataset = myDataset()\n dist_sampler = torch.utils.data.distributed.DistributedSampler(dataset)\n dataloader = Dataloader(dataset, sampler=dist_sampler)\n\n If you want each process to load the full dataset, ignore this warning.\n \"\"\"\n if msg not in 
self.shown_warnings and self.proc_rank == 0:\n self.shown_warnings.add(msg)\n warnings.warn(msg)\n break\n\n def init_test_dataloader(self, model):\n \"\"\"Dataloaders are provided by the model.\n\n :param model:\n \"\"\"\n\n self.get_test_dataloaders = model.test_dataloader\n\n # determine number of test batches\n if self.get_test_dataloaders() is not None:\n self._percent_range_check('test_percent_check')\n\n len_sum = sum(len(dataloader) for dataloader in self.get_test_dataloaders())\n self.num_test_batches = len_sum\n self.num_test_batches = int(self.num_test_batches * self.test_percent_check)\n\n on_ddp = self.use_ddp or self.use_ddp2\n if on_ddp and self.get_test_dataloaders() is not None:\n for dataloader in self.get_test_dataloaders():\n if not isinstance(dataloader.sampler, DistributedSampler):\n msg = \"\"\"\n Your `test_dataloader(s)` don't use DistributedSampler.\n\n You're using multiple gpus and multiple nodes without using a\n DistributedSampler to assign a subset of your data to each process.\n To silence this warning, pass a DistributedSampler to your DataLoader.\n\n ie: this::\n\n dataset = myDataset()\n dataloader = Dataloader(dataset)\n\n becomes::\n\n dataset = myDataset()\n dist_sampler = torch.utils.data.distributed.DistributedSampler(dataset)\n dataloader = Dataloader(dataset, sampler=dist_sampler)\n\n If you want each process to load the full dataset, ignore this warning.\n \"\"\"\n if msg not in self.shown_warnings and self.proc_rank == 0:\n self.shown_warnings.add(msg)\n warnings.warn(msg)\n break\n\n def get_dataloaders(self, model):\n \"\"\"\n Dataloaders are provided by the model\n :param model:\n :return:\n \"\"\"\n\n self.init_train_dataloader(model)\n self.init_test_dataloader(model)\n self.init_val_dataloader(model)\n\n if self.use_ddp or self.use_ddp2:\n # wait for all processes to catch up\n dist.barrier()\n\n # load each dataloader\n self.get_train_dataloader()\n self.get_test_dataloaders()\n self.get_val_dataloaders()\n\n # support IterableDataset for train data\n self.is_iterable_train_dataloader = (\n EXIST_ITER_DATASET and isinstance(self.get_train_dataloader().dataset, IterableDataset))\n if self.is_iterable_train_dataloader and not isinstance(self.val_check_interval, int):\n m = '''\n When using an iterableDataset for `train_dataloader`,\n `Trainer(val_check_interval)` must be an int.\n An int k specifies checking validation every k training batches\n '''\n raise MisconfigurationException(m)\n\n def determine_data_use_amount(self, train_percent_check, val_percent_check,\n test_percent_check, overfit_pct):\n \"\"\"\n Use less data for debugging purposes\n \"\"\"\n self.train_percent_check = train_percent_check\n self.val_percent_check = val_percent_check\n self.test_percent_check = test_percent_check\n if overfit_pct > 0:\n if overfit_pct > 1:\n raise ValueError(f\"`overfit_pct` must be not greater than 1.0, but got \"\n f\"{overfit_pct:.3f}.\")\n\n self.train_percent_check = overfit_pct\n self.val_percent_check = overfit_pct\n self.test_percent_check = overfit_pct\n", "path": "pytorch_lightning/trainer/data_loading.py"}]} | 2,791 | 939 |
gh_patches_debug_15355 | rasdani/github-patches | git_diff | qutebrowser__qutebrowser-6205 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error page does not fully work without JS
When a page cannot load due to an error (e.g., renderer process killed), an error page with the text "Unable to load page" is displayed with the URL of the page that I tried to load and a button to "Try again"; however, clicking on this button has no effect. Further inspection reveals that this button relies on JavaScript (I have `content.javascript.enabled` set to false by default).
As a small quality of life improvement, would it be feasible to change the button to a simple link that would work without JS? I would be happy to open a pull request with this change (it seems that this would only entail changing a few lines in [`error.html`](https://github.com/qutebrowser/qutebrowser/blob/master/qutebrowser/html/error.html)); I just wanted to check first if that would be a good idea.
--- END ISSUE ---
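For context, a minimal sketch of the no-JS link the issue proposes. The template string and the `url` variable are illustrative assumptions, not qutebrowser's actual `error.html` markup:

```python
# Hedged sketch: a plain anchor reloads the failed page without any JavaScript.
import jinja2

env = jinja2.Environment(autoescape=True, undefined=jinja2.StrictUndefined)
template = env.from_string('<a href="{{ url }}">Try again</a>')  # assumed markup
print(template.render(url="https://example.com/page-that-failed"))
```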
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qutebrowser/utils/jinja.py`
Content:
```
1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
2
3 # Copyright 2014-2021 Florian Bruhin (The Compiler) <[email protected]>
4 #
5 # This file is part of qutebrowser.
6 #
7 # qutebrowser is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # qutebrowser is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with qutebrowser. If not, see <https://www.gnu.org/licenses/>.
19
20 """Utilities related to jinja2."""
21
22 import os
23 import os.path
24 import posixpath
25 import functools
26 import contextlib
27 import html
28 from typing import Any, Callable, FrozenSet, Iterator, List, Set, Tuple
29
30 import jinja2
31 import jinja2.nodes
32 from PyQt5.QtCore import QUrl
33
34 from qutebrowser.utils import utils, urlutils, log, qtutils, javascript
35 from qutebrowser.misc import debugcachestats
36
37
38 html_fallback = """
39 <!DOCTYPE html>
40 <html>
41 <head>
42 <meta charset="utf-8">
43 <title>Error while loading template</title>
44 </head>
45 <body>
46 <p><span style="font-size:120%;color:red">
47 The %FILE% template could not be found!<br>
48 Please check your qutebrowser installation
49 </span><br>
50 %ERROR%
51 </p>
52 </body>
53 </html>
54 """
55
56
57 class Loader(jinja2.BaseLoader):
58
59 """Jinja loader which uses utils.read_file to load templates.
60
61 Attributes:
62 _subdir: The subdirectory to find templates in.
63 """
64
65 def __init__(self, subdir: str) -> None:
66 self._subdir = subdir
67
68 def get_source(
69 self,
70 _env: jinja2.Environment,
71 template: str
72 ) -> Tuple[str, str, Callable[[], bool]]:
73 path = os.path.join(self._subdir, template)
74 try:
75 source = utils.read_file(path)
76 except OSError as e:
77 source = html_fallback.replace("%ERROR%", html.escape(str(e)))
78 source = source.replace("%FILE%", html.escape(template))
79 log.misc.exception("The {} template could not be loaded from {}"
80 .format(template, path))
81 # Currently we don't implement auto-reloading, so we always return True
82 # for up-to-date.
83 return source, path, lambda: True
84
85
86 class Environment(jinja2.Environment):
87
88 """Our own jinja environment which is more strict."""
89
90 def __init__(self) -> None:
91 super().__init__(loader=Loader('html'),
92 autoescape=lambda _name: self._autoescape,
93 undefined=jinja2.StrictUndefined)
94 self.globals['resource_url'] = self._resource_url
95 self.globals['file_url'] = urlutils.file_url
96 self.globals['data_url'] = self._data_url
97 self.globals['qcolor_to_qsscolor'] = qtutils.qcolor_to_qsscolor
98 self.filters['js_string_escape'] = javascript.string_escape
99 self._autoescape = True
100
101 @contextlib.contextmanager
102 def no_autoescape(self) -> Iterator[None]:
103 """Context manager to temporarily turn off autoescaping."""
104 self._autoescape = False
105 yield
106 self._autoescape = True
107
108 def _resource_url(self, path: str) -> str:
109 """Load qutebrowser resource files.
110
111 Arguments:
112 path: The relative path to the resource.
113 """
114 assert not posixpath.isabs(path), path
115 url = QUrl('qute://resource')
116 url.setPath('/' + path)
117 urlutils.ensure_valid(url)
118 urlstr = url.toString(QUrl.FullyEncoded) # type: ignore[arg-type]
119 return urlstr
120
121 def _data_url(self, path: str) -> str:
122 """Get a data: url for the broken qutebrowser logo."""
123 data = utils.read_file_binary(path)
124 mimetype = utils.guess_mimetype(path)
125 return urlutils.data_url(mimetype, data).toString()
126
127 def getattr(self, obj: Any, attribute: str) -> Any:
128 """Override jinja's getattr() to be less clever.
129
130 This means it doesn't fall back to __getitem__, and it doesn't hide
131 AttributeError.
132 """
133 return getattr(obj, attribute)
134
135
136 def render(template: str, **kwargs: Any) -> str:
137 """Render the given template and pass the given arguments to it."""
138 return environment.get_template(template).render(**kwargs)
139
140
141 environment = Environment()
142 js_environment = jinja2.Environment(loader=Loader('javascript'))
143
144
145 @debugcachestats.register()
146 @functools.lru_cache()
147 def template_config_variables(template: str) -> FrozenSet[str]:
148 """Return the config variables used in the template."""
149 unvisted_nodes = [environment.parse(template)]
150 result: Set[str] = set()
151 while unvisted_nodes:
152 node = unvisted_nodes.pop()
153 if not isinstance(node, jinja2.nodes.Getattr):
154 unvisted_nodes.extend(node.iter_child_nodes())
155 continue
156
157 # List of attribute names in reverse order.
158 # For example it's ['ab', 'c', 'd'] for 'conf.d.c.ab'.
159 attrlist: List[str] = []
160 while isinstance(node, jinja2.nodes.Getattr):
161 attrlist.append(node.attr) # type: ignore[attr-defined]
162 node = node.node # type: ignore[attr-defined]
163
164 if isinstance(node, jinja2.nodes.Name):
165 if node.name == 'conf': # type: ignore[attr-defined]
166 result.add('.'.join(reversed(attrlist)))
167 # otherwise, the node is a Name node so it doesn't have any
168 # child nodes
169 else:
170 unvisted_nodes.append(node)
171
172 from qutebrowser.config import config
173 for option in result:
174 config.instance.ensure_has_opt(option)
175
176 return frozenset(result)
177
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/qutebrowser/utils/jinja.py b/qutebrowser/utils/jinja.py
--- a/qutebrowser/utils/jinja.py
+++ b/qutebrowser/utils/jinja.py
@@ -31,7 +31,7 @@
import jinja2.nodes
from PyQt5.QtCore import QUrl
-from qutebrowser.utils import utils, urlutils, log, qtutils, javascript
+from qutebrowser.utils import utils, urlutils, log, qtutils
from qutebrowser.misc import debugcachestats
@@ -95,7 +95,6 @@
self.globals['file_url'] = urlutils.file_url
self.globals['data_url'] = self._data_url
self.globals['qcolor_to_qsscolor'] = qtutils.qcolor_to_qsscolor
- self.filters['js_string_escape'] = javascript.string_escape
self._autoescape = True
@contextlib.contextmanager
| {"golden_diff": "diff --git a/qutebrowser/utils/jinja.py b/qutebrowser/utils/jinja.py\n--- a/qutebrowser/utils/jinja.py\n+++ b/qutebrowser/utils/jinja.py\n@@ -31,7 +31,7 @@\n import jinja2.nodes\n from PyQt5.QtCore import QUrl\n \n-from qutebrowser.utils import utils, urlutils, log, qtutils, javascript\n+from qutebrowser.utils import utils, urlutils, log, qtutils\n from qutebrowser.misc import debugcachestats\n \n \n@@ -95,7 +95,6 @@\n self.globals['file_url'] = urlutils.file_url\n self.globals['data_url'] = self._data_url\n self.globals['qcolor_to_qsscolor'] = qtutils.qcolor_to_qsscolor\n- self.filters['js_string_escape'] = javascript.string_escape\n self._autoescape = True\n \n @contextlib.contextmanager\n", "issue": "Error page does not fully work without JS\nWhen a page cannot load due to an error (e.g., renderer process killed), an error page with the text \"Unable to load page\" is displayed with the URL of the page that I tried to load and a button to \"Try again\"; however, clicking on this button has no effect. Further inspection reveals that this button relies on JavaScript (I have `content.javascript.enabled` set to false by default).\r\n\r\nAs a small quality of life improvement, would it be feasible to change the button to a simple link that would work without JS? I would be happy to open a pull request with this change (it seems that this would only entail changing a few lines in [`error.html`](https://github.com/qutebrowser/qutebrowser/blob/master/qutebrowser/html/error.html)); I just wanted to check first if that would be a good idea.\n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2021 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <https://www.gnu.org/licenses/>.\n\n\"\"\"Utilities related to jinja2.\"\"\"\n\nimport os\nimport os.path\nimport posixpath\nimport functools\nimport contextlib\nimport html\nfrom typing import Any, Callable, FrozenSet, Iterator, List, Set, Tuple\n\nimport jinja2\nimport jinja2.nodes\nfrom PyQt5.QtCore import QUrl\n\nfrom qutebrowser.utils import utils, urlutils, log, qtutils, javascript\nfrom qutebrowser.misc import debugcachestats\n\n\nhtml_fallback = \"\"\"\n<!DOCTYPE html>\n<html>\n <head>\n <meta charset=\"utf-8\">\n <title>Error while loading template</title>\n </head>\n <body>\n <p><span style=\"font-size:120%;color:red\">\n The %FILE% template could not be found!<br>\n Please check your qutebrowser installation\n </span><br>\n %ERROR%\n </p>\n </body>\n</html>\n\"\"\"\n\n\nclass Loader(jinja2.BaseLoader):\n\n \"\"\"Jinja loader which uses utils.read_file to load templates.\n\n Attributes:\n _subdir: The subdirectory to find templates in.\n \"\"\"\n\n def __init__(self, subdir: str) -> None:\n self._subdir = subdir\n\n def get_source(\n self,\n _env: jinja2.Environment,\n template: str\n ) -> Tuple[str, str, Callable[[], bool]]:\n path = os.path.join(self._subdir, template)\n try:\n source = utils.read_file(path)\n except OSError as e:\n source = html_fallback.replace(\"%ERROR%\", html.escape(str(e)))\n source = source.replace(\"%FILE%\", html.escape(template))\n log.misc.exception(\"The {} template could not be loaded from {}\"\n .format(template, path))\n # Currently we don't implement auto-reloading, so we always return True\n # for up-to-date.\n return source, path, lambda: True\n\n\nclass Environment(jinja2.Environment):\n\n \"\"\"Our own jinja environment which is more strict.\"\"\"\n\n def __init__(self) -> None:\n super().__init__(loader=Loader('html'),\n autoescape=lambda _name: self._autoescape,\n undefined=jinja2.StrictUndefined)\n self.globals['resource_url'] = self._resource_url\n self.globals['file_url'] = urlutils.file_url\n self.globals['data_url'] = self._data_url\n self.globals['qcolor_to_qsscolor'] = qtutils.qcolor_to_qsscolor\n self.filters['js_string_escape'] = javascript.string_escape\n self._autoescape = True\n\n @contextlib.contextmanager\n def no_autoescape(self) -> Iterator[None]:\n \"\"\"Context manager to temporarily turn off autoescaping.\"\"\"\n self._autoescape = False\n yield\n self._autoescape = True\n\n def _resource_url(self, path: str) -> str:\n \"\"\"Load qutebrowser resource files.\n\n Arguments:\n path: The relative path to the resource.\n \"\"\"\n assert not posixpath.isabs(path), path\n url = QUrl('qute://resource')\n url.setPath('/' + path)\n urlutils.ensure_valid(url)\n urlstr = url.toString(QUrl.FullyEncoded) # type: ignore[arg-type]\n return urlstr\n\n def _data_url(self, path: str) -> str:\n \"\"\"Get a data: url for the broken qutebrowser logo.\"\"\"\n data = utils.read_file_binary(path)\n mimetype = utils.guess_mimetype(path)\n return urlutils.data_url(mimetype, data).toString()\n\n def getattr(self, obj: Any, attribute: str) -> Any:\n \"\"\"Override jinja's getattr() to be less clever.\n\n This means it doesn't fall back to __getitem__, and it doesn't hide\n AttributeError.\n \"\"\"\n return getattr(obj, attribute)\n\n\ndef render(template: str, **kwargs: Any) -> str:\n \"\"\"Render the given template and pass the given arguments to it.\"\"\"\n return environment.get_template(template).render(**kwargs)\n\n\nenvironment = Environment()\njs_environment = jinja2.Environment(loader=Loader('javascript'))\n\n\[email 
protected]()\[email protected]_cache()\ndef template_config_variables(template: str) -> FrozenSet[str]:\n \"\"\"Return the config variables used in the template.\"\"\"\n unvisted_nodes = [environment.parse(template)]\n result: Set[str] = set()\n while unvisted_nodes:\n node = unvisted_nodes.pop()\n if not isinstance(node, jinja2.nodes.Getattr):\n unvisted_nodes.extend(node.iter_child_nodes())\n continue\n\n # List of attribute names in reverse order.\n # For example it's ['ab', 'c', 'd'] for 'conf.d.c.ab'.\n attrlist: List[str] = []\n while isinstance(node, jinja2.nodes.Getattr):\n attrlist.append(node.attr) # type: ignore[attr-defined]\n node = node.node # type: ignore[attr-defined]\n\n if isinstance(node, jinja2.nodes.Name):\n if node.name == 'conf': # type: ignore[attr-defined]\n result.add('.'.join(reversed(attrlist)))\n # otherwise, the node is a Name node so it doesn't have any\n # child nodes\n else:\n unvisted_nodes.append(node)\n\n from qutebrowser.config import config\n for option in result:\n config.instance.ensure_has_opt(option)\n\n return frozenset(result)\n", "path": "qutebrowser/utils/jinja.py"}], "after_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2021 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <https://www.gnu.org/licenses/>.\n\n\"\"\"Utilities related to jinja2.\"\"\"\n\nimport os\nimport os.path\nimport posixpath\nimport functools\nimport contextlib\nimport html\nfrom typing import Any, Callable, FrozenSet, Iterator, List, Set, Tuple\n\nimport jinja2\nimport jinja2.nodes\nfrom PyQt5.QtCore import QUrl\n\nfrom qutebrowser.utils import utils, urlutils, log, qtutils\nfrom qutebrowser.misc import debugcachestats\n\n\nhtml_fallback = \"\"\"\n<!DOCTYPE html>\n<html>\n <head>\n <meta charset=\"utf-8\">\n <title>Error while loading template</title>\n </head>\n <body>\n <p><span style=\"font-size:120%;color:red\">\n The %FILE% template could not be found!<br>\n Please check your qutebrowser installation\n </span><br>\n %ERROR%\n </p>\n </body>\n</html>\n\"\"\"\n\n\nclass Loader(jinja2.BaseLoader):\n\n \"\"\"Jinja loader which uses utils.read_file to load templates.\n\n Attributes:\n _subdir: The subdirectory to find templates in.\n \"\"\"\n\n def __init__(self, subdir: str) -> None:\n self._subdir = subdir\n\n def get_source(\n self,\n _env: jinja2.Environment,\n template: str\n ) -> Tuple[str, str, Callable[[], bool]]:\n path = os.path.join(self._subdir, template)\n try:\n source = utils.read_file(path)\n except OSError as e:\n source = html_fallback.replace(\"%ERROR%\", html.escape(str(e)))\n source = source.replace(\"%FILE%\", html.escape(template))\n log.misc.exception(\"The {} template could not be loaded from {}\"\n .format(template, path))\n # Currently we don't implement auto-reloading, so we always return True\n # for up-to-date.\n return source, path, lambda: True\n\n\nclass Environment(jinja2.Environment):\n\n \"\"\"Our own jinja environment which is more strict.\"\"\"\n\n def __init__(self) -> None:\n super().__init__(loader=Loader('html'),\n autoescape=lambda _name: self._autoescape,\n undefined=jinja2.StrictUndefined)\n self.globals['resource_url'] = self._resource_url\n self.globals['file_url'] = urlutils.file_url\n self.globals['data_url'] = self._data_url\n self.globals['qcolor_to_qsscolor'] = qtutils.qcolor_to_qsscolor\n self._autoescape = True\n\n @contextlib.contextmanager\n def no_autoescape(self) -> Iterator[None]:\n \"\"\"Context manager to temporarily turn off autoescaping.\"\"\"\n self._autoescape = False\n yield\n self._autoescape = True\n\n def _resource_url(self, path: str) -> str:\n \"\"\"Load qutebrowser resource files.\n\n Arguments:\n path: The relative path to the resource.\n \"\"\"\n assert not posixpath.isabs(path), path\n url = QUrl('qute://resource')\n url.setPath('/' + path)\n urlutils.ensure_valid(url)\n urlstr = url.toString(QUrl.FullyEncoded) # type: ignore[arg-type]\n return urlstr\n\n def _data_url(self, path: str) -> str:\n \"\"\"Get a data: url for the broken qutebrowser logo.\"\"\"\n data = utils.read_file_binary(path)\n mimetype = utils.guess_mimetype(path)\n return urlutils.data_url(mimetype, data).toString()\n\n def getattr(self, obj: Any, attribute: str) -> Any:\n \"\"\"Override jinja's getattr() to be less clever.\n\n This means it doesn't fall back to __getitem__, and it doesn't hide\n AttributeError.\n \"\"\"\n return getattr(obj, attribute)\n\n\ndef render(template: str, **kwargs: Any) -> str:\n \"\"\"Render the given template and pass the given arguments to it.\"\"\"\n return environment.get_template(template).render(**kwargs)\n\n\nenvironment = Environment()\njs_environment = jinja2.Environment(loader=Loader('javascript'))\n\n\[email protected]()\[email protected]_cache()\ndef template_config_variables(template: str) 
-> FrozenSet[str]:\n \"\"\"Return the config variables used in the template.\"\"\"\n unvisted_nodes = [environment.parse(template)]\n result: Set[str] = set()\n while unvisted_nodes:\n node = unvisted_nodes.pop()\n if not isinstance(node, jinja2.nodes.Getattr):\n unvisted_nodes.extend(node.iter_child_nodes())\n continue\n\n # List of attribute names in reverse order.\n # For example it's ['ab', 'c', 'd'] for 'conf.d.c.ab'.\n attrlist: List[str] = []\n while isinstance(node, jinja2.nodes.Getattr):\n attrlist.append(node.attr) # type: ignore[attr-defined]\n node = node.node # type: ignore[attr-defined]\n\n if isinstance(node, jinja2.nodes.Name):\n if node.name == 'conf': # type: ignore[attr-defined]\n result.add('.'.join(reversed(attrlist)))\n # otherwise, the node is a Name node so it doesn't have any\n # child nodes\n else:\n unvisted_nodes.append(node)\n\n from qutebrowser.config import config\n for option in result:\n config.instance.ensure_has_opt(option)\n\n return frozenset(result)\n", "path": "qutebrowser/utils/jinja.py"}]} | 2,267 | 206 |
gh_patches_debug_29868 | rasdani/github-patches | git_diff | piskvorky__gensim-1653 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error while summarizing text
Hi,
I've received the following error when trying to summarize the body of this news article:
https://www.theguardian.com/media/2016/jun/19/sun-times-brexit-in-out-shake-it-all-about
The error follows:
File "/home/apps/comment_parser/venv/local/lib/python2.7/site-packages/gensim/summarization/summarizer.py", line 202, in summarize
most_important_docs = summarize_corpus(corpus, ratio=ratio if word_count is None else 1)
File "/home/apps/comment_parser/venv/local/lib/python2.7/site-packages/gensim/summarization/summarizer.py", line 161, in summarize_corpus
pagerank_scores = _pagerank(graph)
File "/home/apps/comment_parser/venv/local/lib/python2.7/site-packages/gensim/summarization/pagerank_weighted.py", line 24, in pagerank_weighted
vals, vecs = eigs(pagerank_matrix.T, k=1) # TODO raise an error if matrix has complex eigenvectors?
File "/usr/lib/python2.7/dist-packages/scipy/sparse/linalg/eigen/arpack/arpack.py", line 1271, in eigs
ncv, v0, maxiter, which, tol)
File "/usr/lib/python2.7/dist-packages/scipy/sparse/linalg/eigen/arpack/arpack.py", line 685, in __init__
raise ValueError("k must be less than ndim(A)-1, k=%d" % k)
ValueError: k must be less than ndim(A)-1, k=1
Regards,
--- END ISSUE ---
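The traceback comes from ARPACK's requirement that `k < N - 1`: when the PageRank matrix ends up smaller than 3x3 (for example, only two sentences survive preprocessing), `k=1` is already out of range. A minimal sketch of the failure and the usual dense fallback, with made-up matrix values for illustration:

```python
import numpy as np
from scipy.linalg import eig
from scipy.sparse.linalg import eigs

# A 2x2 PageRank-style matrix, as produced when only two sentences remain.
a = np.array([[0.575, 0.425],
              [0.425, 0.575]])

try:
    eigs(a, k=1)  # ARPACK needs k < N - 1, so this raises for N = 2
except ValueError as exc:
    print(exc)  # k must be less than ndim(A)-1, k=1

# The dense solver works for any size; keep the eigenvector of the largest eigenvalue.
vals, vecs = eig(a)
principal = vecs[:, np.abs(vals).argmax()]
print(principal.real)
```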
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gensim/summarization/pagerank_weighted.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
5 from numpy import empty as empty_matrix
6 from scipy.sparse import csr_matrix
7 from scipy.sparse.linalg import eigs
8 from six.moves import xrange
9
10 try:
11 from numpy import VisibleDeprecationWarning
12 import warnings
13 warnings.filterwarnings("ignore", category=VisibleDeprecationWarning)
14 except ImportError:
15 pass
16
17
18 def pagerank_weighted(graph, damping=0.85):
19 adjacency_matrix = build_adjacency_matrix(graph)
20 probability_matrix = build_probability_matrix(graph)
21
22 pagerank_matrix = damping * adjacency_matrix.todense() + (1 - damping) * probability_matrix
23
24 vals, vecs = eigs(pagerank_matrix.T, k=1) # TODO raise an error if matrix has complex eigenvectors?
25
26 return process_results(graph, vecs.real)
27
28
29 def build_adjacency_matrix(graph):
30 row = []
31 col = []
32 data = []
33 nodes = graph.nodes()
34 length = len(nodes)
35
36 for i in xrange(length):
37 current_node = nodes[i]
38 neighbors_sum = sum(graph.edge_weight((current_node, neighbor)) for neighbor in graph.neighbors(current_node))
39 for j in xrange(length):
40 edge_weight = float(graph.edge_weight((current_node, nodes[j])))
41 if i != j and edge_weight != 0.0:
42 row.append(i)
43 col.append(j)
44 data.append(edge_weight / neighbors_sum)
45
46 return csr_matrix((data, (row, col)), shape=(length, length))
47
48
49 def build_probability_matrix(graph):
50 dimension = len(graph.nodes())
51 matrix = empty_matrix((dimension, dimension))
52
53 probability = 1.0 / float(dimension)
54 matrix.fill(probability)
55
56 return matrix
57
58
59 def process_results(graph, vecs):
60 scores = {}
61 for i, node in enumerate(graph.nodes()):
62 scores[node] = abs(vecs[i, :])
63
64 return scores
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gensim/summarization/pagerank_weighted.py b/gensim/summarization/pagerank_weighted.py
--- a/gensim/summarization/pagerank_weighted.py
+++ b/gensim/summarization/pagerank_weighted.py
@@ -2,7 +2,9 @@
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
+import numpy
from numpy import empty as empty_matrix
+from scipy.linalg import eig
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import eigs
from six.moves import xrange
@@ -21,9 +23,10 @@
pagerank_matrix = damping * adjacency_matrix.todense() + (1 - damping) * probability_matrix
- vals, vecs = eigs(pagerank_matrix.T, k=1) # TODO raise an error if matrix has complex eigenvectors?
+ vec = principal_eigenvector(pagerank_matrix.T)
- return process_results(graph, vecs.real)
+ # Because pagerank_matrix is positive, vec is always real (i.e. not complex)
+ return process_results(graph, vec.real)
def build_adjacency_matrix(graph):
@@ -56,9 +59,23 @@
return matrix
-def process_results(graph, vecs):
+def principal_eigenvector(a):
+ # Note that we prefer to use `eigs` even for dense matrix
+ # because we need only one eigenvector. See #441, #438 for discussion.
+
+ # But it doesn't work for dim A < 3, so we just handle this special case
+ if len(a) < 3:
+ vals, vecs = eig(a)
+ ind = numpy.abs(vals).argmax()
+ return vecs[:, ind]
+ else:
+ vals, vecs = eigs(a, k=1)
+ return vecs[:, 0]
+
+
+def process_results(graph, vec):
scores = {}
for i, node in enumerate(graph.nodes()):
- scores[node] = abs(vecs[i, :])
+ scores[node] = abs(vec[i])
return scores
| {"golden_diff": "diff --git a/gensim/summarization/pagerank_weighted.py b/gensim/summarization/pagerank_weighted.py\n--- a/gensim/summarization/pagerank_weighted.py\n+++ b/gensim/summarization/pagerank_weighted.py\n@@ -2,7 +2,9 @@\n # -*- coding: utf-8 -*-\n #\n # Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n+import numpy\n from numpy import empty as empty_matrix\n+from scipy.linalg import eig\n from scipy.sparse import csr_matrix\n from scipy.sparse.linalg import eigs\n from six.moves import xrange\n@@ -21,9 +23,10 @@\n \n pagerank_matrix = damping * adjacency_matrix.todense() + (1 - damping) * probability_matrix\n \n- vals, vecs = eigs(pagerank_matrix.T, k=1) # TODO raise an error if matrix has complex eigenvectors?\n+ vec = principal_eigenvector(pagerank_matrix.T)\n \n- return process_results(graph, vecs.real)\n+ # Because pagerank_matrix is positive, vec is always real (i.e. not complex)\n+ return process_results(graph, vec.real)\n \n \n def build_adjacency_matrix(graph):\n@@ -56,9 +59,23 @@\n return matrix\n \n \n-def process_results(graph, vecs):\n+def principal_eigenvector(a):\n+ # Note that we prefer to use `eigs` even for dense matrix\n+ # because we need only one eigenvector. See #441, #438 for discussion.\n+\n+ # But it doesn't work for dim A < 3, so we just handle this special case\n+ if len(a) < 3:\n+ vals, vecs = eig(a)\n+ ind = numpy.abs(vals).argmax()\n+ return vecs[:, ind]\n+ else:\n+ vals, vecs = eigs(a, k=1)\n+ return vecs[:, 0]\n+\n+\n+def process_results(graph, vec):\n scores = {}\n for i, node in enumerate(graph.nodes()):\n- scores[node] = abs(vecs[i, :])\n+ scores[node] = abs(vec[i])\n \n return scores\n", "issue": "Error while summarizing text\nHi,\n\nI've received the following error when trying to summarize the body of this news article:\n\nhttps://www.theguardian.com/media/2016/jun/19/sun-times-brexit-in-out-shake-it-all-about\n\nThe error follows:\n\n File \"/home/apps/comment_parser/venv/local/lib/python2.7/site-packages/gensim/summarization/summarizer.py\", line 202, in summarize\n most_important_docs = summarize_corpus(corpus, ratio=ratio if word_count is None else 1)\n File \"/home/apps/comment_parser/venv/local/lib/python2.7/site-packages/gensim/summarization/summarizer.py\", line 161, in summarize_corpus\n pagerank_scores = _pagerank(graph)\n File \"/home/apps/comment_parser/venv/local/lib/python2.7/site-packages/gensim/summarization/pagerank_weighted.py\", line 24, in pagerank_weighted\n vals, vecs = eigs(pagerank_matrix.T, k=1) # TODO raise an error if matrix has complex eigenvectors?\n File \"/usr/lib/python2.7/dist-packages/scipy/sparse/linalg/eigen/arpack/arpack.py\", line 1271, in eigs\n ncv, v0, maxiter, which, tol)\n File \"/usr/lib/python2.7/dist-packages/scipy/sparse/linalg/eigen/arpack/arpack.py\", line 685, in __init__\n raise ValueError(\"k must be less than ndim(A)-1, k=%d\" % k)\nValueError: k must be less than ndim(A)-1, k=1\n\nRegards,\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\nfrom numpy import empty as empty_matrix\nfrom scipy.sparse import csr_matrix\nfrom scipy.sparse.linalg import eigs\nfrom six.moves import xrange\n\ntry:\n from numpy import VisibleDeprecationWarning\n import warnings\n warnings.filterwarnings(\"ignore\", category=VisibleDeprecationWarning)\nexcept ImportError:\n pass\n\n\ndef pagerank_weighted(graph, damping=0.85):\n adjacency_matrix = build_adjacency_matrix(graph)\n 
probability_matrix = build_probability_matrix(graph)\n\n pagerank_matrix = damping * adjacency_matrix.todense() + (1 - damping) * probability_matrix\n\n vals, vecs = eigs(pagerank_matrix.T, k=1) # TODO raise an error if matrix has complex eigenvectors?\n\n return process_results(graph, vecs.real)\n\n\ndef build_adjacency_matrix(graph):\n row = []\n col = []\n data = []\n nodes = graph.nodes()\n length = len(nodes)\n\n for i in xrange(length):\n current_node = nodes[i]\n neighbors_sum = sum(graph.edge_weight((current_node, neighbor)) for neighbor in graph.neighbors(current_node))\n for j in xrange(length):\n edge_weight = float(graph.edge_weight((current_node, nodes[j])))\n if i != j and edge_weight != 0.0:\n row.append(i)\n col.append(j)\n data.append(edge_weight / neighbors_sum)\n\n return csr_matrix((data, (row, col)), shape=(length, length))\n\n\ndef build_probability_matrix(graph):\n dimension = len(graph.nodes())\n matrix = empty_matrix((dimension, dimension))\n\n probability = 1.0 / float(dimension)\n matrix.fill(probability)\n\n return matrix\n\n\ndef process_results(graph, vecs):\n scores = {}\n for i, node in enumerate(graph.nodes()):\n scores[node] = abs(vecs[i, :])\n\n return scores\n", "path": "gensim/summarization/pagerank_weighted.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\nimport numpy\nfrom numpy import empty as empty_matrix\nfrom scipy.linalg import eig\nfrom scipy.sparse import csr_matrix\nfrom scipy.sparse.linalg import eigs\nfrom six.moves import xrange\n\ntry:\n from numpy import VisibleDeprecationWarning\n import warnings\n warnings.filterwarnings(\"ignore\", category=VisibleDeprecationWarning)\nexcept ImportError:\n pass\n\n\ndef pagerank_weighted(graph, damping=0.85):\n adjacency_matrix = build_adjacency_matrix(graph)\n probability_matrix = build_probability_matrix(graph)\n\n pagerank_matrix = damping * adjacency_matrix.todense() + (1 - damping) * probability_matrix\n\n vec = principal_eigenvector(pagerank_matrix.T)\n\n # Because pagerank_matrix is positive, vec is always real (i.e. not complex)\n return process_results(graph, vec.real)\n\n\ndef build_adjacency_matrix(graph):\n row = []\n col = []\n data = []\n nodes = graph.nodes()\n length = len(nodes)\n\n for i in xrange(length):\n current_node = nodes[i]\n neighbors_sum = sum(graph.edge_weight((current_node, neighbor)) for neighbor in graph.neighbors(current_node))\n for j in xrange(length):\n edge_weight = float(graph.edge_weight((current_node, nodes[j])))\n if i != j and edge_weight != 0.0:\n row.append(i)\n col.append(j)\n data.append(edge_weight / neighbors_sum)\n\n return csr_matrix((data, (row, col)), shape=(length, length))\n\n\ndef build_probability_matrix(graph):\n dimension = len(graph.nodes())\n matrix = empty_matrix((dimension, dimension))\n\n probability = 1.0 / float(dimension)\n matrix.fill(probability)\n\n return matrix\n\n\ndef principal_eigenvector(a):\n # Note that we prefer to use `eigs` even for dense matrix\n # because we need only one eigenvector. 
See #441, #438 for discussion.\n\n # But it doesn't work for dim A < 3, so we just handle this special case\n if len(a) < 3:\n vals, vecs = eig(a)\n ind = numpy.abs(vals).argmax()\n return vecs[:, ind]\n else:\n vals, vecs = eigs(a, k=1)\n return vecs[:, 0]\n\n\ndef process_results(graph, vec):\n scores = {}\n for i, node in enumerate(graph.nodes()):\n scores[node] = abs(vec[i])\n\n return scores\n", "path": "gensim/summarization/pagerank_weighted.py"}]} | 1,203 | 500 |
gh_patches_debug_41193 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-279 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
NumberPrompt Locale Not Fully Implemented
## Version
v4.5
## Describe the bug
Found this bug while investigating parity with this [`NumberPrompt` bug filed in the dotnet repo](https://github.com/microsoft/botbuilder-dotnet/issues/2288):
* in the constructor, the `default_locale` attribute is set but never used in `NumberPrompt`'s implementation (neither in `on_prompt()` nor in `on_recognize()`)
* `on_recognize()` does allow you to pass in a `locale` via the `Activity`; however, the locale will not be used if only `default_locale` is specified
* "`English`" is used as a string to specify the locale, when we should be using the constants provided by the Python recognizers-text repo
* Separately, there's definitely a lack of unit test coverage for this feature (only 2 tests written)
## Expected behavior
* implement use of `default_locale`
* implement use of recognizers-text constants to specify locale
[bug]
--- END ISSUE ---
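A short sketch of the locale fallback the issue asks for. `Culture` is the constants class shipped with the recognizers-text package; the fallback chain itself is only an assumption of how it could look:

```python
from recognizers_number import recognize_number
from recognizers_text import Culture

def resolve_culture(activity_locale=None, default_locale=None):
    # The activity's locale wins, then the prompt's default_locale, then English.
    return activity_locale or default_locale or Culture.English

results = recognize_number("I would like twenty two", resolve_culture())
print(results[0].resolution["value"])  # "22"
```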
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 from typing import Dict
5 from recognizers_number import recognize_number
6 from botbuilder.core.turn_context import TurnContext
7 from botbuilder.schema import ActivityTypes
8 from .prompt import Prompt
9 from .prompt_options import PromptOptions
10 from .prompt_recognizer_result import PromptRecognizerResult
11
12
13 class NumberPrompt(Prompt):
14 # TODO: PromptValidator
15 def __init__(self, dialog_id: str, validator: object, default_locale: str):
16 super(NumberPrompt, self).__init__(dialog_id, validator)
17 self.default_locale = default_locale
18
19 async def on_prompt(
20 self,
21 turn_context: TurnContext,
22 state: Dict[str, object],
23 options: PromptOptions,
24 is_retry: bool,
25 ):
26 if not turn_context:
27 raise TypeError("NumberPrompt.on_prompt(): turn_context cannot be None.")
28 if not options:
29 raise TypeError("NumberPrompt.on_prompt(): options cannot be None.")
30
31 if is_retry and options.retry_prompt is not None:
32 turn_context.send_activity(options.retry_prompt)
33 else:
34 if options.prompt is not None:
35 await turn_context.send_activity(options.prompt)
36
37 async def on_recognize(
38 self,
39 turn_context: TurnContext,
40 state: Dict[str, object],
41 options: PromptOptions,
42 ) -> PromptRecognizerResult:
43 if not turn_context:
44 raise TypeError("NumberPrompt.on_recognize(): turn_context cannot be None.")
45
46 result = PromptRecognizerResult()
47 if turn_context.activity.type == ActivityTypes.message:
48 message = turn_context.activity
49
50 # TODO: Fix constant English with correct constant from text recognizer
51 culture = (
52 turn_context.activity.locale
53 if turn_context.activity.locale is not None
54 else "English"
55 )
56
57 results = recognize_number(message.text, culture)
58 if results:
59 result.succeeded = True
60 result.value = results[0].resolution["value"]
61
62 return result
63
```
Path: `libraries/botbuilder-dialogs/setup.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 import os
5 from setuptools import setup
6
7 REQUIRES = [
8 "recognizers-text-date-time>=1.0.1a0",
9 "recognizers-text-number-with-unit>=1.0.1a0",
10 "recognizers-text-number>=1.0.1a0",
11 "recognizers-text>=1.0.1a0",
12 "recognizers-text-choice>=1.0.1a0",
13 "grapheme>=0.5.0",
14 "emoji>=0.5.2",
15 "botbuilder-schema>=4.4.0b1",
16 "botframework-connector>=4.4.0b1",
17 "botbuilder-core>=4.4.0b1",
18 ]
19
20 TEST_REQUIRES = ["aiounittest>=1.1.0"]
21
22 root = os.path.abspath(os.path.dirname(__file__))
23
24 with open(os.path.join(root, "botbuilder", "dialogs", "about.py")) as f:
25 package_info = {}
26 info = f.read()
27 exec(info, package_info)
28
29 with open(os.path.join(root, "README.rst"), encoding="utf-8") as f:
30 long_description = f.read()
31
32 setup(
33 name=package_info["__title__"],
34 version=package_info["__version__"],
35 url=package_info["__uri__"],
36 author=package_info["__author__"],
37 description=package_info["__description__"],
38 keywords=["BotBuilderDialogs", "bots", "ai", "botframework", "botbuilder"],
39 long_description=long_description,
40 long_description_content_type="text/x-rst",
41 license=package_info["__license__"],
42 packages=[
43 "botbuilder.dialogs",
44 "botbuilder.dialogs.prompts",
45 "botbuilder.dialogs.choices",
46 ],
47 install_requires=REQUIRES + TEST_REQUIRES,
48 tests_require=TEST_REQUIRES,
49 include_package_data=True,
50 classifiers=[
51 "Programming Language :: Python :: 3.7",
52 "Intended Audience :: Developers",
53 "License :: OSI Approved :: MIT License",
54 "Operating System :: OS Independent",
55 "Development Status :: 3 - Alpha",
56 "Topic :: Scientific/Engineering :: Artificial Intelligence",
57 ],
58 )
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py
--- a/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py
+++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py
@@ -1,18 +1,29 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
-from typing import Dict
+from typing import Callable, Dict
+
+from babel.numbers import parse_decimal
from recognizers_number import recognize_number
+from recognizers_text import Culture, ModelResult
+
from botbuilder.core.turn_context import TurnContext
from botbuilder.schema import ActivityTypes
-from .prompt import Prompt
+
+from .prompt import Prompt, PromptValidatorContext
from .prompt_options import PromptOptions
from .prompt_recognizer_result import PromptRecognizerResult
class NumberPrompt(Prompt):
- # TODO: PromptValidator
- def __init__(self, dialog_id: str, validator: object, default_locale: str):
+ # TODO: PromptValidator needs to be fixed
+ # Does not accept answer as intended (times out)
+ def __init__(
+ self,
+ dialog_id: str,
+ validator: Callable[[PromptValidatorContext], bool] = None,
+ default_locale: str = None,
+ ):
super(NumberPrompt, self).__init__(dialog_id, validator)
self.default_locale = default_locale
@@ -30,9 +41,8 @@
if is_retry and options.retry_prompt is not None:
turn_context.send_activity(options.retry_prompt)
- else:
- if options.prompt is not None:
- await turn_context.send_activity(options.prompt)
+ elif options.prompt is not None:
+ await turn_context.send_activity(options.prompt)
async def on_recognize(
self,
@@ -46,17 +56,25 @@
result = PromptRecognizerResult()
if turn_context.activity.type == ActivityTypes.message:
message = turn_context.activity
+ culture = self._get_culture(turn_context)
+ results: [ModelResult] = recognize_number(message.text, culture)
- # TODO: Fix constant English with correct constant from text recognizer
- culture = (
- turn_context.activity.locale
- if turn_context.activity.locale is not None
- else "English"
- )
-
- results = recognize_number(message.text, culture)
if results:
result.succeeded = True
- result.value = results[0].resolution["value"]
+ result.value = parse_decimal(
+ results[0].resolution["value"], locale=culture.replace("-", "_")
+ )
return result
+
+ def _get_culture(self, turn_context: TurnContext):
+ culture = (
+ turn_context.activity.locale
+ if turn_context.activity.locale
+ else self.default_locale
+ )
+
+ if not culture:
+ culture = Culture.English
+
+ return culture
diff --git a/libraries/botbuilder-dialogs/setup.py b/libraries/botbuilder-dialogs/setup.py
--- a/libraries/botbuilder-dialogs/setup.py
+++ b/libraries/botbuilder-dialogs/setup.py
@@ -12,6 +12,7 @@
"recognizers-text-choice>=1.0.1a0",
"grapheme>=0.5.0",
"emoji>=0.5.2",
+ "babel>=2.7.0",
"botbuilder-schema>=4.4.0b1",
"botframework-connector>=4.4.0b1",
"botbuilder-core>=4.4.0b1",
| {"golden_diff": "diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py\n--- a/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py\n+++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py\n@@ -1,18 +1,29 @@\n # Copyright (c) Microsoft Corporation. All rights reserved.\n # Licensed under the MIT License.\n \n-from typing import Dict\n+from typing import Callable, Dict\n+\n+from babel.numbers import parse_decimal\n from recognizers_number import recognize_number\n+from recognizers_text import Culture, ModelResult\n+\n from botbuilder.core.turn_context import TurnContext\n from botbuilder.schema import ActivityTypes\n-from .prompt import Prompt\n+\n+from .prompt import Prompt, PromptValidatorContext\n from .prompt_options import PromptOptions\n from .prompt_recognizer_result import PromptRecognizerResult\n \n \n class NumberPrompt(Prompt):\n- # TODO: PromptValidator\n- def __init__(self, dialog_id: str, validator: object, default_locale: str):\n+ # TODO: PromptValidator needs to be fixed\n+ # Does not accept answer as intended (times out)\n+ def __init__(\n+ self,\n+ dialog_id: str,\n+ validator: Callable[[PromptValidatorContext], bool] = None,\n+ default_locale: str = None,\n+ ):\n super(NumberPrompt, self).__init__(dialog_id, validator)\n self.default_locale = default_locale\n \n@@ -30,9 +41,8 @@\n \n if is_retry and options.retry_prompt is not None:\n turn_context.send_activity(options.retry_prompt)\n- else:\n- if options.prompt is not None:\n- await turn_context.send_activity(options.prompt)\n+ elif options.prompt is not None:\n+ await turn_context.send_activity(options.prompt)\n \n async def on_recognize(\n self,\n@@ -46,17 +56,25 @@\n result = PromptRecognizerResult()\n if turn_context.activity.type == ActivityTypes.message:\n message = turn_context.activity\n+ culture = self._get_culture(turn_context)\n+ results: [ModelResult] = recognize_number(message.text, culture)\n \n- # TODO: Fix constant English with correct constant from text recognizer\n- culture = (\n- turn_context.activity.locale\n- if turn_context.activity.locale is not None\n- else \"English\"\n- )\n-\n- results = recognize_number(message.text, culture)\n if results:\n result.succeeded = True\n- result.value = results[0].resolution[\"value\"]\n+ result.value = parse_decimal(\n+ results[0].resolution[\"value\"], locale=culture.replace(\"-\", \"_\")\n+ )\n \n return result\n+\n+ def _get_culture(self, turn_context: TurnContext):\n+ culture = (\n+ turn_context.activity.locale\n+ if turn_context.activity.locale\n+ else self.default_locale\n+ )\n+\n+ if not culture:\n+ culture = Culture.English\n+\n+ return culture\ndiff --git a/libraries/botbuilder-dialogs/setup.py b/libraries/botbuilder-dialogs/setup.py\n--- a/libraries/botbuilder-dialogs/setup.py\n+++ b/libraries/botbuilder-dialogs/setup.py\n@@ -12,6 +12,7 @@\n \"recognizers-text-choice>=1.0.1a0\",\n \"grapheme>=0.5.0\",\n \"emoji>=0.5.2\",\n+ \"babel>=2.7.0\",\n \"botbuilder-schema>=4.4.0b1\",\n \"botframework-connector>=4.4.0b1\",\n \"botbuilder-core>=4.4.0b1\",\n", "issue": "NumberPrompt Locale Not Fully Implemented\n## Version\r\nv4.5\r\n\r\n## Describe the bug\r\nFound this bug while investigating for parity with regards to this [`NumberPrompt` bug filed in the dotnet repo](https://github.com/microsoft/botbuilder-dotnet/issues/2288)\r\n* in constructor, `default_locale` attribute is set, but never used in 
`NumberPrompt`'s implementation (not in `on_prompt()` nor `on_recognize()`\r\n* `on_recognize()` does allow you to pass in `locale` via `Activity`, however locale will not be used if only `default_locale` is specified\r\n* \"`English`\" is used as string to specify locale, when we should be using the constants provided by the python recognizers-text repo\r\n\r\n* Separately, there's definitely a lack of unit test coverage for this feature (only 2 tests written)\r\n\r\n## Expected behavior\r\n* implement use of `default_locale`\r\n* implement use of recognizers-text constants to specify locale\r\n\r\n\r\n\r\n[bug]\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nfrom typing import Dict\nfrom recognizers_number import recognize_number\nfrom botbuilder.core.turn_context import TurnContext\nfrom botbuilder.schema import ActivityTypes\nfrom .prompt import Prompt\nfrom .prompt_options import PromptOptions\nfrom .prompt_recognizer_result import PromptRecognizerResult\n\n\nclass NumberPrompt(Prompt):\n # TODO: PromptValidator\n def __init__(self, dialog_id: str, validator: object, default_locale: str):\n super(NumberPrompt, self).__init__(dialog_id, validator)\n self.default_locale = default_locale\n\n async def on_prompt(\n self,\n turn_context: TurnContext,\n state: Dict[str, object],\n options: PromptOptions,\n is_retry: bool,\n ):\n if not turn_context:\n raise TypeError(\"NumberPrompt.on_prompt(): turn_context cannot be None.\")\n if not options:\n raise TypeError(\"NumberPrompt.on_prompt(): options cannot be None.\")\n\n if is_retry and options.retry_prompt is not None:\n turn_context.send_activity(options.retry_prompt)\n else:\n if options.prompt is not None:\n await turn_context.send_activity(options.prompt)\n\n async def on_recognize(\n self,\n turn_context: TurnContext,\n state: Dict[str, object],\n options: PromptOptions,\n ) -> PromptRecognizerResult:\n if not turn_context:\n raise TypeError(\"NumberPrompt.on_recognize(): turn_context cannot be None.\")\n\n result = PromptRecognizerResult()\n if turn_context.activity.type == ActivityTypes.message:\n message = turn_context.activity\n\n # TODO: Fix constant English with correct constant from text recognizer\n culture = (\n turn_context.activity.locale\n if turn_context.activity.locale is not None\n else \"English\"\n )\n\n results = recognize_number(message.text, culture)\n if results:\n result.succeeded = True\n result.value = results[0].resolution[\"value\"]\n\n return result\n", "path": "libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py"}, {"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nREQUIRES = [\n \"recognizers-text-date-time>=1.0.1a0\",\n \"recognizers-text-number-with-unit>=1.0.1a0\",\n \"recognizers-text-number>=1.0.1a0\",\n \"recognizers-text>=1.0.1a0\",\n \"recognizers-text-choice>=1.0.1a0\",\n \"grapheme>=0.5.0\",\n \"emoji>=0.5.2\",\n \"botbuilder-schema>=4.4.0b1\",\n \"botframework-connector>=4.4.0b1\",\n \"botbuilder-core>=4.4.0b1\",\n]\n\nTEST_REQUIRES = [\"aiounittest>=1.1.0\"]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"dialogs\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\"BotBuilderDialogs\", \"bots\", \"ai\", \"botframework\", \"botbuilder\"],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\n \"botbuilder.dialogs\",\n \"botbuilder.dialogs.prompts\",\n \"botbuilder.dialogs.choices\",\n ],\n install_requires=REQUIRES + TEST_REQUIRES,\n tests_require=TEST_REQUIRES,\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 3 - Alpha\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-dialogs/setup.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nfrom typing import Callable, Dict\n\nfrom babel.numbers import parse_decimal\nfrom recognizers_number import recognize_number\nfrom recognizers_text import Culture, ModelResult\n\nfrom botbuilder.core.turn_context import TurnContext\nfrom botbuilder.schema import ActivityTypes\n\nfrom .prompt import Prompt, PromptValidatorContext\nfrom .prompt_options import PromptOptions\nfrom .prompt_recognizer_result import PromptRecognizerResult\n\n\nclass NumberPrompt(Prompt):\n # TODO: PromptValidator needs to be fixed\n # Does not accept answer as intended (times out)\n def __init__(\n self,\n dialog_id: str,\n validator: Callable[[PromptValidatorContext], bool] = None,\n default_locale: str = None,\n ):\n super(NumberPrompt, self).__init__(dialog_id, validator)\n self.default_locale = default_locale\n\n async def on_prompt(\n self,\n turn_context: TurnContext,\n state: Dict[str, object],\n options: PromptOptions,\n is_retry: bool,\n ):\n if not turn_context:\n raise TypeError(\"NumberPrompt.on_prompt(): turn_context cannot be None.\")\n if not options:\n raise TypeError(\"NumberPrompt.on_prompt(): options cannot be None.\")\n\n if is_retry and options.retry_prompt is not None:\n turn_context.send_activity(options.retry_prompt)\n elif options.prompt is not None:\n await turn_context.send_activity(options.prompt)\n\n async def on_recognize(\n self,\n turn_context: TurnContext,\n state: Dict[str, object],\n options: PromptOptions,\n ) -> PromptRecognizerResult:\n if not turn_context:\n raise TypeError(\"NumberPrompt.on_recognize(): turn_context cannot be None.\")\n\n result = PromptRecognizerResult()\n if turn_context.activity.type == ActivityTypes.message:\n message = turn_context.activity\n culture = self._get_culture(turn_context)\n results: [ModelResult] = recognize_number(message.text, culture)\n\n if results:\n result.succeeded = True\n result.value = parse_decimal(\n results[0].resolution[\"value\"], locale=culture.replace(\"-\", \"_\")\n )\n\n return result\n\n def _get_culture(self, turn_context: TurnContext):\n culture = (\n turn_context.activity.locale\n if turn_context.activity.locale\n else self.default_locale\n )\n\n if not culture:\n culture = Culture.English\n\n return culture\n", "path": "libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py"}, {"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nREQUIRES = [\n \"recognizers-text-date-time>=1.0.1a0\",\n \"recognizers-text-number-with-unit>=1.0.1a0\",\n \"recognizers-text-number>=1.0.1a0\",\n \"recognizers-text>=1.0.1a0\",\n \"recognizers-text-choice>=1.0.1a0\",\n \"grapheme>=0.5.0\",\n \"emoji>=0.5.2\",\n \"babel>=2.7.0\",\n \"botbuilder-schema>=4.4.0b1\",\n \"botframework-connector>=4.4.0b1\",\n \"botbuilder-core>=4.4.0b1\",\n]\n\nTEST_REQUIRES = [\"aiounittest>=1.1.0\"]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"dialogs\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\"BotBuilderDialogs\", \"bots\", \"ai\", \"botframework\", \"botbuilder\"],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\n \"botbuilder.dialogs\",\n \"botbuilder.dialogs.prompts\",\n \"botbuilder.dialogs.choices\",\n ],\n install_requires=REQUIRES + TEST_REQUIRES,\n tests_require=TEST_REQUIRES,\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 3 - Alpha\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-dialogs/setup.py"}]} | 1,651 | 832 |
gh_patches_debug_39763 | rasdani/github-patches | git_diff | frappe__frappe-11643 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Feature Request] Clean failed jobs from queue
Currently, there is no feature that allows failed jobs to be cleaned from the job queue. Therefore, failed jobs will accumulate. It should be possible to clear failed jobs.
Discussion reference: https://discuss.erpnext.com/t/cleaning-failed-background-jobs/37886

Observed in Frappe 10.1.x
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `frappe/core/page/background_jobs/background_jobs.py`
Content:
```
1 # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
2 # MIT License. See license.txt
3
4 from __future__ import unicode_literals
5 import frappe
6
7 from rq import Queue, Worker
8 from frappe.utils.background_jobs import get_redis_conn
9 from frappe.utils import format_datetime, cint, convert_utc_to_user_timezone
10 from frappe.utils.scheduler import is_scheduler_inactive
11 from frappe import _
12
13 colors = {
14 'queued': 'orange',
15 'failed': 'red',
16 'started': 'blue',
17 'finished': 'green'
18 }
19
20 @frappe.whitelist()
21 def get_info(show_failed=False):
22 conn = get_redis_conn()
23 queues = Queue.all(conn)
24 workers = Worker.all(conn)
25 jobs = []
26
27 def add_job(j, name):
28 if j.kwargs.get('site')==frappe.local.site:
29 jobs.append({
30 'job_name': j.kwargs.get('kwargs', {}).get('playbook_method') \
31 or j.kwargs.get('kwargs', {}).get('job_type') \
32 or str(j.kwargs.get('job_name')),
33 'status': j.get_status(), 'queue': name,
34 'creation': format_datetime(convert_utc_to_user_timezone(j.created_at)),
35 'color': colors[j.get_status()]
36 })
37 if j.exc_info:
38 jobs[-1]['exc_info'] = j.exc_info
39
40 for w in workers:
41 j = w.get_current_job()
42 if j:
43 add_job(j, w.name)
44
45 for q in queues:
46 if q.name != 'failed':
47 for j in q.get_jobs(): add_job(j, q.name)
48
49 if cint(show_failed):
50 for q in queues:
51 if q.name == 'failed':
52 for j in q.get_jobs()[:10]: add_job(j, q.name)
53
54 return jobs
55
56 @frappe.whitelist()
57 def get_scheduler_status():
58 if is_scheduler_inactive():
59 return [_("Inactive"), "red"]
60 return [_("Active"), "green"]
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/frappe/core/page/background_jobs/background_jobs.py b/frappe/core/page/background_jobs/background_jobs.py
--- a/frappe/core/page/background_jobs/background_jobs.py
+++ b/frappe/core/page/background_jobs/background_jobs.py
@@ -1,58 +1,88 @@
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
-from __future__ import unicode_literals
-import frappe
+import json
+from typing import TYPE_CHECKING, Dict, List
from rq import Queue, Worker
+
+import frappe
+from frappe import _
+from frappe.utils import convert_utc_to_user_timezone, format_datetime
from frappe.utils.background_jobs import get_redis_conn
-from frappe.utils import format_datetime, cint, convert_utc_to_user_timezone
from frappe.utils.scheduler import is_scheduler_inactive
-from frappe import _
-colors = {
+if TYPE_CHECKING:
+ from rq.job import Job
+
+JOB_COLORS = {
'queued': 'orange',
'failed': 'red',
'started': 'blue',
'finished': 'green'
}
+
@frappe.whitelist()
-def get_info(show_failed=False):
+def get_info(show_failed=False) -> List[Dict]:
+ if isinstance(show_failed, str):
+ show_failed = json.loads(show_failed)
+
conn = get_redis_conn()
queues = Queue.all(conn)
workers = Worker.all(conn)
jobs = []
- def add_job(j, name):
- if j.kwargs.get('site')==frappe.local.site:
- jobs.append({
- 'job_name': j.kwargs.get('kwargs', {}).get('playbook_method') \
- or j.kwargs.get('kwargs', {}).get('job_type') \
- or str(j.kwargs.get('job_name')),
- 'status': j.get_status(), 'queue': name,
- 'creation': format_datetime(convert_utc_to_user_timezone(j.created_at)),
- 'color': colors[j.get_status()]
- })
- if j.exc_info:
- jobs[-1]['exc_info'] = j.exc_info
-
- for w in workers:
- j = w.get_current_job()
- if j:
- add_job(j, w.name)
-
- for q in queues:
- if q.name != 'failed':
- for j in q.get_jobs(): add_job(j, q.name)
-
- if cint(show_failed):
- for q in queues:
- if q.name == 'failed':
- for j in q.get_jobs()[:10]: add_job(j, q.name)
+ def add_job(job: 'Job', name: str) -> None:
+ if job.kwargs.get('site') == frappe.local.site:
+ job_info = {
+ 'job_name': job.kwargs.get('kwargs', {}).get('playbook_method')
+ or job.kwargs.get('kwargs', {}).get('job_type')
+ or str(job.kwargs.get('job_name')),
+ 'status': job.get_status(),
+ 'queue': name,
+ 'creation': format_datetime(convert_utc_to_user_timezone(job.created_at)),
+ 'color': JOB_COLORS[job.get_status()]
+ }
+
+ if job.exc_info:
+ job_info['exc_info'] = job.exc_info
+
+ jobs.append(job_info)
+
+ # show worker jobs
+ for worker in workers:
+ job = worker.get_current_job()
+ if job:
+ add_job(job, worker.name)
+
+ for queue in queues:
+ # show active queued jobs
+ if queue.name != 'failed':
+ for job in queue.jobs:
+ add_job(job, queue.name)
+
+ # show failed jobs, if requested
+ if show_failed:
+ fail_registry = queue.failed_job_registry
+ for job_id in fail_registry.get_job_ids():
+ job = queue.fetch_job(job_id)
+ add_job(job, queue.name)
return jobs
+
[email protected]()
+def remove_failed_jobs():
+ conn = get_redis_conn()
+ queues = Queue.all(conn)
+ for queue in queues:
+ fail_registry = queue.failed_job_registry
+ for job_id in fail_registry.get_job_ids():
+ job = queue.fetch_job(job_id)
+ fail_registry.remove(job, delete_job=True)
+
+
@frappe.whitelist()
def get_scheduler_status():
if is_scheduler_inactive():
| {"golden_diff": "diff --git a/frappe/core/page/background_jobs/background_jobs.py b/frappe/core/page/background_jobs/background_jobs.py\n--- a/frappe/core/page/background_jobs/background_jobs.py\n+++ b/frappe/core/page/background_jobs/background_jobs.py\n@@ -1,58 +1,88 @@\n # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n # MIT License. See license.txt\n \n-from __future__ import unicode_literals\n-import frappe\n+import json\n+from typing import TYPE_CHECKING, Dict, List\n \n from rq import Queue, Worker\n+\n+import frappe\n+from frappe import _\n+from frappe.utils import convert_utc_to_user_timezone, format_datetime\n from frappe.utils.background_jobs import get_redis_conn\n-from frappe.utils import format_datetime, cint, convert_utc_to_user_timezone\n from frappe.utils.scheduler import is_scheduler_inactive\n-from frappe import _\n \n-colors = {\n+if TYPE_CHECKING:\n+\tfrom rq.job import Job\n+\n+JOB_COLORS = {\n \t'queued': 'orange',\n \t'failed': 'red',\n \t'started': 'blue',\n \t'finished': 'green'\n }\n \n+\n @frappe.whitelist()\n-def get_info(show_failed=False):\n+def get_info(show_failed=False) -> List[Dict]:\n+\tif isinstance(show_failed, str):\n+\t\tshow_failed = json.loads(show_failed)\n+\n \tconn = get_redis_conn()\n \tqueues = Queue.all(conn)\n \tworkers = Worker.all(conn)\n \tjobs = []\n \n-\tdef add_job(j, name):\n-\t\tif j.kwargs.get('site')==frappe.local.site:\n-\t\t\tjobs.append({\n-\t\t\t\t'job_name': j.kwargs.get('kwargs', {}).get('playbook_method') \\\n-\t\t\t\t\tor j.kwargs.get('kwargs', {}).get('job_type') \\\n-\t\t\t\t\tor str(j.kwargs.get('job_name')),\n-\t\t\t\t'status': j.get_status(), 'queue': name,\n-\t\t\t\t'creation': format_datetime(convert_utc_to_user_timezone(j.created_at)),\n-\t\t\t\t'color': colors[j.get_status()]\n-\t\t\t})\n-\t\t\tif j.exc_info:\n-\t\t\t\tjobs[-1]['exc_info'] = j.exc_info\n-\n-\tfor w in workers:\n-\t\tj = w.get_current_job()\n-\t\tif j:\n-\t\t\tadd_job(j, w.name)\n-\n-\tfor q in queues:\n-\t\tif q.name != 'failed':\n-\t\t\tfor j in q.get_jobs(): add_job(j, q.name)\n-\n-\tif cint(show_failed):\n-\t\tfor q in queues:\n-\t\t\tif q.name == 'failed':\n-\t\t\t\tfor j in q.get_jobs()[:10]: add_job(j, q.name)\n+\tdef add_job(job: 'Job', name: str) -> None:\n+\t\tif job.kwargs.get('site') == frappe.local.site:\n+\t\t\tjob_info = {\n+\t\t\t\t'job_name': job.kwargs.get('kwargs', {}).get('playbook_method')\n+\t\t\t\t\tor job.kwargs.get('kwargs', {}).get('job_type')\n+\t\t\t\t\tor str(job.kwargs.get('job_name')),\n+\t\t\t\t'status': job.get_status(),\n+\t\t\t\t'queue': name,\n+\t\t\t\t'creation': format_datetime(convert_utc_to_user_timezone(job.created_at)),\n+\t\t\t\t'color': JOB_COLORS[job.get_status()]\n+\t\t\t}\n+\n+\t\t\tif job.exc_info:\n+\t\t\t\tjob_info['exc_info'] = job.exc_info\n+\n+\t\t\tjobs.append(job_info)\n+\n+\t# show worker jobs\n+\tfor worker in workers:\n+\t\tjob = worker.get_current_job()\n+\t\tif job:\n+\t\t\tadd_job(job, worker.name)\n+\n+\tfor queue in queues:\n+\t\t# show active queued jobs\n+\t\tif queue.name != 'failed':\n+\t\t\tfor job in queue.jobs:\n+\t\t\t\tadd_job(job, queue.name)\n+\n+\t\t# show failed jobs, if requested\n+\t\tif show_failed:\n+\t\t\tfail_registry = queue.failed_job_registry\n+\t\t\tfor job_id in fail_registry.get_job_ids():\n+\t\t\t\tjob = queue.fetch_job(job_id)\n+\t\t\t\tadd_job(job, queue.name)\n \n \treturn jobs\n \n+\[email protected]()\n+def remove_failed_jobs():\n+\tconn = get_redis_conn()\n+\tqueues = Queue.all(conn)\n+\tfor queue in queues:\n+\t\tfail_registry = 
queue.failed_job_registry\n+\t\tfor job_id in fail_registry.get_job_ids():\n+\t\t\tjob = queue.fetch_job(job_id)\n+\t\t\tfail_registry.remove(job, delete_job=True)\n+\n+\n @frappe.whitelist()\n def get_scheduler_status():\n \tif is_scheduler_inactive():\n", "issue": "[Feature Request] Clean failed jobs from queue\nCurrently, there is no feature that allows to clean failed jobs from the job queue. Therefore, failed jobs will accumulate. It should be possible to clear failed jobs.\r\n\r\nDiscussion reference: https://discuss.erpnext.com/t/cleaning-failed-background-jobs/37886\r\n\r\n\r\n\r\nObserved in Frappe 10.1.x\n", "before_files": [{"content": "# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# MIT License. See license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\n\nfrom rq import Queue, Worker\nfrom frappe.utils.background_jobs import get_redis_conn\nfrom frappe.utils import format_datetime, cint, convert_utc_to_user_timezone\nfrom frappe.utils.scheduler import is_scheduler_inactive\nfrom frappe import _\n\ncolors = {\n\t'queued': 'orange',\n\t'failed': 'red',\n\t'started': 'blue',\n\t'finished': 'green'\n}\n\[email protected]()\ndef get_info(show_failed=False):\n\tconn = get_redis_conn()\n\tqueues = Queue.all(conn)\n\tworkers = Worker.all(conn)\n\tjobs = []\n\n\tdef add_job(j, name):\n\t\tif j.kwargs.get('site')==frappe.local.site:\n\t\t\tjobs.append({\n\t\t\t\t'job_name': j.kwargs.get('kwargs', {}).get('playbook_method') \\\n\t\t\t\t\tor j.kwargs.get('kwargs', {}).get('job_type') \\\n\t\t\t\t\tor str(j.kwargs.get('job_name')),\n\t\t\t\t'status': j.get_status(), 'queue': name,\n\t\t\t\t'creation': format_datetime(convert_utc_to_user_timezone(j.created_at)),\n\t\t\t\t'color': colors[j.get_status()]\n\t\t\t})\n\t\t\tif j.exc_info:\n\t\t\t\tjobs[-1]['exc_info'] = j.exc_info\n\n\tfor w in workers:\n\t\tj = w.get_current_job()\n\t\tif j:\n\t\t\tadd_job(j, w.name)\n\n\tfor q in queues:\n\t\tif q.name != 'failed':\n\t\t\tfor j in q.get_jobs(): add_job(j, q.name)\n\n\tif cint(show_failed):\n\t\tfor q in queues:\n\t\t\tif q.name == 'failed':\n\t\t\t\tfor j in q.get_jobs()[:10]: add_job(j, q.name)\n\n\treturn jobs\n\[email protected]()\ndef get_scheduler_status():\n\tif is_scheduler_inactive():\n\t\treturn [_(\"Inactive\"), \"red\"]\n\treturn [_(\"Active\"), \"green\"]\n", "path": "frappe/core/page/background_jobs/background_jobs.py"}], "after_files": [{"content": "# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# MIT License. 
See license.txt\n\nimport json\nfrom typing import TYPE_CHECKING, Dict, List\n\nfrom rq import Queue, Worker\n\nimport frappe\nfrom frappe import _\nfrom frappe.utils import convert_utc_to_user_timezone, format_datetime\nfrom frappe.utils.background_jobs import get_redis_conn\nfrom frappe.utils.scheduler import is_scheduler_inactive\n\nif TYPE_CHECKING:\n\tfrom rq.job import Job\n\nJOB_COLORS = {\n\t'queued': 'orange',\n\t'failed': 'red',\n\t'started': 'blue',\n\t'finished': 'green'\n}\n\n\[email protected]()\ndef get_info(show_failed=False) -> List[Dict]:\n\tif isinstance(show_failed, str):\n\t\tshow_failed = json.loads(show_failed)\n\n\tconn = get_redis_conn()\n\tqueues = Queue.all(conn)\n\tworkers = Worker.all(conn)\n\tjobs = []\n\n\tdef add_job(job: 'Job', name: str) -> None:\n\t\tif job.kwargs.get('site') == frappe.local.site:\n\t\t\tjob_info = {\n\t\t\t\t'job_name': job.kwargs.get('kwargs', {}).get('playbook_method')\n\t\t\t\t\tor job.kwargs.get('kwargs', {}).get('job_type')\n\t\t\t\t\tor str(job.kwargs.get('job_name')),\n\t\t\t\t'status': job.get_status(),\n\t\t\t\t'queue': name,\n\t\t\t\t'creation': format_datetime(convert_utc_to_user_timezone(job.created_at)),\n\t\t\t\t'color': JOB_COLORS[job.get_status()]\n\t\t\t}\n\n\t\t\tif job.exc_info:\n\t\t\t\tjob_info['exc_info'] = job.exc_info\n\n\t\t\tjobs.append(job_info)\n\n\t# show worker jobs\n\tfor worker in workers:\n\t\tjob = worker.get_current_job()\n\t\tif job:\n\t\t\tadd_job(job, worker.name)\n\n\tfor queue in queues:\n\t\t# show active queued jobs\n\t\tif queue.name != 'failed':\n\t\t\tfor job in queue.jobs:\n\t\t\t\tadd_job(job, queue.name)\n\n\t\t# show failed jobs, if requested\n\t\tif show_failed:\n\t\t\tfail_registry = queue.failed_job_registry\n\t\t\tfor job_id in fail_registry.get_job_ids():\n\t\t\t\tjob = queue.fetch_job(job_id)\n\t\t\t\tadd_job(job, queue.name)\n\n\treturn jobs\n\n\[email protected]()\ndef remove_failed_jobs():\n\tconn = get_redis_conn()\n\tqueues = Queue.all(conn)\n\tfor queue in queues:\n\t\tfail_registry = queue.failed_job_registry\n\t\tfor job_id in fail_registry.get_job_ids():\n\t\t\tjob = queue.fetch_job(job_id)\n\t\t\tfail_registry.remove(job, delete_job=True)\n\n\[email protected]()\ndef get_scheduler_status():\n\tif is_scheduler_inactive():\n\t\treturn [_(\"Inactive\"), \"red\"]\n\treturn [_(\"Active\"), \"green\"]\n", "path": "frappe/core/page/background_jobs/background_jobs.py"}]} | 968 | 985 |
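Outside of Frappe, the clean-up pattern introduced by the patch above can be exercised directly against RQ. A small sketch, assuming a local Redis server and illustrative queue names rather than Frappe's own configuration:

```python
# Standalone sketch of the failed-job clean-up pattern from the patch above.
# Assumes a local Redis server; the queue names here are illustrative, not Frappe's.
from redis import Redis
from rq import Queue


def remove_failed_jobs(queue_names=("default", "short", "long")):
    conn = Redis()
    for name in queue_names:
        queue = Queue(name, connection=conn)
        registry = queue.failed_job_registry
        for job_id in registry.get_job_ids():
            job = queue.fetch_job(job_id)
            if job is not None:
                # delete_job=True removes the job data as well as the registry entry
                registry.remove(job, delete_job=True)


if __name__ == "__main__":
    remove_failed_jobs()
```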
gh_patches_debug_22335 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-1770 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow changing what email notifications are sent from
**Is your feature request related to a problem? Please describe.**
It came up on the Matrix chat that an admin didn't have an admin@ email for their domain, and wanted to change it to no-reply@, which is only possible by changing `settings.py`
**Describe the solution you'd like**
It would be nice to be able to configure this in the `.env` alongside the rest of the email things - perhaps `EMAIL_FROM_ADDRESS` or `EMAIL_SENDER_ADDRESS` or something.
**Describe alternatives you've considered**
Changing it in settings.py works, but that is a checked-in file, so it's not great. We could allow just changing the username part of the email address, but it seems useful to have the additional flexibility (for example, I might want to send my BookWyrm emails from `[email protected]` instead of `[email protected]`, since I can't receive email at `bookwyrm.cincodenada.com`).
**Additional context**
[Matrix conversation](https://matrix.to/#/!zoxBMxLlvIyeEKkHuB:matrix.org/$tQg0cS2vzhBeziDszNLxnIc21TR-KzYk55PgWBpyRTo?via=matrix.org&via=tchncs.de&via=group.lt)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/emailing.py`
Content:
```
1 """ send emails """
2 from django.core.mail import EmailMultiAlternatives
3 from django.template.loader import get_template
4
5 from bookwyrm import models, settings
6 from bookwyrm.tasks import app
7 from bookwyrm.settings import DOMAIN
8
9
10 def email_data():
11 """fields every email needs"""
12 site = models.SiteSettings.objects.get()
13 return {
14 "site_name": site.name,
15 "logo": site.logo_small_url,
16 "domain": DOMAIN,
17 "user": None,
18 }
19
20
21 def email_confirmation_email(user):
22 """newly registered users confirm email address"""
23 data = email_data()
24 data["confirmation_code"] = user.confirmation_code
25 data["confirmation_link"] = user.confirmation_link
26 send_email.delay(user.email, *format_email("confirm", data))
27
28
29 def invite_email(invite_request):
30 """send out an invite code"""
31 data = email_data()
32 data["invite_link"] = invite_request.invite.link
33 send_email.delay(invite_request.email, *format_email("invite", data))
34
35
36 def password_reset_email(reset_code):
37 """generate a password reset email"""
38 data = email_data()
39 data["reset_link"] = reset_code.link
40 data["user"] = reset_code.user.display_name
41 send_email.delay(reset_code.user.email, *format_email("password_reset", data))
42
43
44 def moderation_report_email(report):
45 """a report was created"""
46 data = email_data()
47 data["reporter"] = report.reporter.localname or report.reporter.username
48 data["reportee"] = report.user.localname or report.user.username
49 data["report_link"] = report.remote_id
50
51 for admin in models.User.objects.filter(groups__name__in=["admin", "moderator"]):
52 data["user"] = admin.display_name
53 send_email.delay(admin.email, *format_email("moderation_report", data))
54
55
56 def format_email(email_name, data):
57 """render the email templates"""
58 subject = get_template(f"email/{email_name}/subject.html").render(data).strip()
59 html_content = (
60 get_template(f"email/{email_name}/html_content.html").render(data).strip()
61 )
62 text_content = (
63 get_template(f"email/{email_name}/text_content.html").render(data).strip()
64 )
65 return (subject, html_content, text_content)
66
67
68 @app.task(queue="high_priority")
69 def send_email(recipient, subject, html_content, text_content):
70 """use a task to send the email"""
71 email = EmailMultiAlternatives(
72 subject, text_content, settings.DEFAULT_FROM_EMAIL, [recipient]
73 )
74 email.attach_alternative(html_content, "text/html")
75 email.send()
76
```
Path: `bookwyrm/settings.py`
Content:
```
1 """ bookwyrm settings and configuration """
2 import os
3 from environs import Env
4
5 import requests
6 from django.utils.translation import gettext_lazy as _
7
8
9 env = Env()
10 env.read_env()
11 DOMAIN = env("DOMAIN")
12 VERSION = "0.1.0"
13
14 PAGE_LENGTH = env("PAGE_LENGTH", 15)
15 DEFAULT_LANGUAGE = env("DEFAULT_LANGUAGE", "English")
16
17 JS_CACHE = "2d3181e1"
18
19 # email
20 EMAIL_BACKEND = env("EMAIL_BACKEND", "django.core.mail.backends.smtp.EmailBackend")
21 EMAIL_HOST = env("EMAIL_HOST")
22 EMAIL_PORT = env("EMAIL_PORT", 587)
23 EMAIL_HOST_USER = env("EMAIL_HOST_USER")
24 EMAIL_HOST_PASSWORD = env("EMAIL_HOST_PASSWORD")
25 EMAIL_USE_TLS = env.bool("EMAIL_USE_TLS", True)
26 EMAIL_USE_SSL = env.bool("EMAIL_USE_SSL", False)
27 DEFAULT_FROM_EMAIL = f"admin@{DOMAIN}"
28
29 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
30 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
31 LOCALE_PATHS = [
32 os.path.join(BASE_DIR, "locale"),
33 ]
34 LANGUAGE_COOKIE_NAME = env.str("LANGUAGE_COOKIE_NAME", "django_language")
35
36 DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
37
38 # Preview image
39 ENABLE_PREVIEW_IMAGES = env.bool("ENABLE_PREVIEW_IMAGES", False)
40 PREVIEW_BG_COLOR = env.str("PREVIEW_BG_COLOR", "use_dominant_color_light")
41 PREVIEW_TEXT_COLOR = env.str("PREVIEW_TEXT_COLOR", "#363636")
42 PREVIEW_IMG_WIDTH = env.int("PREVIEW_IMG_WIDTH", 1200)
43 PREVIEW_IMG_HEIGHT = env.int("PREVIEW_IMG_HEIGHT", 630)
44 PREVIEW_DEFAULT_COVER_COLOR = env.str("PREVIEW_DEFAULT_COVER_COLOR", "#002549")
45
46 # Quick-start development settings - unsuitable for production
47 # See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
48
49 # SECURITY WARNING: keep the secret key used in production secret!
50 SECRET_KEY = env("SECRET_KEY")
51
52 # SECURITY WARNING: don't run with debug turned on in production!
53 DEBUG = env.bool("DEBUG", True)
54 USE_HTTPS = env.bool("USE_HTTPS", False)
55
56 ALLOWED_HOSTS = env.list("ALLOWED_HOSTS", ["*"])
57
58 # Application definition
59
60 INSTALLED_APPS = [
61 "django.contrib.admin",
62 "django.contrib.auth",
63 "django.contrib.contenttypes",
64 "django.contrib.sessions",
65 "django.contrib.messages",
66 "django.contrib.staticfiles",
67 "django.contrib.humanize",
68 "django_rename_app",
69 "bookwyrm",
70 "celery",
71 "imagekit",
72 "storages",
73 ]
74
75 MIDDLEWARE = [
76 "django.middleware.security.SecurityMiddleware",
77 "django.contrib.sessions.middleware.SessionMiddleware",
78 "django.middleware.locale.LocaleMiddleware",
79 "django.middleware.common.CommonMiddleware",
80 "django.middleware.csrf.CsrfViewMiddleware",
81 "django.contrib.auth.middleware.AuthenticationMiddleware",
82 "bookwyrm.middleware.TimezoneMiddleware",
83 "bookwyrm.middleware.IPBlocklistMiddleware",
84 "django.contrib.messages.middleware.MessageMiddleware",
85 "django.middleware.clickjacking.XFrameOptionsMiddleware",
86 ]
87
88 ROOT_URLCONF = "bookwyrm.urls"
89
90 TEMPLATES = [
91 {
92 "BACKEND": "django.template.backends.django.DjangoTemplates",
93 "DIRS": ["templates"],
94 "APP_DIRS": True,
95 "OPTIONS": {
96 "context_processors": [
97 "django.template.context_processors.debug",
98 "django.template.context_processors.request",
99 "django.contrib.auth.context_processors.auth",
100 "django.contrib.messages.context_processors.messages",
101 "bookwyrm.context_processors.site_settings",
102 ],
103 },
104 },
105 ]
106
107
108 WSGI_APPLICATION = "bookwyrm.wsgi.application"
109
110 # redis/activity streams settings
111 REDIS_ACTIVITY_HOST = env("REDIS_ACTIVITY_HOST", "localhost")
112 REDIS_ACTIVITY_PORT = env("REDIS_ACTIVITY_PORT", 6379)
113 REDIS_ACTIVITY_PASSWORD = env("REDIS_ACTIVITY_PASSWORD", None)
114
115 MAX_STREAM_LENGTH = int(env("MAX_STREAM_LENGTH", 200))
116
117 STREAMS = [
118 {"key": "home", "name": _("Home Timeline"), "shortname": _("Home")},
119 {"key": "books", "name": _("Books Timeline"), "shortname": _("Books")},
120 ]
121
122 # Search configuration
123 # total time in seconds that the instance will spend searching connectors
124 SEARCH_TIMEOUT = int(env("SEARCH_TIMEOUT", 15))
125 # timeout for a query to an individual connector
126 QUERY_TIMEOUT = int(env("QUERY_TIMEOUT", 5))
127
128 # Redis cache backend
129 if env("USE_DUMMY_CACHE", False):
130 CACHES = {
131 "default": {
132 "BACKEND": "django.core.cache.backends.dummy.DummyCache",
133 }
134 }
135 else:
136 # pylint: disable=line-too-long
137 CACHES = {
138 "default": {
139 "BACKEND": "django_redis.cache.RedisCache",
140 "LOCATION": f"redis://:{REDIS_ACTIVITY_PASSWORD}@{REDIS_ACTIVITY_HOST}:{REDIS_ACTIVITY_PORT}/0",
141 "OPTIONS": {
142 "CLIENT_CLASS": "django_redis.client.DefaultClient",
143 },
144 }
145 }
146
147 SESSION_ENGINE = "django.contrib.sessions.backends.cache"
148 SESSION_CACHE_ALIAS = "default"
149
150 # Database
151 # https://docs.djangoproject.com/en/3.2/ref/settings/#databases
152
153 DATABASES = {
154 "default": {
155 "ENGINE": "django.db.backends.postgresql_psycopg2",
156 "NAME": env("POSTGRES_DB", "bookwyrm"),
157 "USER": env("POSTGRES_USER", "bookwyrm"),
158 "PASSWORD": env("POSTGRES_PASSWORD", "bookwyrm"),
159 "HOST": env("POSTGRES_HOST", ""),
160 "PORT": env("PGPORT", 5432),
161 },
162 }
163
164
165 LOGIN_URL = "/login/"
166 AUTH_USER_MODEL = "bookwyrm.User"
167
168 # Password validation
169 # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
170
171 # pylint: disable=line-too-long
172 AUTH_PASSWORD_VALIDATORS = [
173 {
174 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
175 },
176 {
177 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
178 },
179 {
180 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
181 },
182 {
183 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
184 },
185 ]
186
187
188 # Internationalization
189 # https://docs.djangoproject.com/en/3.2/topics/i18n/
190
191 LANGUAGE_CODE = "en-us"
192 LANGUAGES = [
193 ("en-us", _("English")),
194 ("de-de", _("Deutsch (German)")),
195 ("es-es", _("Español (Spanish)")),
196 ("gl-es", _("Galego (Galician)")),
197 ("fr-fr", _("Français (French)")),
198 ("lt-lt", _("Lietuvių (Lithuanian)")),
199 ("pt-br", _("Português do Brasil (Brazilian Portuguese)")),
200 ("pt-pt", _("Português Europeu (European Portuguese)")),
201 ("zh-hans", _("简体中文 (Simplified Chinese)")),
202 ("zh-hant", _("繁體中文 (Traditional Chinese)")),
203 ]
204
205
206 TIME_ZONE = "UTC"
207
208 USE_I18N = True
209
210 USE_L10N = True
211
212 USE_TZ = True
213
214
215 agent = requests.utils.default_user_agent()
216 USER_AGENT = f"{agent} (BookWyrm/{VERSION}; +https://{DOMAIN}/)"
217
218 # Imagekit generated thumbnails
219 ENABLE_THUMBNAIL_GENERATION = env.bool("ENABLE_THUMBNAIL_GENERATION", False)
220 IMAGEKIT_CACHEFILE_DIR = "thumbnails"
221
222 # Static files (CSS, JavaScript, Images)
223 # https://docs.djangoproject.com/en/3.2/howto/static-files/
224
225 PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
226
227 # Storage
228
229 PROTOCOL = "http"
230 if USE_HTTPS:
231 PROTOCOL = "https"
232
233 USE_S3 = env.bool("USE_S3", False)
234
235 if USE_S3:
236 # AWS settings
237 AWS_ACCESS_KEY_ID = env("AWS_ACCESS_KEY_ID")
238 AWS_SECRET_ACCESS_KEY = env("AWS_SECRET_ACCESS_KEY")
239 AWS_STORAGE_BUCKET_NAME = env("AWS_STORAGE_BUCKET_NAME")
240 AWS_S3_CUSTOM_DOMAIN = env("AWS_S3_CUSTOM_DOMAIN")
241 AWS_S3_REGION_NAME = env("AWS_S3_REGION_NAME", "")
242 AWS_S3_ENDPOINT_URL = env("AWS_S3_ENDPOINT_URL")
243 AWS_DEFAULT_ACL = "public-read"
244 AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
245 # S3 Static settings
246 STATIC_LOCATION = "static"
247 STATIC_URL = f"{PROTOCOL}://{AWS_S3_CUSTOM_DOMAIN}/{STATIC_LOCATION}/"
248 STATICFILES_STORAGE = "bookwyrm.storage_backends.StaticStorage"
249 # S3 Media settings
250 MEDIA_LOCATION = "images"
251 MEDIA_URL = f"{PROTOCOL}://{AWS_S3_CUSTOM_DOMAIN}/{MEDIA_LOCATION}/"
252 MEDIA_FULL_URL = MEDIA_URL
253 STATIC_FULL_URL = STATIC_URL
254 DEFAULT_FILE_STORAGE = "bookwyrm.storage_backends.ImagesStorage"
255 # I don't know if it's used, but the site crashes without it
256 STATIC_ROOT = os.path.join(BASE_DIR, env("STATIC_ROOT", "static"))
257 MEDIA_ROOT = os.path.join(BASE_DIR, env("MEDIA_ROOT", "images"))
258 else:
259 STATIC_URL = "/static/"
260 STATIC_ROOT = os.path.join(BASE_DIR, env("STATIC_ROOT", "static"))
261 MEDIA_URL = "/images/"
262 MEDIA_FULL_URL = f"{PROTOCOL}://{DOMAIN}{MEDIA_URL}"
263 STATIC_FULL_URL = f"{PROTOCOL}://{DOMAIN}{STATIC_URL}"
264 MEDIA_ROOT = os.path.join(BASE_DIR, env("MEDIA_ROOT", "images"))
265
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bookwyrm/emailing.py b/bookwyrm/emailing.py
--- a/bookwyrm/emailing.py
+++ b/bookwyrm/emailing.py
@@ -69,7 +69,7 @@
def send_email(recipient, subject, html_content, text_content):
"""use a task to send the email"""
email = EmailMultiAlternatives(
- subject, text_content, settings.DEFAULT_FROM_EMAIL, [recipient]
+ subject, text_content, settings.EMAIL_SENDER, [recipient]
)
email.attach_alternative(html_content, "text/html")
email.send()
diff --git a/bookwyrm/settings.py b/bookwyrm/settings.py
--- a/bookwyrm/settings.py
+++ b/bookwyrm/settings.py
@@ -24,7 +24,9 @@
EMAIL_HOST_PASSWORD = env("EMAIL_HOST_PASSWORD")
EMAIL_USE_TLS = env.bool("EMAIL_USE_TLS", True)
EMAIL_USE_SSL = env.bool("EMAIL_USE_SSL", False)
-DEFAULT_FROM_EMAIL = f"admin@{DOMAIN}"
+EMAIL_SENDER_NAME = env("EMAIL_SENDER_NAME", "admin")
+EMAIL_SENDER_DOMAIN = env("EMAIL_SENDER_NAME", DOMAIN)
+EMAIL_SENDER = f"{EMAIL_SENDER_NAME}@{EMAIL_SENDER_DOMAIN}"
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
| {"golden_diff": "diff --git a/bookwyrm/emailing.py b/bookwyrm/emailing.py\n--- a/bookwyrm/emailing.py\n+++ b/bookwyrm/emailing.py\n@@ -69,7 +69,7 @@\n def send_email(recipient, subject, html_content, text_content):\n \"\"\"use a task to send the email\"\"\"\n email = EmailMultiAlternatives(\n- subject, text_content, settings.DEFAULT_FROM_EMAIL, [recipient]\n+ subject, text_content, settings.EMAIL_SENDER, [recipient]\n )\n email.attach_alternative(html_content, \"text/html\")\n email.send()\ndiff --git a/bookwyrm/settings.py b/bookwyrm/settings.py\n--- a/bookwyrm/settings.py\n+++ b/bookwyrm/settings.py\n@@ -24,7 +24,9 @@\n EMAIL_HOST_PASSWORD = env(\"EMAIL_HOST_PASSWORD\")\n EMAIL_USE_TLS = env.bool(\"EMAIL_USE_TLS\", True)\n EMAIL_USE_SSL = env.bool(\"EMAIL_USE_SSL\", False)\n-DEFAULT_FROM_EMAIL = f\"admin@{DOMAIN}\"\n+EMAIL_SENDER_NAME = env(\"EMAIL_SENDER_NAME\", \"admin\")\n+EMAIL_SENDER_DOMAIN = env(\"EMAIL_SENDER_NAME\", DOMAIN)\n+EMAIL_SENDER = f\"{EMAIL_SENDER_NAME}@{EMAIL_SENDER_DOMAIN}\"\n \n # Build paths inside the project like this: os.path.join(BASE_DIR, ...)\n BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n", "issue": "Allow changing what email notifications are sent from\n**Is your feature request related to a problem? Please describe.**\r\nIt came up on the Matrix chat that an admin didn't have an admin@ email for their domain, and wanted to change it to no-reply@, which is only possible by changing `settings.py`\r\n\r\n**Describe the solution you'd like**\r\nIt would be nice to be able to configure this in the `.env` alongside the rest of the email things - perhaps `EMAIL_FROM_ADDRESS` or `EMAIL_SENDER_ADDRESS` or something.\r\n\r\n**Describe alternatives you've considered**\r\nChanging in settings.py works but is in a checked-in file so that's not great. 
We could allow just changing the username of the email, but it seems useful to have the additional flexibility (for example, I might want to send my Bookywrm emails from `[email protected]` instead of `[email protected]`, since I can't receive email at `bookwyrm.cincodenada.com`)\r\n\r\n**Additional context**\r\n[Matrix conversation](https://matrix.to/#/!zoxBMxLlvIyeEKkHuB:matrix.org/$tQg0cS2vzhBeziDszNLxnIc21TR-KzYk55PgWBpyRTo?via=matrix.org&via=tchncs.de&via=group.lt)\r\n\n", "before_files": [{"content": "\"\"\" send emails \"\"\"\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template.loader import get_template\n\nfrom bookwyrm import models, settings\nfrom bookwyrm.tasks import app\nfrom bookwyrm.settings import DOMAIN\n\n\ndef email_data():\n \"\"\"fields every email needs\"\"\"\n site = models.SiteSettings.objects.get()\n return {\n \"site_name\": site.name,\n \"logo\": site.logo_small_url,\n \"domain\": DOMAIN,\n \"user\": None,\n }\n\n\ndef email_confirmation_email(user):\n \"\"\"newly registered users confirm email address\"\"\"\n data = email_data()\n data[\"confirmation_code\"] = user.confirmation_code\n data[\"confirmation_link\"] = user.confirmation_link\n send_email.delay(user.email, *format_email(\"confirm\", data))\n\n\ndef invite_email(invite_request):\n \"\"\"send out an invite code\"\"\"\n data = email_data()\n data[\"invite_link\"] = invite_request.invite.link\n send_email.delay(invite_request.email, *format_email(\"invite\", data))\n\n\ndef password_reset_email(reset_code):\n \"\"\"generate a password reset email\"\"\"\n data = email_data()\n data[\"reset_link\"] = reset_code.link\n data[\"user\"] = reset_code.user.display_name\n send_email.delay(reset_code.user.email, *format_email(\"password_reset\", data))\n\n\ndef moderation_report_email(report):\n \"\"\"a report was created\"\"\"\n data = email_data()\n data[\"reporter\"] = report.reporter.localname or report.reporter.username\n data[\"reportee\"] = report.user.localname or report.user.username\n data[\"report_link\"] = report.remote_id\n\n for admin in models.User.objects.filter(groups__name__in=[\"admin\", \"moderator\"]):\n data[\"user\"] = admin.display_name\n send_email.delay(admin.email, *format_email(\"moderation_report\", data))\n\n\ndef format_email(email_name, data):\n \"\"\"render the email templates\"\"\"\n subject = get_template(f\"email/{email_name}/subject.html\").render(data).strip()\n html_content = (\n get_template(f\"email/{email_name}/html_content.html\").render(data).strip()\n )\n text_content = (\n get_template(f\"email/{email_name}/text_content.html\").render(data).strip()\n )\n return (subject, html_content, text_content)\n\n\[email protected](queue=\"high_priority\")\ndef send_email(recipient, subject, html_content, text_content):\n \"\"\"use a task to send the email\"\"\"\n email = EmailMultiAlternatives(\n subject, text_content, settings.DEFAULT_FROM_EMAIL, [recipient]\n )\n email.attach_alternative(html_content, \"text/html\")\n email.send()\n", "path": "bookwyrm/emailing.py"}, {"content": "\"\"\" bookwyrm settings and configuration \"\"\"\nimport os\nfrom environs import Env\n\nimport requests\nfrom django.utils.translation import gettext_lazy as _\n\n\nenv = Env()\nenv.read_env()\nDOMAIN = env(\"DOMAIN\")\nVERSION = \"0.1.0\"\n\nPAGE_LENGTH = env(\"PAGE_LENGTH\", 15)\nDEFAULT_LANGUAGE = env(\"DEFAULT_LANGUAGE\", \"English\")\n\nJS_CACHE = \"2d3181e1\"\n\n# email\nEMAIL_BACKEND = env(\"EMAIL_BACKEND\", \"django.core.mail.backends.smtp.EmailBackend\")\nEMAIL_HOST = 
env(\"EMAIL_HOST\")\nEMAIL_PORT = env(\"EMAIL_PORT\", 587)\nEMAIL_HOST_USER = env(\"EMAIL_HOST_USER\")\nEMAIL_HOST_PASSWORD = env(\"EMAIL_HOST_PASSWORD\")\nEMAIL_USE_TLS = env.bool(\"EMAIL_USE_TLS\", True)\nEMAIL_USE_SSL = env.bool(\"EMAIL_USE_SSL\", False)\nDEFAULT_FROM_EMAIL = f\"admin@{DOMAIN}\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nLOCALE_PATHS = [\n os.path.join(BASE_DIR, \"locale\"),\n]\nLANGUAGE_COOKIE_NAME = env.str(\"LANGUAGE_COOKIE_NAME\", \"django_language\")\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Preview image\nENABLE_PREVIEW_IMAGES = env.bool(\"ENABLE_PREVIEW_IMAGES\", False)\nPREVIEW_BG_COLOR = env.str(\"PREVIEW_BG_COLOR\", \"use_dominant_color_light\")\nPREVIEW_TEXT_COLOR = env.str(\"PREVIEW_TEXT_COLOR\", \"#363636\")\nPREVIEW_IMG_WIDTH = env.int(\"PREVIEW_IMG_WIDTH\", 1200)\nPREVIEW_IMG_HEIGHT = env.int(\"PREVIEW_IMG_HEIGHT\", 630)\nPREVIEW_DEFAULT_COVER_COLOR = env.str(\"PREVIEW_DEFAULT_COVER_COLOR\", \"#002549\")\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = env(\"SECRET_KEY\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = env.bool(\"DEBUG\", True)\nUSE_HTTPS = env.bool(\"USE_HTTPS\", False)\n\nALLOWED_HOSTS = env.list(\"ALLOWED_HOSTS\", [\"*\"])\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.humanize\",\n \"django_rename_app\",\n \"bookwyrm\",\n \"celery\",\n \"imagekit\",\n \"storages\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"bookwyrm.middleware.TimezoneMiddleware\",\n \"bookwyrm.middleware.IPBlocklistMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nROOT_URLCONF = \"bookwyrm.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\"templates\"],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"bookwyrm.context_processors.site_settings\",\n ],\n },\n },\n]\n\n\nWSGI_APPLICATION = \"bookwyrm.wsgi.application\"\n\n# redis/activity streams settings\nREDIS_ACTIVITY_HOST = env(\"REDIS_ACTIVITY_HOST\", \"localhost\")\nREDIS_ACTIVITY_PORT = env(\"REDIS_ACTIVITY_PORT\", 6379)\nREDIS_ACTIVITY_PASSWORD = env(\"REDIS_ACTIVITY_PASSWORD\", None)\n\nMAX_STREAM_LENGTH = int(env(\"MAX_STREAM_LENGTH\", 200))\n\nSTREAMS = [\n {\"key\": \"home\", \"name\": _(\"Home Timeline\"), \"shortname\": _(\"Home\")},\n {\"key\": \"books\", \"name\": _(\"Books Timeline\"), \"shortname\": _(\"Books\")},\n]\n\n# Search configuration\n# total time in seconds that the instance will spend 
searching connectors\nSEARCH_TIMEOUT = int(env(\"SEARCH_TIMEOUT\", 15))\n# timeout for a query to an individual connector\nQUERY_TIMEOUT = int(env(\"QUERY_TIMEOUT\", 5))\n\n# Redis cache backend\nif env(\"USE_DUMMY_CACHE\", False):\n CACHES = {\n \"default\": {\n \"BACKEND\": \"django.core.cache.backends.dummy.DummyCache\",\n }\n }\nelse:\n # pylint: disable=line-too-long\n CACHES = {\n \"default\": {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": f\"redis://:{REDIS_ACTIVITY_PASSWORD}@{REDIS_ACTIVITY_HOST}:{REDIS_ACTIVITY_PORT}/0\",\n \"OPTIONS\": {\n \"CLIENT_CLASS\": \"django_redis.client.DefaultClient\",\n },\n }\n }\n\n SESSION_ENGINE = \"django.contrib.sessions.backends.cache\"\n SESSION_CACHE_ALIAS = \"default\"\n\n# Database\n# https://docs.djangoproject.com/en/3.2/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql_psycopg2\",\n \"NAME\": env(\"POSTGRES_DB\", \"bookwyrm\"),\n \"USER\": env(\"POSTGRES_USER\", \"bookwyrm\"),\n \"PASSWORD\": env(\"POSTGRES_PASSWORD\", \"bookwyrm\"),\n \"HOST\": env(\"POSTGRES_HOST\", \"\"),\n \"PORT\": env(\"PGPORT\", 5432),\n },\n}\n\n\nLOGIN_URL = \"/login/\"\nAUTH_USER_MODEL = \"bookwyrm.User\"\n\n# Password validation\n# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators\n\n# pylint: disable=line-too-long\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.2/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\nLANGUAGES = [\n (\"en-us\", _(\"English\")),\n (\"de-de\", _(\"Deutsch (German)\")),\n (\"es-es\", _(\"Espa\u00f1ol (Spanish)\")),\n (\"gl-es\", _(\"Galego (Galician)\")),\n (\"fr-fr\", _(\"Fran\u00e7ais (French)\")),\n (\"lt-lt\", _(\"Lietuvi\u0173 (Lithuanian)\")),\n (\"pt-br\", _(\"Portugu\u00eas do Brasil (Brazilian Portuguese)\")),\n (\"pt-pt\", _(\"Portugu\u00eas Europeu (European Portuguese)\")),\n (\"zh-hans\", _(\"\u7b80\u4f53\u4e2d\u6587 (Simplified Chinese)\")),\n (\"zh-hant\", _(\"\u7e41\u9ad4\u4e2d\u6587 (Traditional Chinese)\")),\n]\n\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\nagent = requests.utils.default_user_agent()\nUSER_AGENT = f\"{agent} (BookWyrm/{VERSION}; +https://{DOMAIN}/)\"\n\n# Imagekit generated thumbnails\nENABLE_THUMBNAIL_GENERATION = env.bool(\"ENABLE_THUMBNAIL_GENERATION\", False)\nIMAGEKIT_CACHEFILE_DIR = \"thumbnails\"\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.2/howto/static-files/\n\nPROJECT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n# Storage\n\nPROTOCOL = \"http\"\nif USE_HTTPS:\n PROTOCOL = \"https\"\n\nUSE_S3 = env.bool(\"USE_S3\", False)\n\nif USE_S3:\n # AWS settings\n AWS_ACCESS_KEY_ID = env(\"AWS_ACCESS_KEY_ID\")\n AWS_SECRET_ACCESS_KEY = env(\"AWS_SECRET_ACCESS_KEY\")\n AWS_STORAGE_BUCKET_NAME = env(\"AWS_STORAGE_BUCKET_NAME\")\n AWS_S3_CUSTOM_DOMAIN = env(\"AWS_S3_CUSTOM_DOMAIN\")\n AWS_S3_REGION_NAME = env(\"AWS_S3_REGION_NAME\", \"\")\n AWS_S3_ENDPOINT_URL = env(\"AWS_S3_ENDPOINT_URL\")\n AWS_DEFAULT_ACL = \"public-read\"\n AWS_S3_OBJECT_PARAMETERS = {\"CacheControl\": \"max-age=86400\"}\n # S3 Static settings\n 
STATIC_LOCATION = \"static\"\n STATIC_URL = f\"{PROTOCOL}://{AWS_S3_CUSTOM_DOMAIN}/{STATIC_LOCATION}/\"\n STATICFILES_STORAGE = \"bookwyrm.storage_backends.StaticStorage\"\n # S3 Media settings\n MEDIA_LOCATION = \"images\"\n MEDIA_URL = f\"{PROTOCOL}://{AWS_S3_CUSTOM_DOMAIN}/{MEDIA_LOCATION}/\"\n MEDIA_FULL_URL = MEDIA_URL\n STATIC_FULL_URL = STATIC_URL\n DEFAULT_FILE_STORAGE = \"bookwyrm.storage_backends.ImagesStorage\"\n # I don't know if it's used, but the site crashes without it\n STATIC_ROOT = os.path.join(BASE_DIR, env(\"STATIC_ROOT\", \"static\"))\n MEDIA_ROOT = os.path.join(BASE_DIR, env(\"MEDIA_ROOT\", \"images\"))\nelse:\n STATIC_URL = \"/static/\"\n STATIC_ROOT = os.path.join(BASE_DIR, env(\"STATIC_ROOT\", \"static\"))\n MEDIA_URL = \"/images/\"\n MEDIA_FULL_URL = f\"{PROTOCOL}://{DOMAIN}{MEDIA_URL}\"\n STATIC_FULL_URL = f\"{PROTOCOL}://{DOMAIN}{STATIC_URL}\"\n MEDIA_ROOT = os.path.join(BASE_DIR, env(\"MEDIA_ROOT\", \"images\"))\n", "path": "bookwyrm/settings.py"}], "after_files": [{"content": "\"\"\" send emails \"\"\"\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template.loader import get_template\n\nfrom bookwyrm import models, settings\nfrom bookwyrm.tasks import app\nfrom bookwyrm.settings import DOMAIN\n\n\ndef email_data():\n \"\"\"fields every email needs\"\"\"\n site = models.SiteSettings.objects.get()\n return {\n \"site_name\": site.name,\n \"logo\": site.logo_small_url,\n \"domain\": DOMAIN,\n \"user\": None,\n }\n\n\ndef email_confirmation_email(user):\n \"\"\"newly registered users confirm email address\"\"\"\n data = email_data()\n data[\"confirmation_code\"] = user.confirmation_code\n data[\"confirmation_link\"] = user.confirmation_link\n send_email.delay(user.email, *format_email(\"confirm\", data))\n\n\ndef invite_email(invite_request):\n \"\"\"send out an invite code\"\"\"\n data = email_data()\n data[\"invite_link\"] = invite_request.invite.link\n send_email.delay(invite_request.email, *format_email(\"invite\", data))\n\n\ndef password_reset_email(reset_code):\n \"\"\"generate a password reset email\"\"\"\n data = email_data()\n data[\"reset_link\"] = reset_code.link\n data[\"user\"] = reset_code.user.display_name\n send_email.delay(reset_code.user.email, *format_email(\"password_reset\", data))\n\n\ndef moderation_report_email(report):\n \"\"\"a report was created\"\"\"\n data = email_data()\n data[\"reporter\"] = report.reporter.localname or report.reporter.username\n data[\"reportee\"] = report.user.localname or report.user.username\n data[\"report_link\"] = report.remote_id\n\n for admin in models.User.objects.filter(groups__name__in=[\"admin\", \"moderator\"]):\n data[\"user\"] = admin.display_name\n send_email.delay(admin.email, *format_email(\"moderation_report\", data))\n\n\ndef format_email(email_name, data):\n \"\"\"render the email templates\"\"\"\n subject = get_template(f\"email/{email_name}/subject.html\").render(data).strip()\n html_content = (\n get_template(f\"email/{email_name}/html_content.html\").render(data).strip()\n )\n text_content = (\n get_template(f\"email/{email_name}/text_content.html\").render(data).strip()\n )\n return (subject, html_content, text_content)\n\n\[email protected](queue=\"high_priority\")\ndef send_email(recipient, subject, html_content, text_content):\n \"\"\"use a task to send the email\"\"\"\n email = EmailMultiAlternatives(\n subject, text_content, settings.EMAIL_SENDER, [recipient]\n )\n email.attach_alternative(html_content, \"text/html\")\n email.send()\n", "path": 
"bookwyrm/emailing.py"}, {"content": "\"\"\" bookwyrm settings and configuration \"\"\"\nimport os\nfrom environs import Env\n\nimport requests\nfrom django.utils.translation import gettext_lazy as _\n\n\nenv = Env()\nenv.read_env()\nDOMAIN = env(\"DOMAIN\")\nVERSION = \"0.1.0\"\n\nPAGE_LENGTH = env(\"PAGE_LENGTH\", 15)\nDEFAULT_LANGUAGE = env(\"DEFAULT_LANGUAGE\", \"English\")\n\nJS_CACHE = \"2d3181e1\"\n\n# email\nEMAIL_BACKEND = env(\"EMAIL_BACKEND\", \"django.core.mail.backends.smtp.EmailBackend\")\nEMAIL_HOST = env(\"EMAIL_HOST\")\nEMAIL_PORT = env(\"EMAIL_PORT\", 587)\nEMAIL_HOST_USER = env(\"EMAIL_HOST_USER\")\nEMAIL_HOST_PASSWORD = env(\"EMAIL_HOST_PASSWORD\")\nEMAIL_USE_TLS = env.bool(\"EMAIL_USE_TLS\", True)\nEMAIL_USE_SSL = env.bool(\"EMAIL_USE_SSL\", False)\nEMAIL_SENDER_NAME = env(\"EMAIL_SENDER_NAME\", \"admin\")\nEMAIL_SENDER_DOMAIN = env(\"EMAIL_SENDER_NAME\", DOMAIN)\nEMAIL_SENDER = f\"{EMAIL_SENDER_NAME}@{EMAIL_SENDER_DOMAIN}\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nLOCALE_PATHS = [\n os.path.join(BASE_DIR, \"locale\"),\n]\nLANGUAGE_COOKIE_NAME = env.str(\"LANGUAGE_COOKIE_NAME\", \"django_language\")\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Preview image\nENABLE_PREVIEW_IMAGES = env.bool(\"ENABLE_PREVIEW_IMAGES\", False)\nPREVIEW_BG_COLOR = env.str(\"PREVIEW_BG_COLOR\", \"use_dominant_color_light\")\nPREVIEW_TEXT_COLOR = env.str(\"PREVIEW_TEXT_COLOR\", \"#363636\")\nPREVIEW_IMG_WIDTH = env.int(\"PREVIEW_IMG_WIDTH\", 1200)\nPREVIEW_IMG_HEIGHT = env.int(\"PREVIEW_IMG_HEIGHT\", 630)\nPREVIEW_DEFAULT_COVER_COLOR = env.str(\"PREVIEW_DEFAULT_COVER_COLOR\", \"#002549\")\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = env(\"SECRET_KEY\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = env.bool(\"DEBUG\", True)\nUSE_HTTPS = env.bool(\"USE_HTTPS\", False)\n\nALLOWED_HOSTS = env.list(\"ALLOWED_HOSTS\", [\"*\"])\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.humanize\",\n \"django_rename_app\",\n \"bookwyrm\",\n \"celery\",\n \"imagekit\",\n \"storages\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"bookwyrm.middleware.TimezoneMiddleware\",\n \"bookwyrm.middleware.IPBlocklistMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nROOT_URLCONF = \"bookwyrm.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\"templates\"],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n 
\"bookwyrm.context_processors.site_settings\",\n ],\n },\n },\n]\n\n\nWSGI_APPLICATION = \"bookwyrm.wsgi.application\"\n\n# redis/activity streams settings\nREDIS_ACTIVITY_HOST = env(\"REDIS_ACTIVITY_HOST\", \"localhost\")\nREDIS_ACTIVITY_PORT = env(\"REDIS_ACTIVITY_PORT\", 6379)\nREDIS_ACTIVITY_PASSWORD = env(\"REDIS_ACTIVITY_PASSWORD\", None)\n\nMAX_STREAM_LENGTH = int(env(\"MAX_STREAM_LENGTH\", 200))\n\nSTREAMS = [\n {\"key\": \"home\", \"name\": _(\"Home Timeline\"), \"shortname\": _(\"Home\")},\n {\"key\": \"books\", \"name\": _(\"Books Timeline\"), \"shortname\": _(\"Books\")},\n]\n\n# Database\n# https://docs.djangoproject.com/en/3.2/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql_psycopg2\",\n \"NAME\": env(\"POSTGRES_DB\", \"bookwyrm\"),\n \"USER\": env(\"POSTGRES_USER\", \"bookwyrm\"),\n \"PASSWORD\": env(\"POSTGRES_PASSWORD\", \"bookwyrm\"),\n \"HOST\": env(\"POSTGRES_HOST\", \"\"),\n \"PORT\": env(\"PGPORT\", 5432),\n },\n}\n\n\nLOGIN_URL = \"/login/\"\nAUTH_USER_MODEL = \"bookwyrm.User\"\n\n# Password validation\n# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators\n\n# pylint: disable=line-too-long\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.2/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\nLANGUAGES = [\n (\"en-us\", _(\"English\")),\n (\"de-de\", _(\"Deutsch (German)\")),\n (\"es-es\", _(\"Espa\u00f1ol (Spanish)\")),\n (\"gl-es\", _(\"Galego (Galician)\")),\n (\"fr-fr\", _(\"Fran\u00e7ais (French)\")),\n (\"lt-lt\", _(\"Lietuvi\u0173 (Lithuanian)\")),\n (\"pt-br\", _(\"Portugu\u00eas do Brasil (Brazilian Portuguese)\")),\n (\"pt-pt\", _(\"Portugu\u00eas Europeu (European Portuguese)\")),\n (\"zh-hans\", _(\"\u7b80\u4f53\u4e2d\u6587 (Simplified Chinese)\")),\n (\"zh-hant\", _(\"\u7e41\u9ad4\u4e2d\u6587 (Traditional Chinese)\")),\n]\n\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\nagent = requests.utils.default_user_agent()\nUSER_AGENT = f\"{agent} (BookWyrm/{VERSION}; +https://{DOMAIN}/)\"\n\n# Imagekit generated thumbnails\nENABLE_THUMBNAIL_GENERATION = env.bool(\"ENABLE_THUMBNAIL_GENERATION\", False)\nIMAGEKIT_CACHEFILE_DIR = \"thumbnails\"\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.2/howto/static-files/\n\nPROJECT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n# Storage\n\nPROTOCOL = \"http\"\nif USE_HTTPS:\n PROTOCOL = \"https\"\n\nUSE_S3 = env.bool(\"USE_S3\", False)\n\nif USE_S3:\n # AWS settings\n AWS_ACCESS_KEY_ID = env(\"AWS_ACCESS_KEY_ID\")\n AWS_SECRET_ACCESS_KEY = env(\"AWS_SECRET_ACCESS_KEY\")\n AWS_STORAGE_BUCKET_NAME = env(\"AWS_STORAGE_BUCKET_NAME\")\n AWS_S3_CUSTOM_DOMAIN = env(\"AWS_S3_CUSTOM_DOMAIN\")\n AWS_S3_REGION_NAME = env(\"AWS_S3_REGION_NAME\", \"\")\n AWS_S3_ENDPOINT_URL = env(\"AWS_S3_ENDPOINT_URL\")\n AWS_DEFAULT_ACL = \"public-read\"\n AWS_S3_OBJECT_PARAMETERS = {\"CacheControl\": \"max-age=86400\"}\n # S3 Static settings\n STATIC_LOCATION = \"static\"\n STATIC_URL = f\"{PROTOCOL}://{AWS_S3_CUSTOM_DOMAIN}/{STATIC_LOCATION}/\"\n STATICFILES_STORAGE = 
\"bookwyrm.storage_backends.StaticStorage\"\n # S3 Media settings\n MEDIA_LOCATION = \"images\"\n MEDIA_URL = f\"{PROTOCOL}://{AWS_S3_CUSTOM_DOMAIN}/{MEDIA_LOCATION}/\"\n MEDIA_FULL_URL = MEDIA_URL\n STATIC_FULL_URL = STATIC_URL\n DEFAULT_FILE_STORAGE = \"bookwyrm.storage_backends.ImagesStorage\"\n # I don't know if it's used, but the site crashes without it\n STATIC_ROOT = os.path.join(BASE_DIR, env(\"STATIC_ROOT\", \"static\"))\n MEDIA_ROOT = os.path.join(BASE_DIR, env(\"MEDIA_ROOT\", \"images\"))\nelse:\n STATIC_URL = \"/static/\"\n STATIC_ROOT = os.path.join(BASE_DIR, env(\"STATIC_ROOT\", \"static\"))\n MEDIA_URL = \"/images/\"\n MEDIA_FULL_URL = f\"{PROTOCOL}://{DOMAIN}{MEDIA_URL}\"\n STATIC_FULL_URL = f\"{PROTOCOL}://{DOMAIN}{STATIC_URL}\"\n MEDIA_ROOT = os.path.join(BASE_DIR, env(\"MEDIA_ROOT\", \"images\"))\n", "path": "bookwyrm/settings.py"}]} | 4,092 | 302 |
gh_patches_debug_5445 | rasdani/github-patches | git_diff | comic__grand-challenge.org-1152 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add functionality to filter ImageFile objects by RawImageUploadSession from a client
**Is your feature request related to a problem? Please describe.**
I would like to know which ImageFile was generated by a specific RawImageUploadSession when querying from gcapi.
**Describe the solution you'd like**
Add an additional item to the existing set of filters in ImageViewSet.get_queryset()
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/grandchallenge/cases/views.py`
Content:
```
1 from django.contrib import messages
2 from django.http import Http404
3 from django.views.generic import DetailView
4 from guardian.mixins import (
5 LoginRequiredMixin,
6 PermissionRequiredMixin as ObjectPermissionRequiredMixin,
7 )
8 from rest_framework import status
9 from rest_framework.decorators import action
10 from rest_framework.mixins import (
11 CreateModelMixin,
12 ListModelMixin,
13 RetrieveModelMixin,
14 )
15 from rest_framework.permissions import DjangoObjectPermissions
16 from rest_framework.response import Response
17 from rest_framework.viewsets import GenericViewSet, ReadOnlyModelViewSet
18 from rest_framework_guardian.filters import ObjectPermissionsFilter
19
20 from grandchallenge.cases.models import (
21 Image,
22 ImageFile,
23 RawImageFile,
24 RawImageUploadSession,
25 )
26 from grandchallenge.cases.serializers import (
27 ImageSerializer,
28 RawImageFileSerializer,
29 RawImageUploadSessionSerializer,
30 )
31 from grandchallenge.core.permissions.rest_framework import (
32 DjangoObjectOnlyWithCustomPostPermissions,
33 )
34
35
36 class RawImageUploadSessionDetail(
37 LoginRequiredMixin, ObjectPermissionRequiredMixin, DetailView
38 ):
39 model = RawImageUploadSession
40 permission_required = f"{RawImageUploadSession._meta.app_label}.view_{RawImageUploadSession._meta.model_name}"
41 raise_exception = True
42
43
44 class ImageViewSet(ReadOnlyModelViewSet):
45 serializer_class = ImageSerializer
46 queryset = Image.objects.all()
47 permission_classes = [DjangoObjectPermissions]
48 filter_backends = [ObjectPermissionsFilter]
49
50 def get_queryset(self):
51 filters = {
52 "worklist": self.request.query_params.get("worklist", None),
53 "study": self.request.query_params.get("study", None),
54 }
55 filters = {k: v for k, v in filters.items() if v is not None}
56
57 queryset = super().get_queryset().filter(**filters)
58
59 return queryset
60
61
62 def show_image(request, *, pk):
63 from django.shortcuts import render
64
65 try:
66 image_file = ImageFile.objects.select_related("image").get(
67 image=pk, image_type="DZI"
68 )
69 except Image.DoesNotExist:
70 raise Http404("File not found.")
71
72 return render(
73 request,
74 "cases/show_image.html",
75 {"image_file": image_file, "url": image_file.file.url},
76 )
77
78
79 class RawImageUploadSessionViewSet(
80 CreateModelMixin, RetrieveModelMixin, ListModelMixin, GenericViewSet
81 ):
82 serializer_class = RawImageUploadSessionSerializer
83 queryset = RawImageUploadSession.objects.all()
84 permission_classes = [DjangoObjectOnlyWithCustomPostPermissions]
85 filter_backends = [ObjectPermissionsFilter]
86
87 def perform_create(self, serializer):
88 serializer.save(creator=self.request.user)
89
90 @action(detail=True, methods=["patch"])
91 def process_images(self, request, pk=None):
92 upload_session: RawImageUploadSession = self.get_object()
93 if (
94 upload_session.status == upload_session.PENDING
95 and not upload_session.rawimagefile_set.filter(
96 consumed=True
97 ).exists()
98 ):
99 upload_session.process_images()
100 messages.add_message(
101 request, messages.SUCCESS, "Image processing job queued."
102 )
103 return Response(status=status.HTTP_200_OK)
104 else:
105 messages.add_message(
106 request,
107 messages.ERROR,
108 "Image processing job could not be queued.",
109 )
110 return Response(status=status.HTTP_400_BAD_REQUEST)
111
112
113 class RawImageFileViewSet(
114 CreateModelMixin, RetrieveModelMixin, ListModelMixin, GenericViewSet
115 ):
116 serializer_class = RawImageFileSerializer
117 queryset = RawImageFile.objects.all()
118 permission_classes = [DjangoObjectOnlyWithCustomPostPermissions]
119 filter_backends = [ObjectPermissionsFilter]
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/grandchallenge/cases/views.py b/app/grandchallenge/cases/views.py
--- a/app/grandchallenge/cases/views.py
+++ b/app/grandchallenge/cases/views.py
@@ -51,6 +51,7 @@
filters = {
"worklist": self.request.query_params.get("worklist", None),
"study": self.request.query_params.get("study", None),
+ "origin": self.request.query_params.get("origin", None),
}
filters = {k: v for k, v in filters.items() if v is not None}
| {"golden_diff": "diff --git a/app/grandchallenge/cases/views.py b/app/grandchallenge/cases/views.py\n--- a/app/grandchallenge/cases/views.py\n+++ b/app/grandchallenge/cases/views.py\n@@ -51,6 +51,7 @@\n filters = {\n \"worklist\": self.request.query_params.get(\"worklist\", None),\n \"study\": self.request.query_params.get(\"study\", None),\n+ \"origin\": self.request.query_params.get(\"origin\", None),\n }\n filters = {k: v for k, v in filters.items() if v is not None}\n", "issue": "Add functionality to filter ImageFile objects by RawImageUploadSession from a client\n**Is your feature request related to a problem? Please describe.**\r\nI would like to know which ImageFile was generated by a specific RawImageUploadSession from gcapi\r\n**Describe the solution you'd like**\r\nAdd an additional item to the existing set of filters in ImageViewSet.get_queryset()\r\n\r\n\n", "before_files": [{"content": "from django.contrib import messages\nfrom django.http import Http404\nfrom django.views.generic import DetailView\nfrom guardian.mixins import (\n LoginRequiredMixin,\n PermissionRequiredMixin as ObjectPermissionRequiredMixin,\n)\nfrom rest_framework import status\nfrom rest_framework.decorators import action\nfrom rest_framework.mixins import (\n CreateModelMixin,\n ListModelMixin,\n RetrieveModelMixin,\n)\nfrom rest_framework.permissions import DjangoObjectPermissions\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import GenericViewSet, ReadOnlyModelViewSet\nfrom rest_framework_guardian.filters import ObjectPermissionsFilter\n\nfrom grandchallenge.cases.models import (\n Image,\n ImageFile,\n RawImageFile,\n RawImageUploadSession,\n)\nfrom grandchallenge.cases.serializers import (\n ImageSerializer,\n RawImageFileSerializer,\n RawImageUploadSessionSerializer,\n)\nfrom grandchallenge.core.permissions.rest_framework import (\n DjangoObjectOnlyWithCustomPostPermissions,\n)\n\n\nclass RawImageUploadSessionDetail(\n LoginRequiredMixin, ObjectPermissionRequiredMixin, DetailView\n):\n model = RawImageUploadSession\n permission_required = f\"{RawImageUploadSession._meta.app_label}.view_{RawImageUploadSession._meta.model_name}\"\n raise_exception = True\n\n\nclass ImageViewSet(ReadOnlyModelViewSet):\n serializer_class = ImageSerializer\n queryset = Image.objects.all()\n permission_classes = [DjangoObjectPermissions]\n filter_backends = [ObjectPermissionsFilter]\n\n def get_queryset(self):\n filters = {\n \"worklist\": self.request.query_params.get(\"worklist\", None),\n \"study\": self.request.query_params.get(\"study\", None),\n }\n filters = {k: v for k, v in filters.items() if v is not None}\n\n queryset = super().get_queryset().filter(**filters)\n\n return queryset\n\n\ndef show_image(request, *, pk):\n from django.shortcuts import render\n\n try:\n image_file = ImageFile.objects.select_related(\"image\").get(\n image=pk, image_type=\"DZI\"\n )\n except Image.DoesNotExist:\n raise Http404(\"File not found.\")\n\n return render(\n request,\n \"cases/show_image.html\",\n {\"image_file\": image_file, \"url\": image_file.file.url},\n )\n\n\nclass RawImageUploadSessionViewSet(\n CreateModelMixin, RetrieveModelMixin, ListModelMixin, GenericViewSet\n):\n serializer_class = RawImageUploadSessionSerializer\n queryset = RawImageUploadSession.objects.all()\n permission_classes = [DjangoObjectOnlyWithCustomPostPermissions]\n filter_backends = [ObjectPermissionsFilter]\n\n def perform_create(self, serializer):\n serializer.save(creator=self.request.user)\n\n @action(detail=True, 
methods=[\"patch\"])\n def process_images(self, request, pk=None):\n upload_session: RawImageUploadSession = self.get_object()\n if (\n upload_session.status == upload_session.PENDING\n and not upload_session.rawimagefile_set.filter(\n consumed=True\n ).exists()\n ):\n upload_session.process_images()\n messages.add_message(\n request, messages.SUCCESS, \"Image processing job queued.\"\n )\n return Response(status=status.HTTP_200_OK)\n else:\n messages.add_message(\n request,\n messages.ERROR,\n \"Image processing job could not be queued.\",\n )\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n\nclass RawImageFileViewSet(\n CreateModelMixin, RetrieveModelMixin, ListModelMixin, GenericViewSet\n):\n serializer_class = RawImageFileSerializer\n queryset = RawImageFile.objects.all()\n permission_classes = [DjangoObjectOnlyWithCustomPostPermissions]\n filter_backends = [ObjectPermissionsFilter]\n", "path": "app/grandchallenge/cases/views.py"}], "after_files": [{"content": "from django.contrib import messages\nfrom django.http import Http404\nfrom django.views.generic import DetailView\nfrom guardian.mixins import (\n LoginRequiredMixin,\n PermissionRequiredMixin as ObjectPermissionRequiredMixin,\n)\nfrom rest_framework import status\nfrom rest_framework.decorators import action\nfrom rest_framework.mixins import (\n CreateModelMixin,\n ListModelMixin,\n RetrieveModelMixin,\n)\nfrom rest_framework.permissions import DjangoObjectPermissions\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import GenericViewSet, ReadOnlyModelViewSet\nfrom rest_framework_guardian.filters import ObjectPermissionsFilter\n\nfrom grandchallenge.cases.models import (\n Image,\n ImageFile,\n RawImageFile,\n RawImageUploadSession,\n)\nfrom grandchallenge.cases.serializers import (\n ImageSerializer,\n RawImageFileSerializer,\n RawImageUploadSessionSerializer,\n)\nfrom grandchallenge.core.permissions.rest_framework import (\n DjangoObjectOnlyWithCustomPostPermissions,\n)\n\n\nclass RawImageUploadSessionDetail(\n LoginRequiredMixin, ObjectPermissionRequiredMixin, DetailView\n):\n model = RawImageUploadSession\n permission_required = f\"{RawImageUploadSession._meta.app_label}.view_{RawImageUploadSession._meta.model_name}\"\n raise_exception = True\n\n\nclass ImageViewSet(ReadOnlyModelViewSet):\n serializer_class = ImageSerializer\n queryset = Image.objects.all()\n permission_classes = [DjangoObjectPermissions]\n filter_backends = [ObjectPermissionsFilter]\n\n def get_queryset(self):\n filters = {\n \"worklist\": self.request.query_params.get(\"worklist\", None),\n \"study\": self.request.query_params.get(\"study\", None),\n \"origin\": self.request.query_params.get(\"origin\", None),\n }\n filters = {k: v for k, v in filters.items() if v is not None}\n\n queryset = super().get_queryset().filter(**filters)\n\n return queryset\n\n\ndef show_image(request, *, pk):\n from django.shortcuts import render\n\n try:\n image_file = ImageFile.objects.select_related(\"image\").get(\n image=pk, image_type=\"DZI\"\n )\n except Image.DoesNotExist:\n raise Http404(\"File not found.\")\n\n return render(\n request,\n \"cases/show_image.html\",\n {\"image_file\": image_file, \"url\": image_file.file.url},\n )\n\n\nclass RawImageUploadSessionViewSet(\n CreateModelMixin, RetrieveModelMixin, ListModelMixin, GenericViewSet\n):\n serializer_class = RawImageUploadSessionSerializer\n queryset = RawImageUploadSession.objects.all()\n permission_classes = [DjangoObjectOnlyWithCustomPostPermissions]\n filter_backends = 
[ObjectPermissionsFilter]\n\n def perform_create(self, serializer):\n serializer.save(creator=self.request.user)\n\n @action(detail=True, methods=[\"patch\"])\n def process_images(self, request, pk=None):\n upload_session: RawImageUploadSession = self.get_object()\n if (\n upload_session.status == upload_session.PENDING\n and not upload_session.rawimagefile_set.filter(\n consumed=True\n ).exists()\n ):\n upload_session.process_images()\n messages.add_message(\n request, messages.SUCCESS, \"Image processing job queued.\"\n )\n return Response(status=status.HTTP_200_OK)\n else:\n messages.add_message(\n request,\n messages.ERROR,\n \"Image processing job could not be queued.\",\n )\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n\nclass RawImageFileViewSet(\n CreateModelMixin, RetrieveModelMixin, ListModelMixin, GenericViewSet\n):\n serializer_class = RawImageFileSerializer\n queryset = RawImageFile.objects.all()\n permission_classes = [DjangoObjectOnlyWithCustomPostPermissions]\n filter_backends = [ObjectPermissionsFilter]\n", "path": "app/grandchallenge/cases/views.py"}]} | 1,368 | 128 |
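The row above only shows the server-side change (an extra `origin` key in the queryset filters). For illustration, here is a minimal client-side sketch of how the new filter might be exercised; the API root, endpoint path, token scheme, and session primary key are all hypothetical placeholders — only the `origin` query-parameter name comes from the golden diff.

```python
# Hypothetical client call against the patched ImageViewSet.
# API_ROOT, TOKEN and SESSION_PK are placeholders, not real grand-challenge values.
import requests

API_ROOT = "https://grand-challenge.example/api/v1"      # assumed endpoint root
TOKEN = "REPLACE_WITH_API_TOKEN"                          # assumed auth scheme
SESSION_PK = "00000000-0000-0000-0000-000000000000"       # pk of a RawImageUploadSession

response = requests.get(
    f"{API_ROOT}/cases/images/",
    params={"origin": SESSION_PK},                        # the filter key added by the patch
    headers={"Authorization": f"Token {TOKEN}"},          # assumed DRF-style token auth
    timeout=30,
)
response.raise_for_status()
for image in response.json().get("results", []):          # assumes DRF-style pagination
    print(image.get("pk"), image.get("name"))
```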
gh_patches_debug_24971 | rasdani/github-patches | git_diff | pyjanitor-devs__pyjanitor-513 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[DOC] PyPI page has no project description
# Brief Description of Fix
The PyPI page for `pyjanitor` has no project description. I'm not sure whether it previously had one that was lost in a recent version update. I'm not sure how to fix it, but I assume it's something that @ericmjl would be able to change.
# Relevant Context
- [Link to PyPI page](https://pypi.org/project/pyjanitor/)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup
2
3
4 def requirements():
5 with open("requirements.txt", "r+") as f:
6 return f.read()
7
8
9 setup(
10 name="pyjanitor",
11 version="0.18.0",
12 description="Tools for cleaning pandas DataFrames",
13 author="Eric J. Ma",
14 author_email="[email protected]",
15 url="https://github.com/ericmjl/pyjanitor",
16 packages=["janitor"],
17 install_requires=requirements(),
18 python_requires=">=3.6",
19 )
20
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,3 +1,6 @@
+import re
+from pathlib import Path
+
from setuptools import setup
@@ -6,6 +9,36 @@
return f.read()
+def generate_long_description() -> str:
+ """
+ Extra chunks from README for PyPI description.
+
+ Target chunks must be contained within `.. pypi-doc` pair comments,
+ so there must be an even number of comments in README.
+
+ :returns: Extracted description from README
+
+ """
+ # Read the contents of README file
+ this_directory = Path(__file__).parent
+ with open(this_directory / "README.rst", encoding="utf-8") as f:
+ readme = f.read()
+
+ # Find pypi-doc comments in README
+ indices = [m.start() for m in re.finditer(".. pypi-doc", readme)]
+ assert (
+ len(indices) % 2 == 0
+ ), "Odd number of `.. pypi-doc` comments in README"
+
+ # Loop through pairs of comments and save text between pairs
+ long_description = ""
+ for i in range(0, len(indices), 2):
+ start_index = indices[i] + 11
+ end_index = indices[i + 1]
+ long_description += readme[start_index:end_index]
+ return long_description
+
+
setup(
name="pyjanitor",
version="0.18.0",
@@ -16,4 +49,6 @@
packages=["janitor"],
install_requires=requirements(),
python_requires=">=3.6",
+ long_description=generate_long_description(),
+ long_description_content_type="text/x-rst",
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,3 +1,6 @@\n+import re\n+from pathlib import Path\n+\n from setuptools import setup\n \n \n@@ -6,6 +9,36 @@\n return f.read()\n \n \n+def generate_long_description() -> str:\n+ \"\"\"\n+ Extra chunks from README for PyPI description.\n+\n+ Target chunks must be contained within `.. pypi-doc` pair comments,\n+ so there must be an even number of comments in README.\n+\n+ :returns: Extracted description from README\n+\n+ \"\"\"\n+ # Read the contents of README file\n+ this_directory = Path(__file__).parent\n+ with open(this_directory / \"README.rst\", encoding=\"utf-8\") as f:\n+ readme = f.read()\n+\n+ # Find pypi-doc comments in README\n+ indices = [m.start() for m in re.finditer(\".. pypi-doc\", readme)]\n+ assert (\n+ len(indices) % 2 == 0\n+ ), \"Odd number of `.. pypi-doc` comments in README\"\n+\n+ # Loop through pairs of comments and save text between pairs\n+ long_description = \"\"\n+ for i in range(0, len(indices), 2):\n+ start_index = indices[i] + 11\n+ end_index = indices[i + 1]\n+ long_description += readme[start_index:end_index]\n+ return long_description\n+\n+\n setup(\n name=\"pyjanitor\",\n version=\"0.18.0\",\n@@ -16,4 +49,6 @@\n packages=[\"janitor\"],\n install_requires=requirements(),\n python_requires=\">=3.6\",\n+ long_description=generate_long_description(),\n+ long_description_content_type=\"text/x-rst\",\n )\n", "issue": "[DOC] PyPI page has no project description\n# Brief Description of Fix\r\n\r\nThe PyPI page for `pyjanitor` has no project description. I'm not sure if it previously did, and was lost in a recent version update. I'm not sure how to fix it, but I assume it's something that @ericmjl would be able to change. \r\n\r\n# Relevant Context\r\n- [Link to PyPI page](https://pypi.org/project/pyjanitor/)\r\n\n", "before_files": [{"content": "from setuptools import setup\n\n\ndef requirements():\n with open(\"requirements.txt\", \"r+\") as f:\n return f.read()\n\n\nsetup(\n name=\"pyjanitor\",\n version=\"0.18.0\",\n description=\"Tools for cleaning pandas DataFrames\",\n author=\"Eric J. Ma\",\n author_email=\"[email protected]\",\n url=\"https://github.com/ericmjl/pyjanitor\",\n packages=[\"janitor\"],\n install_requires=requirements(),\n python_requires=\">=3.6\",\n)\n", "path": "setup.py"}], "after_files": [{"content": "import re\nfrom pathlib import Path\n\nfrom setuptools import setup\n\n\ndef requirements():\n with open(\"requirements.txt\", \"r+\") as f:\n return f.read()\n\n\ndef generate_long_description() -> str:\n \"\"\"\n Extra chunks from README for PyPI description.\n\n Target chunks must be contained within `.. pypi-doc` pair comments,\n so there must be an even number of comments in README.\n\n :returns: Extracted description from README\n\n \"\"\"\n # Read the contents of README file\n this_directory = Path(__file__).parent\n with open(this_directory / \"README.rst\", encoding=\"utf-8\") as f:\n readme = f.read()\n\n # Find pypi-doc comments in README\n indices = [m.start() for m in re.finditer(\".. pypi-doc\", readme)]\n assert (\n len(indices) % 2 == 0\n ), \"Odd number of `.. 
pypi-doc` comments in README\"\n\n # Loop through pairs of comments and save text between pairs\n long_description = \"\"\n for i in range(0, len(indices), 2):\n start_index = indices[i] + 11\n end_index = indices[i + 1]\n long_description += readme[start_index:end_index]\n return long_description\n\n\nsetup(\n name=\"pyjanitor\",\n version=\"0.18.0\",\n description=\"Tools for cleaning pandas DataFrames\",\n author=\"Eric J. Ma\",\n author_email=\"[email protected]\",\n url=\"https://github.com/ericmjl/pyjanitor\",\n packages=[\"janitor\"],\n install_requires=requirements(),\n python_requires=\">=3.6\",\n long_description=generate_long_description(),\n long_description_content_type=\"text/x-rst\",\n)\n", "path": "setup.py"}]} | 503 | 403 |
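The `generate_long_description()` helper in the patch above keeps only the text between pairs of `.. pypi-doc` comments in `README.rst`. Below is a self-contained sketch of that extraction run against an inline string; the README content is invented purely to show which chunks survive.

```python
# Standalone illustration of the ``.. pypi-doc`` extraction from the patch;
# the README text below is made up purely to show which chunks are kept.
import re

readme = """\
.. pypi-doc

pyjanitor provides a clean API for cleaning pandas DataFrames.

.. pypi-doc
Internal build notes that should stay off PyPI.
.. pypi-doc

Install it with ``pip install pyjanitor``.

.. pypi-doc
"""

indices = [m.start() for m in re.finditer(r"\.\. pypi-doc", readme)]
assert len(indices) % 2 == 0, "Odd number of `.. pypi-doc` comments"

long_description = ""
for i in range(0, len(indices), 2):
    start = indices[i] + 11            # len(".. pypi-doc") == 11, skip past the marker
    end = indices[i + 1]
    long_description += readme[start:end]

# Text between the 1st/2nd and 3rd/4th markers is kept; the "Internal build
# notes" line sits between the 2nd and 3rd markers and is dropped.
print(long_description)
```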
gh_patches_debug_25054 | rasdani/github-patches | git_diff | translate__pootle-4141 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add live preview when editing static documents
When editing a static document, let's split the screen vertically if its width is more than 1920px, and horizontally otherwise, and add live rendering whenever the contents of the HTML editor area change. In the preview mode, use the same wrapper divs to make sure we pick up all styles and render the page as close to the final output as possible.
Quick illustration: http://www.htmlinstant.com/ or http://htmledit.squarefree.com/
Mockup screenshot:

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pootle/apps/staticpages/urls.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) Pootle contributors.
5 #
6 # This file is a part of the Pootle project. It is distributed under the GPL3
7 # or later license. See the LICENSE file for a copy of the license and the
8 # AUTHORS file for copyright and authorship information.
9
10 from django.conf.urls import include, patterns, url
11
12 from .views import (AdminTemplateView, PageCreateView, PageDeleteView,
13 PageUpdateView)
14
15
16 page_patterns = patterns('',
17 url(r'^legal/agreement/$',
18 'staticpages.views.legal_agreement',
19 name='pootle-staticpages-legal-agreement'),
20 url(r'^(?P<virtual_path>.+)/$',
21 'staticpages.views.display_page',
22 name='pootle-staticpages-display'),
23 )
24
25 admin_patterns = patterns('',
26 url(r'^$',
27 AdminTemplateView.as_view(),
28 name='pootle-staticpages'),
29
30 url(r'^(?P<page_type>[^/]+)/add/?$',
31 PageCreateView.as_view(),
32 name='pootle-staticpages-create'),
33 url(r'^(?P<page_type>[^/]+)/(?P<pk>\d+)/?$',
34 PageUpdateView.as_view(),
35 name='pootle-staticpages-edit'),
36 url(r'^(?P<page_type>[^/]+)/(?P<pk>\d+)/delete/?$',
37 PageDeleteView.as_view(),
38 name='pootle-staticpages-delete'),
39 )
40
41
42 urlpatterns = patterns('',
43 url(r'^pages/',
44 include(page_patterns)),
45 )
46
```
Path: `pootle/apps/staticpages/views.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) Pootle contributors.
5 #
6 # This file is a part of the Pootle project. It is distributed under the GPL3
7 # or later license. See the LICENSE file for a copy of the license and the
8 # AUTHORS file for copyright and authorship information.
9
10 from django.core.exceptions import ObjectDoesNotExist
11 from django.core.urlresolvers import reverse_lazy
12 from django.http import Http404
13 from django.shortcuts import redirect, render
14 from django.template import loader, RequestContext
15 from django.utils.translation import ugettext_lazy as _
16 from django.views.generic import (CreateView, DeleteView, TemplateView,
17 UpdateView)
18
19 from pootle.core.http import JsonResponse, JsonResponseBadRequest
20 from pootle.core.views import SuperuserRequiredMixin
21 from pootle_misc.util import ajax_required
22
23 from .forms import agreement_form_factory
24 from .models import AbstractPage, LegalPage, StaticPage
25
26
27 ANN_TYPE = u'announcements'
28 ANN_VPATH = ANN_TYPE + u'/'
29
30
31 class PageModelMixin(object):
32 """Mixin used to set the view's page model according to the
33 `page_type` argument caught in a url pattern.
34 """
35
36 def dispatch(self, request, *args, **kwargs):
37 self.page_type = kwargs.get('page_type', None)
38 self.model = {
39 'legal': LegalPage,
40 'static': StaticPage,
41 ANN_TYPE: StaticPage,
42 }.get(self.page_type)
43
44 if self.model is None:
45 raise Http404
46
47 return super(PageModelMixin, self).dispatch(request, *args, **kwargs)
48
49 def get_context_data(self, **kwargs):
50 ctx = super(PageModelMixin, self).get_context_data(**kwargs)
51 ctx.update({
52 'has_page_model': True,
53 'page_display_name': self.model.display_name,
54 })
55 return ctx
56
57 def get_form_kwargs(self):
58 kwargs = super(PageModelMixin, self).get_form_kwargs()
59 kwargs.update({'label_suffix': ''})
60 return kwargs
61
62 def get_form(self, form_class):
63 form = super(PageModelMixin, self).get_form(form_class)
64
65 if self.page_type == ANN_TYPE:
66 form.fields['virtual_path'].help_text = u'/pages/' + ANN_VPATH
67
68 return form
69
70 def form_valid(self, form):
71 if (self.page_type == ANN_TYPE and not
72 form.cleaned_data['virtual_path'].startswith(ANN_VPATH)):
73 orig_vpath = form.cleaned_data['virtual_path']
74 form.instance.virtual_path = ANN_VPATH + orig_vpath
75
76 return super(PageModelMixin, self).form_valid(form)
77
78
79 class AdminCtxMixin(object):
80
81 def get_context_data(self, **kwargs):
82 ctx = super(AdminCtxMixin, self).get_context_data(**kwargs)
83 ctx.update({
84 'page': 'admin-pages',
85 })
86 return ctx
87
88
89 class AdminTemplateView(SuperuserRequiredMixin, AdminCtxMixin, TemplateView):
90
91 template_name = 'admin/staticpages/page_list.html'
92
93 def get_context_data(self, **kwargs):
94 legal_pages = LegalPage.objects.all()
95 static_pages = StaticPage.objects.exclude(
96 virtual_path__startswith=ANN_VPATH,
97 )
98 announcements = StaticPage.objects.filter(
99 virtual_path__startswith=ANN_VPATH,
100 )
101
102 ctx = super(AdminTemplateView, self).get_context_data(**kwargs)
103 ctx.update({
104 'legalpages': legal_pages,
105 'staticpages': static_pages,
106 ANN_TYPE: announcements,
107 })
108 return ctx
109
110
111 class PageCreateView(SuperuserRequiredMixin, AdminCtxMixin, PageModelMixin, CreateView):
112 fields = ('title', 'virtual_path', 'active', 'url', 'body')
113
114 success_url = reverse_lazy('pootle-staticpages')
115 template_name = 'admin/staticpages/page_create.html'
116
117 def get_initial(self):
118 initial = super(PageModelMixin, self).get_initial()
119
120 initial_args = {
121 'title': _('Page Title'),
122 }
123
124 if self.page_type != ANN_TYPE:
125 next_page_number = AbstractPage.max_pk() + 1
126 initial_args['virtual_path'] = 'page-%d' % next_page_number
127
128 initial.update(initial_args)
129
130 return initial
131
132 def get_form(self, form_class):
133 form = super(PageCreateView, self).get_form(form_class)
134
135 if self.page_type == ANN_TYPE:
136 del form.fields['url']
137 # Translators: 'projects' must not be translated.
138 msg = _(u'projects/<project_code> or <language_code> or '
139 u'<language_code>/<project_code>')
140 form.fields['virtual_path'].widget.attrs['placeholder'] = msg
141 form.fields['virtual_path'].widget.attrs['size'] = 60
142
143 return form
144
145
146 class PageUpdateView(SuperuserRequiredMixin, AdminCtxMixin, PageModelMixin, UpdateView):
147 fields = ('title', 'virtual_path', 'active', 'url', 'body')
148
149 success_url = reverse_lazy('pootle-staticpages')
150 template_name = 'admin/staticpages/page_update.html'
151
152 def get_context_data(self, **kwargs):
153 ctx = super(PageUpdateView, self).get_context_data(**kwargs)
154 ctx.update({
155 'show_delete': True,
156 'page_type': self.page_type,
157 })
158 return ctx
159
160 def get_form_kwargs(self):
161 kwargs = super(PageUpdateView, self).get_form_kwargs()
162
163 if self.page_type == ANN_TYPE:
164 orig_vpath = self.object.virtual_path
165 self.object.virtual_path = orig_vpath.replace(ANN_VPATH, '')
166 kwargs.update({'instance': self.object})
167
168 return kwargs
169
170
171 class PageDeleteView(SuperuserRequiredMixin, PageModelMixin, DeleteView):
172
173 success_url = reverse_lazy('pootle-staticpages')
174
175
176 def display_page(request, virtual_path):
177 """Displays an active page defined in `virtual_path`."""
178 page = None
179 for page_model in AbstractPage.__subclasses__():
180 try:
181 page = page_model.objects.live(request.user).get(
182 virtual_path=virtual_path,
183 )
184 except ObjectDoesNotExist:
185 pass
186
187 if page is None:
188 raise Http404
189
190 if page.url:
191 return redirect(page.url)
192
193 template_name = 'staticpages/page_display.html'
194 if request.is_ajax():
195 template_name = 'staticpages/_body.html'
196
197 ctx = {
198 'page': page,
199 }
200 return render(request, template_name, ctx)
201
202
203 def _get_rendered_agreement(request, form):
204 template = loader.get_template('staticpages/agreement.html')
205 return template.render(RequestContext(request, {'form': form}))
206
207
208 @ajax_required
209 def legal_agreement(request):
210 """Displays the pending documents to be agreed by the current user."""
211 pending_pages = LegalPage.objects.pending_user_agreement(request.user)
212 form_class = agreement_form_factory(pending_pages, request.user)
213
214 if request.method == 'POST':
215 form = form_class(request.POST)
216
217 if form.is_valid():
218 form.save()
219 return JsonResponse({})
220
221 rendered_form = _get_rendered_agreement(request, form)
222 return JsonResponseBadRequest({'form': rendered_form})
223
224 rendered_form = _get_rendered_agreement(request, form_class())
225 return JsonResponse({'form': rendered_form})
226
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pootle/apps/staticpages/urls.py b/pootle/apps/staticpages/urls.py
--- a/pootle/apps/staticpages/urls.py
+++ b/pootle/apps/staticpages/urls.py
@@ -39,7 +39,16 @@
)
+xhr_patterns = patterns('',
+ url(r'^preview/?$',
+ 'staticpages.views.preview_content',
+ name='pootle-xhr-preview'),
+)
+
+
urlpatterns = patterns('',
url(r'^pages/',
include(page_patterns)),
+ url(r'^xhr/',
+ include(xhr_patterns)),
)
diff --git a/pootle/apps/staticpages/views.py b/pootle/apps/staticpages/views.py
--- a/pootle/apps/staticpages/views.py
+++ b/pootle/apps/staticpages/views.py
@@ -17,6 +17,7 @@
UpdateView)
from pootle.core.http import JsonResponse, JsonResponseBadRequest
+from pootle.core.markup.filters import apply_markup_filter
from pootle.core.views import SuperuserRequiredMixin
from pootle_misc.util import ajax_required
@@ -223,3 +224,16 @@
rendered_form = _get_rendered_agreement(request, form_class())
return JsonResponse({'form': rendered_form})
+
+
+@ajax_required
+def preview_content(request):
+ """Returns content rendered based on the configured markup settings."""
+ if 'text' not in request.POST:
+ return JsonResponseBadRequest({
+ 'msg': _('Text is missing'),
+ })
+
+ return JsonResponse({
+ 'rendered': apply_markup_filter(request.POST['text']),
+ })
| {"golden_diff": "diff --git a/pootle/apps/staticpages/urls.py b/pootle/apps/staticpages/urls.py\n--- a/pootle/apps/staticpages/urls.py\n+++ b/pootle/apps/staticpages/urls.py\n@@ -39,7 +39,16 @@\n )\n \n \n+xhr_patterns = patterns('',\n+ url(r'^preview/?$',\n+ 'staticpages.views.preview_content',\n+ name='pootle-xhr-preview'),\n+)\n+\n+\n urlpatterns = patterns('',\n url(r'^pages/',\n include(page_patterns)),\n+ url(r'^xhr/',\n+ include(xhr_patterns)),\n )\ndiff --git a/pootle/apps/staticpages/views.py b/pootle/apps/staticpages/views.py\n--- a/pootle/apps/staticpages/views.py\n+++ b/pootle/apps/staticpages/views.py\n@@ -17,6 +17,7 @@\n UpdateView)\n \n from pootle.core.http import JsonResponse, JsonResponseBadRequest\n+from pootle.core.markup.filters import apply_markup_filter\n from pootle.core.views import SuperuserRequiredMixin\n from pootle_misc.util import ajax_required\n \n@@ -223,3 +224,16 @@\n \n rendered_form = _get_rendered_agreement(request, form_class())\n return JsonResponse({'form': rendered_form})\n+\n+\n+@ajax_required\n+def preview_content(request):\n+ \"\"\"Returns content rendered based on the configured markup settings.\"\"\"\n+ if 'text' not in request.POST:\n+ return JsonResponseBadRequest({\n+ 'msg': _('Text is missing'),\n+ })\n+\n+ return JsonResponse({\n+ 'rendered': apply_markup_filter(request.POST['text']),\n+ })\n", "issue": "Add live preview when editing static documents\nWhen editing static document, let's split the screen vertically if it's width is more than 1920px, and horizontally otherwise, and add a live rendering when contents of the HTML editor area change. In the preview mode, use the same wrapper divs to make sure we pick up all styles and render the page as close to the final output as possible.\n\nQuick illustration: http://www.htmlinstant.com/ or http://htmledit.squarefree.com/\n\nMockup screenshot:\n\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.conf.urls import include, patterns, url\n\nfrom .views import (AdminTemplateView, PageCreateView, PageDeleteView,\n PageUpdateView)\n\n\npage_patterns = patterns('',\n url(r'^legal/agreement/$',\n 'staticpages.views.legal_agreement',\n name='pootle-staticpages-legal-agreement'),\n url(r'^(?P<virtual_path>.+)/$',\n 'staticpages.views.display_page',\n name='pootle-staticpages-display'),\n)\n\nadmin_patterns = patterns('',\n url(r'^$',\n AdminTemplateView.as_view(),\n name='pootle-staticpages'),\n\n url(r'^(?P<page_type>[^/]+)/add/?$',\n PageCreateView.as_view(),\n name='pootle-staticpages-create'),\n url(r'^(?P<page_type>[^/]+)/(?P<pk>\\d+)/?$',\n PageUpdateView.as_view(),\n name='pootle-staticpages-edit'),\n url(r'^(?P<page_type>[^/]+)/(?P<pk>\\d+)/delete/?$',\n PageDeleteView.as_view(),\n name='pootle-staticpages-delete'),\n)\n\n\nurlpatterns = patterns('',\n url(r'^pages/',\n include(page_patterns)),\n)\n", "path": "pootle/apps/staticpages/urls.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.http import Http404\nfrom django.shortcuts import redirect, render\nfrom django.template import loader, RequestContext\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic import (CreateView, DeleteView, TemplateView,\n UpdateView)\n\nfrom pootle.core.http import JsonResponse, JsonResponseBadRequest\nfrom pootle.core.views import SuperuserRequiredMixin\nfrom pootle_misc.util import ajax_required\n\nfrom .forms import agreement_form_factory\nfrom .models import AbstractPage, LegalPage, StaticPage\n\n\nANN_TYPE = u'announcements'\nANN_VPATH = ANN_TYPE + u'/'\n\n\nclass PageModelMixin(object):\n \"\"\"Mixin used to set the view's page model according to the\n `page_type` argument caught in a url pattern.\n \"\"\"\n\n def dispatch(self, request, *args, **kwargs):\n self.page_type = kwargs.get('page_type', None)\n self.model = {\n 'legal': LegalPage,\n 'static': StaticPage,\n ANN_TYPE: StaticPage,\n }.get(self.page_type)\n\n if self.model is None:\n raise Http404\n\n return super(PageModelMixin, self).dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n ctx = super(PageModelMixin, self).get_context_data(**kwargs)\n ctx.update({\n 'has_page_model': True,\n 'page_display_name': self.model.display_name,\n })\n return ctx\n\n def get_form_kwargs(self):\n kwargs = super(PageModelMixin, self).get_form_kwargs()\n kwargs.update({'label_suffix': ''})\n return kwargs\n\n def get_form(self, form_class):\n form = super(PageModelMixin, self).get_form(form_class)\n\n if self.page_type == ANN_TYPE:\n form.fields['virtual_path'].help_text = u'/pages/' + ANN_VPATH\n\n return form\n\n def form_valid(self, form):\n if (self.page_type == ANN_TYPE and not\n form.cleaned_data['virtual_path'].startswith(ANN_VPATH)):\n orig_vpath = form.cleaned_data['virtual_path']\n form.instance.virtual_path = ANN_VPATH + orig_vpath\n\n return super(PageModelMixin, self).form_valid(form)\n\n\nclass AdminCtxMixin(object):\n\n def get_context_data(self, **kwargs):\n ctx = super(AdminCtxMixin, self).get_context_data(**kwargs)\n ctx.update({\n 'page': 'admin-pages',\n })\n return ctx\n\n\nclass AdminTemplateView(SuperuserRequiredMixin, AdminCtxMixin, TemplateView):\n\n template_name = 'admin/staticpages/page_list.html'\n\n def get_context_data(self, **kwargs):\n legal_pages = LegalPage.objects.all()\n static_pages = StaticPage.objects.exclude(\n virtual_path__startswith=ANN_VPATH,\n )\n announcements = StaticPage.objects.filter(\n virtual_path__startswith=ANN_VPATH,\n )\n\n ctx = super(AdminTemplateView, self).get_context_data(**kwargs)\n ctx.update({\n 'legalpages': legal_pages,\n 'staticpages': static_pages,\n ANN_TYPE: announcements,\n })\n return ctx\n\n\nclass PageCreateView(SuperuserRequiredMixin, AdminCtxMixin, PageModelMixin, CreateView):\n fields = ('title', 'virtual_path', 'active', 'url', 'body')\n\n success_url = reverse_lazy('pootle-staticpages')\n template_name = 'admin/staticpages/page_create.html'\n\n def get_initial(self):\n initial = super(PageModelMixin, self).get_initial()\n\n initial_args = {\n 'title': _('Page Title'),\n }\n\n if self.page_type != ANN_TYPE:\n next_page_number = AbstractPage.max_pk() + 1\n initial_args['virtual_path'] = 'page-%d' % next_page_number\n\n initial.update(initial_args)\n\n return initial\n\n def 
get_form(self, form_class):\n form = super(PageCreateView, self).get_form(form_class)\n\n if self.page_type == ANN_TYPE:\n del form.fields['url']\n # Translators: 'projects' must not be translated.\n msg = _(u'projects/<project_code> or <language_code> or '\n u'<language_code>/<project_code>')\n form.fields['virtual_path'].widget.attrs['placeholder'] = msg\n form.fields['virtual_path'].widget.attrs['size'] = 60\n\n return form\n\n\nclass PageUpdateView(SuperuserRequiredMixin, AdminCtxMixin, PageModelMixin, UpdateView):\n fields = ('title', 'virtual_path', 'active', 'url', 'body')\n\n success_url = reverse_lazy('pootle-staticpages')\n template_name = 'admin/staticpages/page_update.html'\n\n def get_context_data(self, **kwargs):\n ctx = super(PageUpdateView, self).get_context_data(**kwargs)\n ctx.update({\n 'show_delete': True,\n 'page_type': self.page_type,\n })\n return ctx\n\n def get_form_kwargs(self):\n kwargs = super(PageUpdateView, self).get_form_kwargs()\n\n if self.page_type == ANN_TYPE:\n orig_vpath = self.object.virtual_path\n self.object.virtual_path = orig_vpath.replace(ANN_VPATH, '')\n kwargs.update({'instance': self.object})\n\n return kwargs\n\n\nclass PageDeleteView(SuperuserRequiredMixin, PageModelMixin, DeleteView):\n\n success_url = reverse_lazy('pootle-staticpages')\n\n\ndef display_page(request, virtual_path):\n \"\"\"Displays an active page defined in `virtual_path`.\"\"\"\n page = None\n for page_model in AbstractPage.__subclasses__():\n try:\n page = page_model.objects.live(request.user).get(\n virtual_path=virtual_path,\n )\n except ObjectDoesNotExist:\n pass\n\n if page is None:\n raise Http404\n\n if page.url:\n return redirect(page.url)\n\n template_name = 'staticpages/page_display.html'\n if request.is_ajax():\n template_name = 'staticpages/_body.html'\n\n ctx = {\n 'page': page,\n }\n return render(request, template_name, ctx)\n\n\ndef _get_rendered_agreement(request, form):\n template = loader.get_template('staticpages/agreement.html')\n return template.render(RequestContext(request, {'form': form}))\n\n\n@ajax_required\ndef legal_agreement(request):\n \"\"\"Displays the pending documents to be agreed by the current user.\"\"\"\n pending_pages = LegalPage.objects.pending_user_agreement(request.user)\n form_class = agreement_form_factory(pending_pages, request.user)\n\n if request.method == 'POST':\n form = form_class(request.POST)\n\n if form.is_valid():\n form.save()\n return JsonResponse({})\n\n rendered_form = _get_rendered_agreement(request, form)\n return JsonResponseBadRequest({'form': rendered_form})\n\n rendered_form = _get_rendered_agreement(request, form_class())\n return JsonResponse({'form': rendered_form})\n", "path": "pootle/apps/staticpages/views.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.conf.urls import include, patterns, url\n\nfrom .views import (AdminTemplateView, PageCreateView, PageDeleteView,\n PageUpdateView)\n\n\npage_patterns = patterns('',\n url(r'^legal/agreement/$',\n 'staticpages.views.legal_agreement',\n name='pootle-staticpages-legal-agreement'),\n url(r'^(?P<virtual_path>.+)/$',\n 'staticpages.views.display_page',\n name='pootle-staticpages-display'),\n)\n\nadmin_patterns = patterns('',\n url(r'^$',\n AdminTemplateView.as_view(),\n name='pootle-staticpages'),\n\n url(r'^(?P<page_type>[^/]+)/add/?$',\n PageCreateView.as_view(),\n name='pootle-staticpages-create'),\n url(r'^(?P<page_type>[^/]+)/(?P<pk>\\d+)/?$',\n PageUpdateView.as_view(),\n name='pootle-staticpages-edit'),\n url(r'^(?P<page_type>[^/]+)/(?P<pk>\\d+)/delete/?$',\n PageDeleteView.as_view(),\n name='pootle-staticpages-delete'),\n)\n\n\nxhr_patterns = patterns('',\n url(r'^preview/?$',\n 'staticpages.views.preview_content',\n name='pootle-xhr-preview'),\n)\n\n\nurlpatterns = patterns('',\n url(r'^pages/',\n include(page_patterns)),\n url(r'^xhr/',\n include(xhr_patterns)),\n)\n", "path": "pootle/apps/staticpages/urls.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.http import Http404\nfrom django.shortcuts import redirect, render\nfrom django.template import loader, RequestContext\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic import (CreateView, DeleteView, TemplateView,\n UpdateView)\n\nfrom pootle.core.http import JsonResponse, JsonResponseBadRequest\nfrom pootle.core.markup.filters import apply_markup_filter\nfrom pootle.core.views import SuperuserRequiredMixin\nfrom pootle_misc.util import ajax_required\n\nfrom .forms import agreement_form_factory\nfrom .models import AbstractPage, LegalPage, StaticPage\n\n\nANN_TYPE = u'announcements'\nANN_VPATH = ANN_TYPE + u'/'\n\n\nclass PageModelMixin(object):\n \"\"\"Mixin used to set the view's page model according to the\n `page_type` argument caught in a url pattern.\n \"\"\"\n\n def dispatch(self, request, *args, **kwargs):\n self.page_type = kwargs.get('page_type', None)\n self.model = {\n 'legal': LegalPage,\n 'static': StaticPage,\n ANN_TYPE: StaticPage,\n }.get(self.page_type)\n\n if self.model is None:\n raise Http404\n\n return super(PageModelMixin, self).dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n ctx = super(PageModelMixin, self).get_context_data(**kwargs)\n ctx.update({\n 'has_page_model': True,\n 'page_display_name': self.model.display_name,\n })\n return ctx\n\n def get_form_kwargs(self):\n kwargs = super(PageModelMixin, self).get_form_kwargs()\n kwargs.update({'label_suffix': ''})\n return kwargs\n\n def get_form(self, form_class):\n form = super(PageModelMixin, self).get_form(form_class)\n\n if self.page_type == ANN_TYPE:\n form.fields['virtual_path'].help_text = u'/pages/' + ANN_VPATH\n\n return form\n\n def form_valid(self, form):\n if (self.page_type == ANN_TYPE and not\n form.cleaned_data['virtual_path'].startswith(ANN_VPATH)):\n orig_vpath = 
form.cleaned_data['virtual_path']\n form.instance.virtual_path = ANN_VPATH + orig_vpath\n\n return super(PageModelMixin, self).form_valid(form)\n\n\nclass AdminCtxMixin(object):\n\n def get_context_data(self, **kwargs):\n ctx = super(AdminCtxMixin, self).get_context_data(**kwargs)\n ctx.update({\n 'page': 'admin-pages',\n })\n return ctx\n\n\nclass AdminTemplateView(SuperuserRequiredMixin, AdminCtxMixin, TemplateView):\n\n template_name = 'admin/staticpages/page_list.html'\n\n def get_context_data(self, **kwargs):\n legal_pages = LegalPage.objects.all()\n static_pages = StaticPage.objects.exclude(\n virtual_path__startswith=ANN_VPATH,\n )\n announcements = StaticPage.objects.filter(\n virtual_path__startswith=ANN_VPATH,\n )\n\n ctx = super(AdminTemplateView, self).get_context_data(**kwargs)\n ctx.update({\n 'legalpages': legal_pages,\n 'staticpages': static_pages,\n ANN_TYPE: announcements,\n })\n return ctx\n\n\nclass PageCreateView(SuperuserRequiredMixin, AdminCtxMixin, PageModelMixin, CreateView):\n fields = ('title', 'virtual_path', 'active', 'url', 'body')\n\n success_url = reverse_lazy('pootle-staticpages')\n template_name = 'admin/staticpages/page_create.html'\n\n def get_initial(self):\n initial = super(PageModelMixin, self).get_initial()\n\n initial_args = {\n 'title': _('Page Title'),\n }\n\n if self.page_type != ANN_TYPE:\n next_page_number = AbstractPage.max_pk() + 1\n initial_args['virtual_path'] = 'page-%d' % next_page_number\n\n initial.update(initial_args)\n\n return initial\n\n def get_form(self, form_class):\n form = super(PageCreateView, self).get_form(form_class)\n\n if self.page_type == ANN_TYPE:\n del form.fields['url']\n # Translators: 'projects' must not be translated.\n msg = _(u'projects/<project_code> or <language_code> or '\n u'<language_code>/<project_code>')\n form.fields['virtual_path'].widget.attrs['placeholder'] = msg\n form.fields['virtual_path'].widget.attrs['size'] = 60\n\n return form\n\n\nclass PageUpdateView(SuperuserRequiredMixin, AdminCtxMixin, PageModelMixin, UpdateView):\n fields = ('title', 'virtual_path', 'active', 'url', 'body')\n\n success_url = reverse_lazy('pootle-staticpages')\n template_name = 'admin/staticpages/page_update.html'\n\n def get_context_data(self, **kwargs):\n ctx = super(PageUpdateView, self).get_context_data(**kwargs)\n ctx.update({\n 'show_delete': True,\n 'page_type': self.page_type,\n })\n return ctx\n\n def get_form_kwargs(self):\n kwargs = super(PageUpdateView, self).get_form_kwargs()\n\n if self.page_type == ANN_TYPE:\n orig_vpath = self.object.virtual_path\n self.object.virtual_path = orig_vpath.replace(ANN_VPATH, '')\n kwargs.update({'instance': self.object})\n\n return kwargs\n\n\nclass PageDeleteView(SuperuserRequiredMixin, PageModelMixin, DeleteView):\n\n success_url = reverse_lazy('pootle-staticpages')\n\n\ndef display_page(request, virtual_path):\n \"\"\"Displays an active page defined in `virtual_path`.\"\"\"\n page = None\n for page_model in AbstractPage.__subclasses__():\n try:\n page = page_model.objects.live(request.user).get(\n virtual_path=virtual_path,\n )\n except ObjectDoesNotExist:\n pass\n\n if page is None:\n raise Http404\n\n if page.url:\n return redirect(page.url)\n\n template_name = 'staticpages/page_display.html'\n if request.is_ajax():\n template_name = 'staticpages/_body.html'\n\n ctx = {\n 'page': page,\n }\n return render(request, template_name, ctx)\n\n\ndef _get_rendered_agreement(request, form):\n template = loader.get_template('staticpages/agreement.html')\n return 
template.render(RequestContext(request, {'form': form}))\n\n\n@ajax_required\ndef legal_agreement(request):\n \"\"\"Displays the pending documents to be agreed by the current user.\"\"\"\n pending_pages = LegalPage.objects.pending_user_agreement(request.user)\n form_class = agreement_form_factory(pending_pages, request.user)\n\n if request.method == 'POST':\n form = form_class(request.POST)\n\n if form.is_valid():\n form.save()\n return JsonResponse({})\n\n rendered_form = _get_rendered_agreement(request, form)\n return JsonResponseBadRequest({'form': rendered_form})\n\n rendered_form = _get_rendered_agreement(request, form_class())\n return JsonResponse({'form': rendered_form})\n\n\n@ajax_required\ndef preview_content(request):\n \"\"\"Returns content rendered based on the configured markup settings.\"\"\"\n if 'text' not in request.POST:\n return JsonResponseBadRequest({\n 'msg': _('Text is missing'),\n })\n\n return JsonResponse({\n 'rendered': apply_markup_filter(request.POST['text']),\n })\n", "path": "pootle/apps/staticpages/views.py"}]} | 3,063 | 360 |
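The new `preview_content` view above accepts an AJAX POST carrying a `text` field and returns the rendered markup as JSON. Here is a rough sketch of driving it from a Django test shell; the URL name and POST field come from the patch, while the `reverse()` import path, the AJAX header check, and the exact rendered output are assumptions that depend on the Django version and the markup filter configured in settings.

```python
# Rough sketch of exercising the new preview endpoint via Django's test client.
import json

from django.core.urlresolvers import reverse   # Pootle-era Django; newer versions use django.urls
from django.test import Client

client = Client()
response = client.post(
    reverse("pootle-xhr-preview"),
    data={"text": "*live* **preview**"},
    HTTP_X_REQUESTED_WITH="XMLHttpRequest",    # assumed to be what @ajax_required checks
)
print(response.status_code)                    # 200, or 400 when "text" is missing
print(json.loads(response.content)["rendered"])
```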
gh_patches_debug_18823 | rasdani/github-patches | git_diff | yt-dlp__yt-dlp-5104 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bongacams.com moved to bongacams.net
### DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE
- [X] I understand that I will be **blocked** if I remove or skip any mandatory\* field
### Checklist
- [X] I'm reporting a broken site
- [X] I've verified that I'm running yt-dlp version **2022.09.01** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details
- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#video-url-contains-an-ampersand--and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates
- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
- [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
### Provide a description that is worded well enough to be understood
bongacams.com has been moved to the new bongacams.net domain; please fix the code to work with the bongacams.net domain.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `yt_dlp/extractor/bongacams.py`
Content:
```
1 from .common import InfoExtractor
2 from ..compat import compat_str
3 from ..utils import (
4 int_or_none,
5 try_get,
6 urlencode_postdata,
7 )
8
9
10 class BongaCamsIE(InfoExtractor):
11 _VALID_URL = r'https?://(?P<host>(?:[^/]+\.)?bongacams\d*\.com)/(?P<id>[^/?&#]+)'
12 _TESTS = [{
13 'url': 'https://de.bongacams.com/azumi-8',
14 'only_matching': True,
15 }, {
16 'url': 'https://cn.bongacams.com/azumi-8',
17 'only_matching': True,
18 }]
19
20 def _real_extract(self, url):
21 mobj = self._match_valid_url(url)
22 host = mobj.group('host')
23 channel_id = mobj.group('id')
24
25 amf = self._download_json(
26 'https://%s/tools/amf.php' % host, channel_id,
27 data=urlencode_postdata((
28 ('method', 'getRoomData'),
29 ('args[]', channel_id),
30 ('args[]', 'false'),
31 )), headers={'X-Requested-With': 'XMLHttpRequest'})
32
33 server_url = amf['localData']['videoServerUrl']
34
35 uploader_id = try_get(
36 amf, lambda x: x['performerData']['username'], compat_str) or channel_id
37 uploader = try_get(
38 amf, lambda x: x['performerData']['displayName'], compat_str)
39 like_count = int_or_none(try_get(
40 amf, lambda x: x['performerData']['loversCount']))
41
42 formats = self._extract_m3u8_formats(
43 '%s/hls/stream_%s/playlist.m3u8' % (server_url, uploader_id),
44 channel_id, 'mp4', m3u8_id='hls', live=True)
45 self._sort_formats(formats)
46
47 return {
48 'id': channel_id,
49 'title': uploader or uploader_id,
50 'uploader': uploader,
51 'uploader_id': uploader_id,
52 'like_count': like_count,
53 'age_limit': 18,
54 'is_live': True,
55 'formats': formats,
56 }
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/yt_dlp/extractor/bongacams.py b/yt_dlp/extractor/bongacams.py
--- a/yt_dlp/extractor/bongacams.py
+++ b/yt_dlp/extractor/bongacams.py
@@ -8,13 +8,28 @@
class BongaCamsIE(InfoExtractor):
- _VALID_URL = r'https?://(?P<host>(?:[^/]+\.)?bongacams\d*\.com)/(?P<id>[^/?&#]+)'
+ _VALID_URL = r'https?://(?P<host>(?:[^/]+\.)?bongacams\d*\.(?:com|net))/(?P<id>[^/?&#]+)'
_TESTS = [{
'url': 'https://de.bongacams.com/azumi-8',
'only_matching': True,
}, {
'url': 'https://cn.bongacams.com/azumi-8',
'only_matching': True,
+ }, {
+ 'url': 'https://de.bongacams.net/claireashton',
+ 'info_dict': {
+ 'id': 'claireashton',
+ 'ext': 'mp4',
+ 'title': r're:ClaireAshton \d{4}-\d{2}-\d{2} \d{2}:\d{2}',
+ 'age_limit': 18,
+ 'uploader_id': 'ClaireAshton',
+ 'uploader': 'ClaireAshton',
+ 'like_count': int,
+ 'is_live': True,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
}]
def _real_extract(self, url):
| {"golden_diff": "diff --git a/yt_dlp/extractor/bongacams.py b/yt_dlp/extractor/bongacams.py\n--- a/yt_dlp/extractor/bongacams.py\n+++ b/yt_dlp/extractor/bongacams.py\n@@ -8,13 +8,28 @@\n \n \n class BongaCamsIE(InfoExtractor):\n- _VALID_URL = r'https?://(?P<host>(?:[^/]+\\.)?bongacams\\d*\\.com)/(?P<id>[^/?&#]+)'\n+ _VALID_URL = r'https?://(?P<host>(?:[^/]+\\.)?bongacams\\d*\\.(?:com|net))/(?P<id>[^/?&#]+)'\n _TESTS = [{\n 'url': 'https://de.bongacams.com/azumi-8',\n 'only_matching': True,\n }, {\n 'url': 'https://cn.bongacams.com/azumi-8',\n 'only_matching': True,\n+ }, {\n+ 'url': 'https://de.bongacams.net/claireashton',\n+ 'info_dict': {\n+ 'id': 'claireashton',\n+ 'ext': 'mp4',\n+ 'title': r're:ClaireAshton \\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}',\n+ 'age_limit': 18,\n+ 'uploader_id': 'ClaireAshton',\n+ 'uploader': 'ClaireAshton',\n+ 'like_count': int,\n+ 'is_live': True,\n+ },\n+ 'params': {\n+ 'skip_download': True,\n+ },\n }]\n \n def _real_extract(self, url):\n", "issue": "bongacams.com moved to bongacams.net\n### DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE\r\n\r\n- [X] I understand that I will be **blocked** if I remove or skip any mandatory\\* field\r\n\r\n### Checklist\r\n\r\n- [X] I'm reporting a broken site\r\n- [X] I've verified that I'm running yt-dlp version **2022.09.01** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)\r\n- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details\r\n- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#video-url-contains-an-ampersand--and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)\r\n- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates\r\n- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)\r\n- [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required\r\n\r\n### Provide a description that is worded well enough to be understood\r\n\r\nbongacams.com has been moved to the new bongacams.net domain, please fix the code to work in the bongacams.net domain\r\n\r\n\nbongacams.com moved to bongacams.net\n### DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE\r\n\r\n- [X] I understand that I will be **blocked** if I remove or skip any mandatory\\* field\r\n\r\n### Checklist\r\n\r\n- [X] I'm reporting a broken site\r\n- [X] I've verified that I'm running yt-dlp version **2022.09.01** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)\r\n- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details\r\n- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#video-url-contains-an-ampersand--and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)\r\n- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. 
DO NOT post duplicates\r\n- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)\r\n- [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required\r\n\r\n### Provide a description that is worded well enough to be understood\r\n\r\nbongacams.com has been moved to the new bongacams.net domain, please fix the code to work in the bongacams.net domain\r\n\r\n\n", "before_files": [{"content": "from .common import InfoExtractor\nfrom ..compat import compat_str\nfrom ..utils import (\n int_or_none,\n try_get,\n urlencode_postdata,\n)\n\n\nclass BongaCamsIE(InfoExtractor):\n _VALID_URL = r'https?://(?P<host>(?:[^/]+\\.)?bongacams\\d*\\.com)/(?P<id>[^/?&#]+)'\n _TESTS = [{\n 'url': 'https://de.bongacams.com/azumi-8',\n 'only_matching': True,\n }, {\n 'url': 'https://cn.bongacams.com/azumi-8',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n mobj = self._match_valid_url(url)\n host = mobj.group('host')\n channel_id = mobj.group('id')\n\n amf = self._download_json(\n 'https://%s/tools/amf.php' % host, channel_id,\n data=urlencode_postdata((\n ('method', 'getRoomData'),\n ('args[]', channel_id),\n ('args[]', 'false'),\n )), headers={'X-Requested-With': 'XMLHttpRequest'})\n\n server_url = amf['localData']['videoServerUrl']\n\n uploader_id = try_get(\n amf, lambda x: x['performerData']['username'], compat_str) or channel_id\n uploader = try_get(\n amf, lambda x: x['performerData']['displayName'], compat_str)\n like_count = int_or_none(try_get(\n amf, lambda x: x['performerData']['loversCount']))\n\n formats = self._extract_m3u8_formats(\n '%s/hls/stream_%s/playlist.m3u8' % (server_url, uploader_id),\n channel_id, 'mp4', m3u8_id='hls', live=True)\n self._sort_formats(formats)\n\n return {\n 'id': channel_id,\n 'title': uploader or uploader_id,\n 'uploader': uploader,\n 'uploader_id': uploader_id,\n 'like_count': like_count,\n 'age_limit': 18,\n 'is_live': True,\n 'formats': formats,\n }\n", "path": "yt_dlp/extractor/bongacams.py"}], "after_files": [{"content": "from .common import InfoExtractor\nfrom ..compat import compat_str\nfrom ..utils import (\n int_or_none,\n try_get,\n urlencode_postdata,\n)\n\n\nclass BongaCamsIE(InfoExtractor):\n _VALID_URL = r'https?://(?P<host>(?:[^/]+\\.)?bongacams\\d*\\.(?:com|net))/(?P<id>[^/?&#]+)'\n _TESTS = [{\n 'url': 'https://de.bongacams.com/azumi-8',\n 'only_matching': True,\n }, {\n 'url': 'https://cn.bongacams.com/azumi-8',\n 'only_matching': True,\n }, {\n 'url': 'https://de.bongacams.net/claireashton',\n 'info_dict': {\n 'id': 'claireashton',\n 'ext': 'mp4',\n 'title': r're:ClaireAshton \\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}',\n 'age_limit': 18,\n 'uploader_id': 'ClaireAshton',\n 'uploader': 'ClaireAshton',\n 'like_count': int,\n 'is_live': True,\n },\n 'params': {\n 'skip_download': True,\n },\n }]\n\n def _real_extract(self, url):\n mobj = self._match_valid_url(url)\n host = mobj.group('host')\n channel_id = mobj.group('id')\n\n amf = self._download_json(\n 'https://%s/tools/amf.php' % host, channel_id,\n data=urlencode_postdata((\n ('method', 'getRoomData'),\n ('args[]', channel_id),\n ('args[]', 'false'),\n )), headers={'X-Requested-With': 'XMLHttpRequest'})\n\n server_url = amf['localData']['videoServerUrl']\n\n uploader_id = try_get(\n amf, lambda x: x['performerData']['username'], compat_str) or channel_id\n uploader = 
try_get(\n amf, lambda x: x['performerData']['displayName'], compat_str)\n like_count = int_or_none(try_get(\n amf, lambda x: x['performerData']['loversCount']))\n\n formats = self._extract_m3u8_formats(\n '%s/hls/stream_%s/playlist.m3u8' % (server_url, uploader_id),\n channel_id, 'mp4', m3u8_id='hls', live=True)\n self._sort_formats(formats)\n\n return {\n 'id': channel_id,\n 'title': uploader or uploader_id,\n 'uploader': uploader,\n 'uploader_id': uploader_id,\n 'like_count': like_count,\n 'age_limit': 18,\n 'is_live': True,\n 'formats': formats,\n }\n", "path": "yt_dlp/extractor/bongacams.py"}]} | 1,667 | 395 |
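As a quick illustration of what the bongacams record's golden diff changes, the sketch below (not part of yt-dlp or of the record itself) runs the updated `_VALID_URL` pattern against the two test URLs listed in the record and prints the named groups the extractor relies on; both should match once the host alternation accepts `net` as well as `com`.

```python
import re

# Updated pattern from the record's golden diff: the host may end in .com or .net
VALID_URL = r'https?://(?P<host>(?:[^/]+\.)?bongacams\d*\.(?:com|net))/(?P<id>[^/?&#]+)'

# URLs taken from the record's _TESTS entries
samples = [
    'https://de.bongacams.com/azumi-8',
    'https://de.bongacams.net/claireashton',
]

for url in samples:
    m = re.match(VALID_URL, url)
    # Each URL should match and expose the host and channel id groups
    print(m.group('host'), m.group('id'))
```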
gh_patches_debug_23907 | rasdani/github-patches | git_diff | Project-MONAI__MONAI-443 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
model checkpoint saver/loader dictionary
**Describe the bug**
when `save_dict` of `monai.handlers.CheckpointSaver` is a dictionary with a single item,
loading the file with `monai.handlers.CheckpointLoader` raises an error.
**To Reproduce**
To reproduce the issue:
```python
import logging
import sys
import torch
from ignite.engine import Engine
from monai.handlers import CheckpointLoader, CheckpointSaver
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
net = torch.nn.PReLU()
engine = Engine(lambda e, b: None)
CheckpointSaver(save_dir=".", save_dict={"net": net}, save_final=True).attach(engine)
engine.run([0] * 8, max_epochs=5)
CheckpointLoader(load_path="./net_final_iteration=40.pth", load_dict={"net": net}).attach(engine)
engine.run([0] * 8, max_epochs=1)
```
the output (showing loader failure) is:
```
INFO:ignite.engine.engine.Engine:Engine run starting with max_epochs=5.
INFO:ignite.engine.engine.Engine:Epoch[1] Complete. Time taken: 00:00:00
INFO:ignite.engine.engine.Engine:Epoch[2] Complete. Time taken: 00:00:00
INFO:ignite.engine.engine.Engine:Epoch[3] Complete. Time taken: 00:00:00
INFO:ignite.engine.engine.Engine:Epoch[4] Complete. Time taken: 00:00:00
INFO:ignite.engine.engine.Engine:Epoch[5] Complete. Time taken: 00:00:00
INFO:ignite.engine.engine.Engine:Train completed, saved final checkpoint: ./net_final_iteration=40.pth
INFO:ignite.engine.engine.Engine:Engine run complete. Time taken 00:00:00
INFO:ignite.engine.engine.Engine:Engine run starting with max_epochs=1.
ERROR:ignite.engine.engine.Engine:Engine run is terminating due to exception: Object labeled by 'net' from `to_load` is not found in the checkpoint.
INFO:ignite.engine.engine.Engine:Exception_raised, saved exception checkpoint: ./net_final_iteration=40.pth
```
**Expected behavior**
the loader should be able to read this dict structure, to be consistent with the case where `save_dict` has more than one item. For example:
```python
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
net = torch.nn.PReLU()
engine = Engine(lambda e, b: None)
CheckpointSaver(save_dir=".", save_dict={"net": net, 'net1': net}, save_final=True).attach(engine)
engine.run([0] * 8, max_epochs=5)
CheckpointLoader(load_path="./checkpoint_final_iteration=40.pth", load_dict={"net": net}).attach(engine)
engine.run([0] * 8, max_epochs=1)
```
this example uses `{"net": net, 'net1': net}` and produces the expected output:
```
INFO:ignite.engine.engine.Engine:Engine run starting with max_epochs=5.
INFO:ignite.engine.engine.Engine:Epoch[1] Complete. Time taken: 00:00:00
INFO:ignite.engine.engine.Engine:Epoch[2] Complete. Time taken: 00:00:00
INFO:ignite.engine.engine.Engine:Epoch[3] Complete. Time taken: 00:00:00
INFO:ignite.engine.engine.Engine:Epoch[4] Complete. Time taken: 00:00:00
INFO:ignite.engine.engine.Engine:Epoch[5] Complete. Time taken: 00:00:00
INFO:ignite.engine.engine.Engine:Train completed, saved final checkpoint: ./checkpoint_final_iteration=40.pth
INFO:ignite.engine.engine.Engine:Engine run complete. Time taken 00:00:00
INFO:ignite.engine.engine.Engine:Engine run starting with max_epochs=1.
INFO:ignite.engine.engine.Engine:Restored all variables from ./checkpoint_final_iteration=40.pth
INFO:ignite.engine.engine.Engine:Epoch[1] Complete. Time taken: 00:00:00
INFO:ignite.engine.engine.Engine:Train completed, saved final checkpoint: ./checkpoint_final_iteration=40.pth
INFO:ignite.engine.engine.Engine:Engine run complete. Time taken 00:00:00
```
**Environment (please complete the following information):**
 - OS: macOS
- Python version 3.6
- MONAI version 1d73f65f3a1c2bf47fb769cf21b0329acfabd114
**Additional context**
see also https://github.com/pytorch/ignite/issues/770
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `monai/handlers/checkpoint_loader.py`
Content:
```
1 # Copyright 2020 MONAI Consortium
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 # http://www.apache.org/licenses/LICENSE-2.0
6 # Unless required by applicable law or agreed to in writing, software
7 # distributed under the License is distributed on an "AS IS" BASIS,
8 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 # See the License for the specific language governing permissions and
10 # limitations under the License.
11
12 import logging
13 import torch
14 from ignite.engine import Events
15 from ignite.handlers import Checkpoint
16
17
18 class CheckpointLoader:
19 """
20 CheckpointLoader acts as an Ignite handler to load checkpoint data from file.
21 It can load variables for network, optimizer, lr_scheduler.
22 And also can restore training if load the state_dict of Ignite engine.
23
24 Args:
25 load_path (str): the file path of checkpoint, it should be a PyTorch pth file.
26 load_dict (dict): target objects that load checkpoint to. examples::
27
28 {'network': net, 'optimizer': optimizer, 'engine', engine}
29
30 name (str): identifier of logging.logger to use, if None, defaulting to ``engine.logger``.
31
32 """
33
34 def __init__(self, load_path, load_dict, name=None):
35 assert load_path is not None, "must provide clear path to load checkpoint."
36 self.load_path = load_path
37 assert load_dict is not None and len(load_dict) > 0, "must provide target objects to load."
38 self.logger = None if name is None else logging.getLogger(name)
39 for k, v in load_dict.items():
40 if hasattr(v, "module"):
41 load_dict[k] = v.module
42 self.load_dict = load_dict
43
44 def attach(self, engine):
45 if self.logger is None:
46 self.logger = engine.logger
47 return engine.add_event_handler(Events.STARTED, self)
48
49 def __call__(self, engine):
50 checkpoint = torch.load(self.load_path)
51 Checkpoint.load_objects(to_load=self.load_dict, checkpoint=checkpoint)
52 self.logger.info(f"Restored all variables from {self.load_path}")
53
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/monai/handlers/checkpoint_loader.py b/monai/handlers/checkpoint_loader.py
--- a/monai/handlers/checkpoint_loader.py
+++ b/monai/handlers/checkpoint_loader.py
@@ -19,7 +19,9 @@
"""
CheckpointLoader acts as an Ignite handler to load checkpoint data from file.
It can load variables for network, optimizer, lr_scheduler.
- And also can restore training if load the state_dict of Ignite engine.
+ If saving checkpoint after `torch.nn.DataParallel`, need to save `model.module` instead
+ as PyTorch recommended and then use this loader to load the model.
+ And also can restore training session if load the state_dict of Ignite engine.
Args:
load_path (str): the file path of checkpoint, it should be a PyTorch pth file.
@@ -48,5 +50,10 @@
def __call__(self, engine):
checkpoint = torch.load(self.load_path)
+ if len(self.load_dict) == 1:
+ key = list(self.load_dict.keys())[0]
+ if not (key in checkpoint):
+ checkpoint = {key: checkpoint}
+
Checkpoint.load_objects(to_load=self.load_dict, checkpoint=checkpoint)
self.logger.info(f"Restored all variables from {self.load_path}")
| {"golden_diff": "diff --git a/monai/handlers/checkpoint_loader.py b/monai/handlers/checkpoint_loader.py\n--- a/monai/handlers/checkpoint_loader.py\n+++ b/monai/handlers/checkpoint_loader.py\n@@ -19,7 +19,9 @@\n \"\"\"\n CheckpointLoader acts as an Ignite handler to load checkpoint data from file.\n It can load variables for network, optimizer, lr_scheduler.\n- And also can restore training if load the state_dict of Ignite engine.\n+ If saving checkpoint after `torch.nn.DataParallel`, need to save `model.module` instead\n+ as PyTorch recommended and then use this loader to load the model.\n+ And also can restore training session if load the state_dict of Ignite engine.\n \n Args:\n load_path (str): the file path of checkpoint, it should be a PyTorch pth file.\n@@ -48,5 +50,10 @@\n \n def __call__(self, engine):\n checkpoint = torch.load(self.load_path)\n+ if len(self.load_dict) == 1:\n+ key = list(self.load_dict.keys())[0]\n+ if not (key in checkpoint):\n+ checkpoint = {key: checkpoint}\n+\n Checkpoint.load_objects(to_load=self.load_dict, checkpoint=checkpoint)\n self.logger.info(f\"Restored all variables from {self.load_path}\")\n", "issue": "model checkpoint saver/loader dictionary\n**Describe the bug**\r\nwhen `save_dict` of `monai.handlers.CheckpointSaver` is a dictionary with a single item,\r\nloading the file with `monai.handlers.CheckpointLoader` raises an error. \r\n\r\n**To Reproduce**\r\nTo reproduce the issue:\r\n```python\r\nimport logging\r\nimport sys\r\n\r\nimport torch\r\nfrom ignite.engine import Engine\r\n\r\nfrom monai.handlers import CheckpointLoader, CheckpointSaver\r\n\r\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO)\r\n\r\nnet = torch.nn.PReLU()\r\nengine = Engine(lambda e, b: None)\r\n\r\nCheckpointSaver(save_dir=\".\", save_dict={\"net\": net}, save_final=True).attach(engine)\r\nengine.run([0] * 8, max_epochs=5)\r\n\r\nCheckpointLoader(load_path=\"./net_final_iteration=40.pth\", load_dict={\"net\": net}).attach(engine)\r\nengine.run([0] * 8, max_epochs=1)\r\n```\r\nthe output (showing loader failure) is:\r\n```\r\nINFO:ignite.engine.engine.Engine:Engine run starting with max_epochs=5.\r\nINFO:ignite.engine.engine.Engine:Epoch[1] Complete. Time taken: 00:00:00\r\nINFO:ignite.engine.engine.Engine:Epoch[2] Complete. Time taken: 00:00:00\r\nINFO:ignite.engine.engine.Engine:Epoch[3] Complete. Time taken: 00:00:00\r\nINFO:ignite.engine.engine.Engine:Epoch[4] Complete. Time taken: 00:00:00\r\nINFO:ignite.engine.engine.Engine:Epoch[5] Complete. Time taken: 00:00:00\r\nINFO:ignite.engine.engine.Engine:Train completed, saved final checkpoint: ./net_final_iteration=40.pth\r\nINFO:ignite.engine.engine.Engine:Engine run complete. 
Time taken 00:00:00\r\nINFO:ignite.engine.engine.Engine:Engine run starting with max_epochs=1.\r\nERROR:ignite.engine.engine.Engine:Engine run is terminating due to exception: Object labeled by 'net' from `to_load` is not found in the checkpoint.\r\nINFO:ignite.engine.engine.Engine:Exception_raised, saved exception checkpoint: ./net_final_iteration=40.pth\r\n```\r\n\r\n**Expected behavior**\r\nthe loader should be able to read this dict structure, to be consistent with the case of `save_dict` where the dict has more than one item, example:\r\n```python\r\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO)\r\n\r\nnet = torch.nn.PReLU()\r\nengine = Engine(lambda e, b: None)\r\n\r\nCheckpointSaver(save_dir=\".\", save_dict={\"net\": net, 'net1': net}, save_final=True).attach(engine)\r\nengine.run([0] * 8, max_epochs=5)\r\n\r\nCheckpointLoader(load_path=\"./checkpoint_final_iteration=40.pth\", load_dict={\"net\": net}).attach(engine)\r\nengine.run([0] * 8, max_epochs=1)\r\n```\r\nthis example uses `{\"net\": net, 'net1': net}` and the output is expected:\r\n```\r\nINFO:ignite.engine.engine.Engine:Engine run starting with max_epochs=5.\r\nINFO:ignite.engine.engine.Engine:Epoch[1] Complete. Time taken: 00:00:00\r\nINFO:ignite.engine.engine.Engine:Epoch[2] Complete. Time taken: 00:00:00\r\nINFO:ignite.engine.engine.Engine:Epoch[3] Complete. Time taken: 00:00:00\r\nINFO:ignite.engine.engine.Engine:Epoch[4] Complete. Time taken: 00:00:00\r\nINFO:ignite.engine.engine.Engine:Epoch[5] Complete. Time taken: 00:00:00\r\nINFO:ignite.engine.engine.Engine:Train completed, saved final checkpoint: ./checkpoint_final_iteration=40.pth\r\nINFO:ignite.engine.engine.Engine:Engine run complete. Time taken 00:00:00\r\nINFO:ignite.engine.engine.Engine:Engine run starting with max_epochs=1.\r\nINFO:ignite.engine.engine.Engine:Restored all variables from ./checkpoint_final_iteration=40.pth\r\nINFO:ignite.engine.engine.Engine:Epoch[1] Complete. Time taken: 00:00:00\r\nINFO:ignite.engine.engine.Engine:Train completed, saved final checkpoint: ./checkpoint_final_iteration=40.pth\r\nINFO:ignite.engine.engine.Engine:Engine run complete. 
Time taken 00:00:00\r\n```\r\n\r\n**Environment (please complete the following information):**\r\n - OS macos\r\n - Python version 3.6\r\n - MONAI version 1d73f65f3a1c2bf47fb769cf21b0329acfabd114\r\n\r\n**Additional context**\r\nsee also https://github.com/pytorch/ignite/issues/770\n", "before_files": [{"content": "# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport torch\nfrom ignite.engine import Events\nfrom ignite.handlers import Checkpoint\n\n\nclass CheckpointLoader:\n \"\"\"\n CheckpointLoader acts as an Ignite handler to load checkpoint data from file.\n It can load variables for network, optimizer, lr_scheduler.\n And also can restore training if load the state_dict of Ignite engine.\n\n Args:\n load_path (str): the file path of checkpoint, it should be a PyTorch pth file.\n load_dict (dict): target objects that load checkpoint to. examples::\n\n {'network': net, 'optimizer': optimizer, 'engine', engine}\n\n name (str): identifier of logging.logger to use, if None, defaulting to ``engine.logger``.\n\n \"\"\"\n\n def __init__(self, load_path, load_dict, name=None):\n assert load_path is not None, \"must provide clear path to load checkpoint.\"\n self.load_path = load_path\n assert load_dict is not None and len(load_dict) > 0, \"must provide target objects to load.\"\n self.logger = None if name is None else logging.getLogger(name)\n for k, v in load_dict.items():\n if hasattr(v, \"module\"):\n load_dict[k] = v.module\n self.load_dict = load_dict\n\n def attach(self, engine):\n if self.logger is None:\n self.logger = engine.logger\n return engine.add_event_handler(Events.STARTED, self)\n\n def __call__(self, engine):\n checkpoint = torch.load(self.load_path)\n Checkpoint.load_objects(to_load=self.load_dict, checkpoint=checkpoint)\n self.logger.info(f\"Restored all variables from {self.load_path}\")\n", "path": "monai/handlers/checkpoint_loader.py"}], "after_files": [{"content": "# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport torch\nfrom ignite.engine import Events\nfrom ignite.handlers import Checkpoint\n\n\nclass CheckpointLoader:\n \"\"\"\n CheckpointLoader acts as an Ignite handler to load checkpoint data from file.\n It can load variables for network, optimizer, lr_scheduler.\n If saving checkpoint after `torch.nn.DataParallel`, need to save `model.module` instead\n as PyTorch recommended and then use this loader to load the model.\n And also can restore training session if load 
the state_dict of Ignite engine.\n\n Args:\n load_path (str): the file path of checkpoint, it should be a PyTorch pth file.\n load_dict (dict): target objects that load checkpoint to. examples::\n\n {'network': net, 'optimizer': optimizer, 'engine', engine}\n\n name (str): identifier of logging.logger to use, if None, defaulting to ``engine.logger``.\n\n \"\"\"\n\n def __init__(self, load_path, load_dict, name=None):\n assert load_path is not None, \"must provide clear path to load checkpoint.\"\n self.load_path = load_path\n assert load_dict is not None and len(load_dict) > 0, \"must provide target objects to load.\"\n self.logger = None if name is None else logging.getLogger(name)\n for k, v in load_dict.items():\n if hasattr(v, \"module\"):\n load_dict[k] = v.module\n self.load_dict = load_dict\n\n def attach(self, engine):\n if self.logger is None:\n self.logger = engine.logger\n return engine.add_event_handler(Events.STARTED, self)\n\n def __call__(self, engine):\n checkpoint = torch.load(self.load_path)\n if len(self.load_dict) == 1:\n key = list(self.load_dict.keys())[0]\n if not (key in checkpoint):\n checkpoint = {key: checkpoint}\n\n Checkpoint.load_objects(to_load=self.load_dict, checkpoint=checkpoint)\n self.logger.info(f\"Restored all variables from {self.load_path}\")\n", "path": "monai/handlers/checkpoint_loader.py"}]} | 1,879 | 302 |
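The essence of the MONAI patch above is a small normalization step: when `load_dict` holds a single object and its key is missing from the loaded file, the checkpoint may be the bare state_dict (see the linked ignite issue), so it gets re-wrapped under that key before `Checkpoint.load_objects` sees it. Below is a minimal sketch of that step, pulled out so it runs with PyTorch alone; `normalize_checkpoint` is a name introduced here for illustration and is not part of MONAI.

```python
import torch

def normalize_checkpoint(load_dict, checkpoint):
    # With a single-object save_dict the saved file may hold the raw state_dict,
    # so re-wrap it under the expected key (mirrors the golden diff above).
    if len(load_dict) == 1:
        key = next(iter(load_dict))
        if key not in checkpoint:
            checkpoint = {key: checkpoint}
    return checkpoint

net = torch.nn.PReLU()
raw = net.state_dict()                      # what a single-item save produces
fixed = normalize_checkpoint({"net": net}, raw)
assert "net" in fixed                       # now matches load_dict={'net': net}
```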
gh_patches_debug_41295 | rasdani/github-patches | git_diff | Azure__azure-cli-extensions-96 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Switch to using Node 8.9 as the default version for az webapp new
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/webapp/azext_webapp/custom.py`
Content:
```
1 # --------------------------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License. See License.txt in the project root for license information.
4 # --------------------------------------------------------------------------------------------
5
6 from __future__ import print_function
7 from knack.log import get_logger
8
9 from azure.mgmt.web.models import (AppServicePlan, SkuDescription)
10
11 from azure.cli.command_modules.appservice.custom import (
12 enable_zip_deploy,
13 create_webapp,
14 update_app_settings,
15 _get_site_credential,
16 _get_scm_url,
17 _get_sku_name)
18
19 from .create_util import (
20 zip_contents_from_dir,
21 get_runtime_version_details,
22 create_resource_group,
23 check_resource_group_exists,
24 check_resource_group_supports_os,
25 check_if_asp_exists,
26 check_app_exists,
27 get_lang_from_content,
28 web_client_factory
29 )
30
31 from ._constants import (NODE_RUNTIME_NAME, OS_DEFAULT)
32
33 logger = get_logger(__name__)
34
35 # pylint:disable=no-member,too-many-lines,too-many-locals,too-many-statements
36
37
38 def create_deploy_webapp(cmd, name, location=None, dryrun=False):
39 import os
40 import json
41
42 client = web_client_factory(cmd.cli_ctx)
43 # the code to deploy is expected to be the current directory the command is running from
44 src_dir = os.getcwd()
45
46 # if dir is empty, show a message in dry run
47 do_deployment = False if os.listdir(src_dir) == [] else True
48
49 # determine the details for app to be created from src contents
50 lang_details = get_lang_from_content(src_dir)
51 # we support E2E create and deploy for Node & dotnetcore, any other stack, set defaults for os & runtime
52 # and skip deployment
53 if lang_details['language'] is None:
54 do_deployment = False
55 sku = 'F1'
56 os_val = OS_DEFAULT
57 detected_version = '-'
58 runtime_version = '-'
59 else:
60 sku = lang_details.get("default_sku")
61 language = lang_details.get("language")
62 os_val = "Linux" if language.lower() == NODE_RUNTIME_NAME else OS_DEFAULT
63 # detect the version
64 data = get_runtime_version_details(lang_details.get('file_loc'), language)
65 version_used_create = data.get('to_create')
66 detected_version = data.get('detected')
67 runtime_version = "{}|{}".format(language, version_used_create)
68
69 if location is None:
70 locs = client.list_geo_regions(sku, True)
71 available_locs = []
72 for loc in locs:
73 available_locs.append(loc.geo_region_name)
74 location = available_locs[0]
75 # Remove spaces from the location string, incase the GeoRegion string is used
76 loc_name = location.replace(" ", "")
77 full_sku = _get_sku_name(sku)
78
79 is_linux = True if os_val == 'Linux' else False
80
81 asp = "appsvc_asp_{}_{}".format(os_val, loc_name)
82 rg_name = "appsvc_rg_{}_{}".format(os_val, loc_name)
83
84 str_no_contents_warn = ""
85 if not do_deployment:
86 str_no_contents_warn = "[Empty directory, no deployment will be triggered]"
87
88 # Resource group: check if default RG is set
89 default_rg = cmd.cli_ctx.config.get('defaults', 'group', fallback=None)
90 if default_rg and check_resource_group_supports_os(cmd, default_rg, location, is_linux):
91 rg_name = default_rg
92 rg_mssg = "[Using default Resource group]"
93 else:
94 rg_mssg = ""
95
96 src_path = "{} {}".format(src_dir.replace("\\", "\\\\"), str_no_contents_warn)
97 rg_str = "{} {}".format(rg_name, rg_mssg)
98
99 dry_run_str = r""" {
100 "name" : "%s",
101 "serverfarm" : "%s",
102 "resourcegroup" : "%s",
103 "sku": "%s",
104 "os": "%s",
105 "location" : "%s",
106 "src_path" : "%s",
107 "version_detected": "%s",
108 "version_to_create": "%s"
109 }
110 """ % (name, asp, rg_str, full_sku, os_val, location, src_path,
111 detected_version, runtime_version)
112
113 create_json = json.dumps(json.loads(dry_run_str), indent=4, sort_keys=True)
114 if dryrun:
115 logger.warning("Web app will be created with the below configuration,re-run command "
116 "without the --dryrun flag to create & deploy a new app")
117 logger.warning(create_json)
118 return None
119
120 # create RG if the RG doesn't already exist
121 if not check_resource_group_exists(cmd, rg_name):
122 logger.warning("Creating Resource group '%s' ...", rg_name)
123 create_resource_group(cmd, rg_name, location)
124 logger.warning("Resource group creation complete")
125 else:
126 logger.warning("Resource group '%s' already exists.", rg_name)
127
128 # create asp
129 if not check_if_asp_exists(cmd, rg_name, asp):
130 logger.warning("Creating App service plan '%s' ...", asp)
131 sku_def = SkuDescription(tier=full_sku, name=sku, capacity=(1 if is_linux else None))
132 plan_def = AppServicePlan(loc_name, app_service_plan_name=asp,
133 sku=sku_def, reserved=(is_linux or None))
134 client.app_service_plans.create_or_update(rg_name, asp, plan_def)
135 logger.warning("App service plan creation complete")
136 else:
137 logger.warning("App service plan '%s' already exists.", asp)
138
139 # create the app
140 if not check_app_exists(cmd, rg_name, name):
141 logger.warning("Creating app '%s' ....", name)
142 create_webapp(cmd, rg_name, name, asp, runtime_version if is_linux else None)
143 logger.warning("Webapp creation complete")
144 else:
145 logger.warning("App '%s' already exists", name)
146
147 if do_deployment:
148 # setting to build after deployment
149 logger.warning("Updating app settings to enable build after deployment")
150 update_app_settings(cmd, rg_name, name, ["SCM_DO_BUILD_DURING_DEPLOYMENT=true"])
151 # work around until the timeout limits issue for linux is investigated & fixed
152 # wakeup kudu, by making an SCM call
153
154 import requests
155 # work around until the timeout limits issue for linux is investigated & fixed
156 user_name, password = _get_site_credential(cmd.cli_ctx, rg_name, name)
157 scm_url = _get_scm_url(cmd, rg_name, name)
158 import urllib3
159 authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
160 requests.get(scm_url + '/api/settings', headers=authorization)
161
162 logger.warning("Creating zip with contents of dir %s ...", src_dir)
163 # zip contents & deploy
164 zip_file_path = zip_contents_from_dir(src_dir, language)
165
166 logger.warning("Deploying and building contents to app."
167 "This operation can take some time to finish...")
168 enable_zip_deploy(cmd, rg_name, name, zip_file_path)
169 else:
170 logger.warning("No 'NODE' or 'DOTNETCORE' package detected, skipping zip and deploy process")
171
172 logger.warning("All done. %s", create_json)
173 return None
174
```
Path: `src/webapp/azext_webapp/_constants.py`
Content:
```
1 # --------------------------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License. See License.txt in the project root for license information.
4 # --------------------------------------------------------------------------------------------
5
6 NODE_VERSION_DEFAULT = "8.1"
7 NETCORE_VERSION_DEFAULT = "2.0"
8 # TODO: Remove this once we have the api returning the versions
9 NODE_VERSIONS = ['4.4', '4.5', '6.2', '6.6', '6.9', '6.11', '8.0', '8.1']
10 NETCORE_VERSIONS = ['1.0', '1.1', '2.0']
11 NODE_RUNTIME_NAME = "node"
12 NETCORE_RUNTIME_NAME = "dotnetcore"
13 OS_DEFAULT = "Windows"
14
```
Path: `src/webapp/setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 # --------------------------------------------------------------------------------------------
4 # Copyright (c) Microsoft Corporation. All rights reserved.
5 # Licensed under the MIT License. See License.txt in the project root for license information.
6 # --------------------------------------------------------------------------------------------
7
8 from codecs import open
9 from setuptools import setup, find_packages
10
11 VERSION = "0.1.1"
12
13 CLASSIFIERS = [
14 'Development Status :: 4 - Beta',
15 'Intended Audience :: Developers',
16 'Intended Audience :: System Administrators',
17 'Programming Language :: Python',
18 'Programming Language :: Python :: 2',
19 'Programming Language :: Python :: 2.7',
20 'Programming Language :: Python :: 3',
21 'Programming Language :: Python :: 3.4',
22 'Programming Language :: Python :: 3.5',
23 'Programming Language :: Python :: 3.6',
24 'License :: OSI Approved :: MIT License',
25 ]
26
27 DEPENDENCIES = []
28
29 setup(
30 name='webapp',
31 version=VERSION,
32 description='An Azure CLI Extension to manage appservice resources',
33 long_description='An Azure CLI Extension to manage appservice resources',
34 license='MIT',
35 author='Sisira Panchagnula',
36 author_email='[email protected]',
37 url='https://github.com/Azure/azure-cli-extensions',
38 classifiers=CLASSIFIERS,
39 packages=find_packages(exclude=["tests"]),
40 install_requires=DEPENDENCIES
41 )
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/webapp/azext_webapp/_constants.py b/src/webapp/azext_webapp/_constants.py
--- a/src/webapp/azext_webapp/_constants.py
+++ b/src/webapp/azext_webapp/_constants.py
@@ -3,7 +3,7 @@
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
-NODE_VERSION_DEFAULT = "8.1"
+NODE_VERSION_DEFAULT = "8.9"
NETCORE_VERSION_DEFAULT = "2.0"
# TODO: Remove this once we have the api returning the versions
NODE_VERSIONS = ['4.4', '4.5', '6.2', '6.6', '6.9', '6.11', '8.0', '8.1']
diff --git a/src/webapp/azext_webapp/custom.py b/src/webapp/azext_webapp/custom.py
--- a/src/webapp/azext_webapp/custom.py
+++ b/src/webapp/azext_webapp/custom.py
@@ -109,13 +109,12 @@
}
""" % (name, asp, rg_str, full_sku, os_val, location, src_path,
detected_version, runtime_version)
+ create_json = json.loads(dry_run_str)
- create_json = json.dumps(json.loads(dry_run_str), indent=4, sort_keys=True)
if dryrun:
logger.warning("Web app will be created with the below configuration,re-run command "
"without the --dryrun flag to create & deploy a new app")
- logger.warning(create_json)
- return None
+ return create_json
# create RG if the RG doesn't already exist
if not check_resource_group_exists(cmd, rg_name):
@@ -139,7 +138,10 @@
# create the app
if not check_app_exists(cmd, rg_name, name):
logger.warning("Creating app '%s' ....", name)
- create_webapp(cmd, rg_name, name, asp, runtime_version if is_linux else None)
+ app_created = create_webapp(cmd, rg_name, name, asp, runtime_version if is_linux else None)
+ # update create_json to include the app_url
+ url = app_created.enabled_host_names[0] # picks the custom domain URL incase a domain is assigned
+ url = 'https://' + url
logger.warning("Webapp creation complete")
else:
logger.warning("App '%s' already exists", name)
@@ -166,8 +168,13 @@
logger.warning("Deploying and building contents to app."
"This operation can take some time to finish...")
enable_zip_deploy(cmd, rg_name, name, zip_file_path)
+ # Remove the file afer deployment, handling exception if user removed the file manually
+ try:
+ os.remove(zip_file_path)
+ except OSError:
+ pass
else:
logger.warning("No 'NODE' or 'DOTNETCORE' package detected, skipping zip and deploy process")
-
- logger.warning("All done. %s", create_json)
- return None
+ create_json.update({'app_url': url})
+ logger.warning("All done.")
+ return create_json
diff --git a/src/webapp/setup.py b/src/webapp/setup.py
--- a/src/webapp/setup.py
+++ b/src/webapp/setup.py
@@ -8,7 +8,7 @@
from codecs import open
from setuptools import setup, find_packages
-VERSION = "0.1.1"
+VERSION = "0.1.2"
CLASSIFIERS = [
'Development Status :: 4 - Beta',
| {"golden_diff": "diff --git a/src/webapp/azext_webapp/_constants.py b/src/webapp/azext_webapp/_constants.py\n--- a/src/webapp/azext_webapp/_constants.py\n+++ b/src/webapp/azext_webapp/_constants.py\n@@ -3,7 +3,7 @@\n # Licensed under the MIT License. See License.txt in the project root for license information.\n # --------------------------------------------------------------------------------------------\n \n-NODE_VERSION_DEFAULT = \"8.1\"\n+NODE_VERSION_DEFAULT = \"8.9\"\n NETCORE_VERSION_DEFAULT = \"2.0\"\n # TODO: Remove this once we have the api returning the versions\n NODE_VERSIONS = ['4.4', '4.5', '6.2', '6.6', '6.9', '6.11', '8.0', '8.1']\ndiff --git a/src/webapp/azext_webapp/custom.py b/src/webapp/azext_webapp/custom.py\n--- a/src/webapp/azext_webapp/custom.py\n+++ b/src/webapp/azext_webapp/custom.py\n@@ -109,13 +109,12 @@\n }\n \"\"\" % (name, asp, rg_str, full_sku, os_val, location, src_path,\n detected_version, runtime_version)\n+ create_json = json.loads(dry_run_str)\n \n- create_json = json.dumps(json.loads(dry_run_str), indent=4, sort_keys=True)\n if dryrun:\n logger.warning(\"Web app will be created with the below configuration,re-run command \"\n \"without the --dryrun flag to create & deploy a new app\")\n- logger.warning(create_json)\n- return None\n+ return create_json\n \n # create RG if the RG doesn't already exist\n if not check_resource_group_exists(cmd, rg_name):\n@@ -139,7 +138,10 @@\n # create the app\n if not check_app_exists(cmd, rg_name, name):\n logger.warning(\"Creating app '%s' ....\", name)\n- create_webapp(cmd, rg_name, name, asp, runtime_version if is_linux else None)\n+ app_created = create_webapp(cmd, rg_name, name, asp, runtime_version if is_linux else None)\n+ # update create_json to include the app_url\n+ url = app_created.enabled_host_names[0] # picks the custom domain URL incase a domain is assigned\n+ url = 'https://' + url\n logger.warning(\"Webapp creation complete\")\n else:\n logger.warning(\"App '%s' already exists\", name)\n@@ -166,8 +168,13 @@\n logger.warning(\"Deploying and building contents to app.\"\n \"This operation can take some time to finish...\")\n enable_zip_deploy(cmd, rg_name, name, zip_file_path)\n+ # Remove the file afer deployment, handling exception if user removed the file manually\n+ try:\n+ os.remove(zip_file_path)\n+ except OSError:\n+ pass\n else:\n logger.warning(\"No 'NODE' or 'DOTNETCORE' package detected, skipping zip and deploy process\")\n-\n- logger.warning(\"All done. %s\", create_json)\n- return None\n+ create_json.update({'app_url': url})\n+ logger.warning(\"All done.\")\n+ return create_json\ndiff --git a/src/webapp/setup.py b/src/webapp/setup.py\n--- a/src/webapp/setup.py\n+++ b/src/webapp/setup.py\n@@ -8,7 +8,7 @@\n from codecs import open\n from setuptools import setup, find_packages\n \n-VERSION = \"0.1.1\"\n+VERSION = \"0.1.2\"\n \n CLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n", "issue": "Switch to using Node 8.9 as the default version for az webapp new\n\n", "before_files": [{"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom __future__ import print_function\nfrom knack.log import get_logger\n\nfrom azure.mgmt.web.models import (AppServicePlan, SkuDescription)\n\nfrom azure.cli.command_modules.appservice.custom import (\n enable_zip_deploy,\n create_webapp,\n update_app_settings,\n _get_site_credential,\n _get_scm_url,\n _get_sku_name)\n\nfrom .create_util import (\n zip_contents_from_dir,\n get_runtime_version_details,\n create_resource_group,\n check_resource_group_exists,\n check_resource_group_supports_os,\n check_if_asp_exists,\n check_app_exists,\n get_lang_from_content,\n web_client_factory\n)\n\nfrom ._constants import (NODE_RUNTIME_NAME, OS_DEFAULT)\n\nlogger = get_logger(__name__)\n\n# pylint:disable=no-member,too-many-lines,too-many-locals,too-many-statements\n\n\ndef create_deploy_webapp(cmd, name, location=None, dryrun=False):\n import os\n import json\n\n client = web_client_factory(cmd.cli_ctx)\n # the code to deploy is expected to be the current directory the command is running from\n src_dir = os.getcwd()\n\n # if dir is empty, show a message in dry run\n do_deployment = False if os.listdir(src_dir) == [] else True\n\n # determine the details for app to be created from src contents\n lang_details = get_lang_from_content(src_dir)\n # we support E2E create and deploy for Node & dotnetcore, any other stack, set defaults for os & runtime\n # and skip deployment\n if lang_details['language'] is None:\n do_deployment = False\n sku = 'F1'\n os_val = OS_DEFAULT\n detected_version = '-'\n runtime_version = '-'\n else:\n sku = lang_details.get(\"default_sku\")\n language = lang_details.get(\"language\")\n os_val = \"Linux\" if language.lower() == NODE_RUNTIME_NAME else OS_DEFAULT\n # detect the version\n data = get_runtime_version_details(lang_details.get('file_loc'), language)\n version_used_create = data.get('to_create')\n detected_version = data.get('detected')\n runtime_version = \"{}|{}\".format(language, version_used_create)\n\n if location is None:\n locs = client.list_geo_regions(sku, True)\n available_locs = []\n for loc in locs:\n available_locs.append(loc.geo_region_name)\n location = available_locs[0]\n # Remove spaces from the location string, incase the GeoRegion string is used\n loc_name = location.replace(\" \", \"\")\n full_sku = _get_sku_name(sku)\n\n is_linux = True if os_val == 'Linux' else False\n\n asp = \"appsvc_asp_{}_{}\".format(os_val, loc_name)\n rg_name = \"appsvc_rg_{}_{}\".format(os_val, loc_name)\n\n str_no_contents_warn = \"\"\n if not do_deployment:\n str_no_contents_warn = \"[Empty directory, no deployment will be triggered]\"\n\n # Resource group: check if default RG is set\n default_rg = cmd.cli_ctx.config.get('defaults', 'group', fallback=None)\n if default_rg and check_resource_group_supports_os(cmd, default_rg, location, is_linux):\n rg_name = default_rg\n rg_mssg = \"[Using default Resource group]\"\n else:\n rg_mssg = \"\"\n\n src_path = \"{} {}\".format(src_dir.replace(\"\\\\\", \"\\\\\\\\\"), str_no_contents_warn)\n rg_str = \"{} {}\".format(rg_name, rg_mssg)\n\n dry_run_str = r\"\"\" {\n \"name\" : \"%s\",\n \"serverfarm\" : \"%s\",\n \"resourcegroup\" : \"%s\",\n \"sku\": \"%s\",\n \"os\": \"%s\",\n \"location\" : \"%s\",\n \"src_path\" : \"%s\",\n \"version_detected\": \"%s\",\n \"version_to_create\": \"%s\"\n }\n \"\"\" % (name, asp, rg_str, full_sku, os_val, location, src_path,\n 
detected_version, runtime_version)\n\n create_json = json.dumps(json.loads(dry_run_str), indent=4, sort_keys=True)\n if dryrun:\n logger.warning(\"Web app will be created with the below configuration,re-run command \"\n \"without the --dryrun flag to create & deploy a new app\")\n logger.warning(create_json)\n return None\n\n # create RG if the RG doesn't already exist\n if not check_resource_group_exists(cmd, rg_name):\n logger.warning(\"Creating Resource group '%s' ...\", rg_name)\n create_resource_group(cmd, rg_name, location)\n logger.warning(\"Resource group creation complete\")\n else:\n logger.warning(\"Resource group '%s' already exists.\", rg_name)\n\n # create asp\n if not check_if_asp_exists(cmd, rg_name, asp):\n logger.warning(\"Creating App service plan '%s' ...\", asp)\n sku_def = SkuDescription(tier=full_sku, name=sku, capacity=(1 if is_linux else None))\n plan_def = AppServicePlan(loc_name, app_service_plan_name=asp,\n sku=sku_def, reserved=(is_linux or None))\n client.app_service_plans.create_or_update(rg_name, asp, plan_def)\n logger.warning(\"App service plan creation complete\")\n else:\n logger.warning(\"App service plan '%s' already exists.\", asp)\n\n # create the app\n if not check_app_exists(cmd, rg_name, name):\n logger.warning(\"Creating app '%s' ....\", name)\n create_webapp(cmd, rg_name, name, asp, runtime_version if is_linux else None)\n logger.warning(\"Webapp creation complete\")\n else:\n logger.warning(\"App '%s' already exists\", name)\n\n if do_deployment:\n # setting to build after deployment\n logger.warning(\"Updating app settings to enable build after deployment\")\n update_app_settings(cmd, rg_name, name, [\"SCM_DO_BUILD_DURING_DEPLOYMENT=true\"])\n # work around until the timeout limits issue for linux is investigated & fixed\n # wakeup kudu, by making an SCM call\n\n import requests\n # work around until the timeout limits issue for linux is investigated & fixed\n user_name, password = _get_site_credential(cmd.cli_ctx, rg_name, name)\n scm_url = _get_scm_url(cmd, rg_name, name)\n import urllib3\n authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))\n requests.get(scm_url + '/api/settings', headers=authorization)\n\n logger.warning(\"Creating zip with contents of dir %s ...\", src_dir)\n # zip contents & deploy\n zip_file_path = zip_contents_from_dir(src_dir, language)\n\n logger.warning(\"Deploying and building contents to app.\"\n \"This operation can take some time to finish...\")\n enable_zip_deploy(cmd, rg_name, name, zip_file_path)\n else:\n logger.warning(\"No 'NODE' or 'DOTNETCORE' package detected, skipping zip and deploy process\")\n\n logger.warning(\"All done. %s\", create_json)\n return None\n", "path": "src/webapp/azext_webapp/custom.py"}, {"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nNODE_VERSION_DEFAULT = \"8.1\"\nNETCORE_VERSION_DEFAULT = \"2.0\"\n# TODO: Remove this once we have the api returning the versions\nNODE_VERSIONS = ['4.4', '4.5', '6.2', '6.6', '6.9', '6.11', '8.0', '8.1']\nNETCORE_VERSIONS = ['1.0', '1.1', '2.0']\nNODE_RUNTIME_NAME = \"node\"\nNETCORE_RUNTIME_NAME = \"dotnetcore\"\nOS_DEFAULT = \"Windows\"\n", "path": "src/webapp/azext_webapp/_constants.py"}, {"content": "#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom codecs import open\nfrom setuptools import setup, find_packages\n\nVERSION = \"0.1.1\"\n\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'License :: OSI Approved :: MIT License',\n]\n\nDEPENDENCIES = []\n\nsetup(\n name='webapp',\n version=VERSION,\n description='An Azure CLI Extension to manage appservice resources',\n long_description='An Azure CLI Extension to manage appservice resources',\n license='MIT',\n author='Sisira Panchagnula',\n author_email='[email protected]',\n url='https://github.com/Azure/azure-cli-extensions',\n classifiers=CLASSIFIERS,\n packages=find_packages(exclude=[\"tests\"]),\n install_requires=DEPENDENCIES\n)\n", "path": "src/webapp/setup.py"}], "after_files": [{"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom __future__ import print_function\nfrom knack.log import get_logger\n\nfrom azure.mgmt.web.models import (AppServicePlan, SkuDescription)\n\nfrom azure.cli.command_modules.appservice.custom import (\n enable_zip_deploy,\n create_webapp,\n update_app_settings,\n _get_site_credential,\n _get_scm_url,\n _get_sku_name)\n\nfrom .create_util import (\n zip_contents_from_dir,\n get_runtime_version_details,\n create_resource_group,\n check_resource_group_exists,\n check_resource_group_supports_os,\n check_if_asp_exists,\n check_app_exists,\n get_lang_from_content,\n web_client_factory\n)\n\nfrom ._constants import (NODE_RUNTIME_NAME, OS_DEFAULT)\n\nlogger = get_logger(__name__)\n\n# pylint:disable=no-member,too-many-lines,too-many-locals,too-many-statements\n\n\ndef create_deploy_webapp(cmd, name, location=None, dryrun=False):\n import os\n import json\n\n client = web_client_factory(cmd.cli_ctx)\n # the code to deploy is expected to be the current directory the command is running from\n src_dir = os.getcwd()\n\n # if dir is empty, show a message in dry run\n do_deployment = False if os.listdir(src_dir) == [] else True\n\n # determine the details for app to be created from src contents\n lang_details = get_lang_from_content(src_dir)\n # we support E2E create and deploy for Node & dotnetcore, any other stack, set defaults for os & runtime\n # and skip deployment\n if lang_details['language'] is None:\n do_deployment = False\n sku = 'F1'\n os_val = OS_DEFAULT\n detected_version = '-'\n runtime_version = '-'\n else:\n sku = lang_details.get(\"default_sku\")\n language = lang_details.get(\"language\")\n os_val = \"Linux\" if language.lower() == NODE_RUNTIME_NAME else OS_DEFAULT\n # detect the version\n data = get_runtime_version_details(lang_details.get('file_loc'), language)\n version_used_create = data.get('to_create')\n detected_version = data.get('detected')\n runtime_version = \"{}|{}\".format(language, version_used_create)\n\n if location is None:\n locs = client.list_geo_regions(sku, True)\n available_locs = []\n for loc in locs:\n available_locs.append(loc.geo_region_name)\n location = available_locs[0]\n # Remove spaces from the location string, incase the GeoRegion string is used\n loc_name = location.replace(\" \", \"\")\n full_sku = _get_sku_name(sku)\n\n is_linux = True if os_val == 'Linux' else False\n\n asp = \"appsvc_asp_{}_{}\".format(os_val, loc_name)\n rg_name = \"appsvc_rg_{}_{}\".format(os_val, loc_name)\n\n str_no_contents_warn = \"\"\n if not do_deployment:\n str_no_contents_warn = \"[Empty directory, no deployment will be triggered]\"\n\n # Resource group: check if default RG is set\n default_rg = cmd.cli_ctx.config.get('defaults', 'group', fallback=None)\n if default_rg and check_resource_group_supports_os(cmd, default_rg, location, is_linux):\n rg_name = default_rg\n rg_mssg = \"[Using default Resource group]\"\n else:\n rg_mssg = \"\"\n\n src_path = \"{} {}\".format(src_dir.replace(\"\\\\\", \"\\\\\\\\\"), str_no_contents_warn)\n rg_str = \"{} {}\".format(rg_name, rg_mssg)\n\n dry_run_str = r\"\"\" {\n \"name\" : \"%s\",\n \"serverfarm\" : \"%s\",\n \"resourcegroup\" : \"%s\",\n \"sku\": \"%s\",\n \"os\": \"%s\",\n \"location\" : \"%s\",\n \"src_path\" : \"%s\",\n \"version_detected\": \"%s\",\n \"version_to_create\": \"%s\"\n }\n \"\"\" % (name, asp, rg_str, full_sku, os_val, location, src_path,\n 
detected_version, runtime_version)\n create_json = json.loads(dry_run_str)\n\n if dryrun:\n logger.warning(\"Web app will be created with the below configuration,re-run command \"\n \"without the --dryrun flag to create & deploy a new app\")\n return create_json\n\n # create RG if the RG doesn't already exist\n if not check_resource_group_exists(cmd, rg_name):\n logger.warning(\"Creating Resource group '%s' ...\", rg_name)\n create_resource_group(cmd, rg_name, location)\n logger.warning(\"Resource group creation complete\")\n else:\n logger.warning(\"Resource group '%s' already exists.\", rg_name)\n\n # create asp\n if not check_if_asp_exists(cmd, rg_name, asp):\n logger.warning(\"Creating App service plan '%s' ...\", asp)\n sku_def = SkuDescription(tier=full_sku, name=sku, capacity=(1 if is_linux else None))\n plan_def = AppServicePlan(loc_name, app_service_plan_name=asp,\n sku=sku_def, reserved=(is_linux or None))\n client.app_service_plans.create_or_update(rg_name, asp, plan_def)\n logger.warning(\"App service plan creation complete\")\n else:\n logger.warning(\"App service plan '%s' already exists.\", asp)\n\n # create the app\n if not check_app_exists(cmd, rg_name, name):\n logger.warning(\"Creating app '%s' ....\", name)\n app_created = create_webapp(cmd, rg_name, name, asp, runtime_version if is_linux else None)\n # update create_json to include the app_url\n url = app_created.enabled_host_names[0] # picks the custom domain URL incase a domain is assigned\n url = 'https://' + url\n logger.warning(\"Webapp creation complete\")\n else:\n logger.warning(\"App '%s' already exists\", name)\n\n if do_deployment:\n # setting to build after deployment\n logger.warning(\"Updating app settings to enable build after deployment\")\n update_app_settings(cmd, rg_name, name, [\"SCM_DO_BUILD_DURING_DEPLOYMENT=true\"])\n # work around until the timeout limits issue for linux is investigated & fixed\n # wakeup kudu, by making an SCM call\n\n import requests\n # work around until the timeout limits issue for linux is investigated & fixed\n user_name, password = _get_site_credential(cmd.cli_ctx, rg_name, name)\n scm_url = _get_scm_url(cmd, rg_name, name)\n import urllib3\n authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))\n requests.get(scm_url + '/api/settings', headers=authorization)\n\n logger.warning(\"Creating zip with contents of dir %s ...\", src_dir)\n # zip contents & deploy\n zip_file_path = zip_contents_from_dir(src_dir, language)\n\n logger.warning(\"Deploying and building contents to app.\"\n \"This operation can take some time to finish...\")\n enable_zip_deploy(cmd, rg_name, name, zip_file_path)\n # Remove the file afer deployment, handling exception if user removed the file manually\n try:\n os.remove(zip_file_path)\n except OSError:\n pass\n else:\n logger.warning(\"No 'NODE' or 'DOTNETCORE' package detected, skipping zip and deploy process\")\n create_json.update({'app_url': url})\n logger.warning(\"All done.\")\n return create_json\n", "path": "src/webapp/azext_webapp/custom.py"}, {"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nNODE_VERSION_DEFAULT = \"8.9\"\nNETCORE_VERSION_DEFAULT = \"2.0\"\n# TODO: Remove this once we have the api returning the versions\nNODE_VERSIONS = ['4.4', '4.5', '6.2', '6.6', '6.9', '6.11', '8.0', '8.1']\nNETCORE_VERSIONS = ['1.0', '1.1', '2.0']\nNODE_RUNTIME_NAME = \"node\"\nNETCORE_RUNTIME_NAME = \"dotnetcore\"\nOS_DEFAULT = \"Windows\"\n", "path": "src/webapp/azext_webapp/_constants.py"}, {"content": "#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom codecs import open\nfrom setuptools import setup, find_packages\n\nVERSION = \"0.1.2\"\n\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'License :: OSI Approved :: MIT License',\n]\n\nDEPENDENCIES = []\n\nsetup(\n name='webapp',\n version=VERSION,\n description='An Azure CLI Extension to manage appservice resources',\n long_description='An Azure CLI Extension to manage appservice resources',\n license='MIT',\n author='Sisira Panchagnula',\n author_email='[email protected]',\n url='https://github.com/Azure/azure-cli-extensions',\n classifiers=CLASSIFIERS,\n packages=find_packages(exclude=[\"tests\"]),\n install_requires=DEPENDENCIES\n)\n", "path": "src/webapp/setup.py"}]} | 2,885 | 817 |
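Beyond bumping `NODE_VERSION_DEFAULT` to 8.9, the Azure patch above also makes `create_deploy_webapp` return the summary dict (now carrying an `app_url`) and delete the deployment zip on a best-effort basis. The sketch below models just those post-deployment steps in isolation; `finish_deployment`, the app name, and the URL are placeholders invented for this example rather than parts of the CLI extension.

```python
import json
import os

def finish_deployment(zip_file_path, create_json, app_url):
    # Post-deployment steps from the patched create_deploy_webapp: remove the
    # local zip if it still exists, attach the app URL, and return the summary
    # instead of merely logging it.
    try:
        os.remove(zip_file_path)
    except OSError:
        pass                        # file already gone (e.g. removed manually)
    create_json.update({'app_url': app_url})
    return create_json

# Placeholder inputs, only to show the shape of the returned summary
summary = finish_deployment(
    'app_contents.zip',             # may or may not exist; both cases are fine
    {"name": "myapp", "sku": "F1", "os": "Linux"},
    'https://myapp.azurewebsites.net',
)
print(json.dumps(summary, indent=4, sort_keys=True))
```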
gh_patches_debug_40941 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-3349 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Modify schemas API to accept connection IDs instead of connection nicknames
## Context
I'm working on some frontend changes to [allow the user to edit connection names](https://github.com/mathesar-foundation/mathesar/issues/3333), and this work has had some cascading implications. Before we allow the user to edit the connection name, I want to make sure that everywhere we're handling connections on the frontend, we're using ids (not names) to identify them. I've started a [PR](https://github.com/mathesar-foundation/mathesar/pull/3341) with that refactoring work, and it's coming along nicely. However, I've hit a small snag.
## Problem
Our schemas API at `/api/db/v0/schemas/` requires the client to supply the connection nickname when listing schemas or creating a schema. I'm at a point in this refactor where I _can_ make changes on the front end to supply that nickname to the API, but it's going to be a bit weird on the frontend side because there are places where I need the nickname and only have the id. To me it seems much cleaner to change it on the backend side so that the API accepts ids instead of nicknames.
## The change I want
Currently the API requests look like this:
- `GET /api/db/v0/schemas/?database=mathesar_tables`
- `POST /api/db/v0/schemas/ { "name": "foo", "database": "mathesar_tables" }`
Instead, I'd like them to look like this:
- `GET /api/db/v0/schemas/?connection_id=3`
- `POST /api/db/v0/schemas/ { "name": "foo", "connection_id": 3 }`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mathesar/api/serializers/schemas.py`
Content:
```
1 from rest_access_policy import PermittedSlugRelatedField
2 from rest_framework import serializers
3
4 from db.identifiers import is_identifier_too_long
5
6 from mathesar.api.db.permissions.table import TableAccessPolicy
7 from mathesar.api.db.permissions.database import DatabaseAccessPolicy
8 from mathesar.api.exceptions.mixins import MathesarErrorMessageMixin
9 from mathesar.models.base import Database, Schema, Table
10 from mathesar.api.exceptions.database_exceptions import (
11 exceptions as database_api_exceptions
12 )
13
14
15 class SchemaSerializer(MathesarErrorMessageMixin, serializers.HyperlinkedModelSerializer):
16 name = serializers.CharField()
17 # Restrict access to databases with create access.
18 # Unlike PermittedPkRelatedField this field uses a slug instead of an id
19 # Refer https://rsinger86.github.io/drf-access-policy/policy_reuse/
20 database = PermittedSlugRelatedField(
21 access_policy=DatabaseAccessPolicy,
22 slug_field='name',
23 queryset=Database.current_objects.all()
24 )
25 description = serializers.CharField(
26 required=False, allow_blank=True, default=None, allow_null=True
27 )
28 num_tables = serializers.SerializerMethodField()
29 num_queries = serializers.SerializerMethodField()
30
31 class Meta:
32 model = Schema
33 fields = [
34 'id', 'name', 'database', 'has_dependents', 'description',
35 'num_tables', 'num_queries'
36 ]
37
38 def get_num_tables(self, obj):
39 qs = Table.objects.filter(schema=obj)
40 count = TableAccessPolicy.scope_queryset(self.context['request'], qs).count()
41 return count
42
43 def get_num_queries(self, obj):
44 return sum(t.queries.count() for t in obj.tables.all())
45
46 def validate_name(self, name):
47 if is_identifier_too_long(name):
48 raise database_api_exceptions.IdentifierTooLong(field='name')
49 return name
50
```
Path: `mathesar/api/db/viewsets/schemas.py`
Content:
```
1 from django_filters import rest_framework as filters
2 from rest_access_policy import AccessViewSetMixin
3 from rest_framework import status, viewsets
4 from rest_framework.decorators import action
5 from rest_framework.mixins import ListModelMixin, RetrieveModelMixin
6 from rest_framework.response import Response
7
8 from mathesar.api.db.permissions.schema import SchemaAccessPolicy
9 from mathesar.api.dj_filters import SchemaFilter
10 from mathesar.api.pagination import DefaultLimitOffsetPagination
11 from mathesar.api.serializers.dependents import DependentSerializer, DependentFilterSerializer
12 from mathesar.api.serializers.schemas import SchemaSerializer
13 from mathesar.models.base import Schema
14 from mathesar.utils.schemas import create_schema_and_object
15 from mathesar.api.exceptions.validation_exceptions.exceptions import EditingPublicSchemaIsDisallowed
16
17
18 class SchemaViewSet(AccessViewSetMixin, viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin):
19 serializer_class = SchemaSerializer
20 pagination_class = DefaultLimitOffsetPagination
21 filter_backends = (filters.DjangoFilterBackend,)
22 filterset_class = SchemaFilter
23 access_policy = SchemaAccessPolicy
24
25 def get_queryset(self):
26 qs = Schema.objects.all().order_by('-created_at')
27 return self.access_policy.scope_viewset_queryset(self.request, qs)
28
29 def create(self, request):
30 serializer = SchemaSerializer(data=request.data, context={'request': request})
31 serializer.is_valid(raise_exception=True)
32 database_name = serializer.validated_data['database'].name
33 schema = create_schema_and_object(
34 serializer.validated_data['name'],
35 database_name,
36 comment=serializer.validated_data.get('description')
37 )
38 serializer = SchemaSerializer(schema, context={'request': request})
39 return Response(serializer.data, status=status.HTTP_201_CREATED)
40
41 def partial_update(self, request, pk=None):
42 serializer = SchemaSerializer(
43 data=request.data, context={'request': request}, partial=True
44 )
45 serializer.is_valid(raise_exception=True)
46
47 schema = self.get_object()
48
49 # We forbid editing the public schema
50 if schema.name == "public":
51 raise EditingPublicSchemaIsDisallowed()
52
53 schema.update_sa_schema(serializer.validated_data)
54
55 # Reload the schema to avoid cached properties
56 schema = self.get_object()
57 schema.clear_name_cache()
58 serializer = SchemaSerializer(schema, context={'request': request})
59 return Response(serializer.data)
60
61 def destroy(self, request, pk=None):
62 schema = self.get_object()
63 schema.delete_sa_schema()
64 return Response(status=status.HTTP_204_NO_CONTENT)
65
66 @action(methods=['get'], detail=True)
67 def dependents(self, request, pk=None):
68 serializer = DependentFilterSerializer(data=request.GET)
69 serializer.is_valid(raise_exception=True)
70 types_exclude = serializer.validated_data['exclude']
71
72 schema = self.get_object()
73 serializer = DependentSerializer(schema.get_dependents(types_exclude), many=True, context={'request': request})
74 return Response(serializer.data)
75
```
Path: `mathesar/utils/schemas.py`
Content:
```
1 from django.core.exceptions import ObjectDoesNotExist
2 from rest_framework.exceptions import ValidationError
3
4 from db.schemas.operations.create import create_schema
5 from db.schemas.utils import get_schema_oid_from_name, get_mathesar_schemas
6 from mathesar.database.base import create_mathesar_engine
7 from mathesar.models.base import Schema, Database
8
9
10 def create_schema_and_object(name, database, comment=None):
11 try:
12 database_model = Database.objects.get(name=database)
13 except ObjectDoesNotExist:
14 raise ValidationError({"database": f"Database '{database}' not found"})
15
16 engine = create_mathesar_engine(database_model)
17
18 all_schemas = get_mathesar_schemas(engine)
19 if name in all_schemas:
20 raise ValidationError({"name": f"Schema name {name} is not unique"})
21 create_schema(name, engine, comment=comment)
22 schema_oid = get_schema_oid_from_name(name, engine)
23
24 schema = Schema.objects.create(oid=schema_oid, database=database_model)
25 return schema
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mathesar/api/db/viewsets/schemas.py b/mathesar/api/db/viewsets/schemas.py
--- a/mathesar/api/db/viewsets/schemas.py
+++ b/mathesar/api/db/viewsets/schemas.py
@@ -24,15 +24,18 @@
def get_queryset(self):
qs = Schema.objects.all().order_by('-created_at')
+ connection_id = self.request.query_params.get('connection_id')
+ if connection_id:
+ qs = qs.filter(database=connection_id)
return self.access_policy.scope_viewset_queryset(self.request, qs)
def create(self, request):
serializer = SchemaSerializer(data=request.data, context={'request': request})
serializer.is_valid(raise_exception=True)
- database_name = serializer.validated_data['database'].name
+ connection_id = serializer.validated_data['database'].id
schema = create_schema_and_object(
serializer.validated_data['name'],
- database_name,
+ connection_id,
comment=serializer.validated_data.get('description')
)
serializer = SchemaSerializer(schema, context={'request': request})
diff --git a/mathesar/api/serializers/schemas.py b/mathesar/api/serializers/schemas.py
--- a/mathesar/api/serializers/schemas.py
+++ b/mathesar/api/serializers/schemas.py
@@ -1,4 +1,4 @@
-from rest_access_policy import PermittedSlugRelatedField
+from rest_access_policy import PermittedPkRelatedField
from rest_framework import serializers
from db.identifiers import is_identifier_too_long
@@ -15,11 +15,10 @@
class SchemaSerializer(MathesarErrorMessageMixin, serializers.HyperlinkedModelSerializer):
name = serializers.CharField()
# Restrict access to databases with create access.
- # Unlike PermittedPkRelatedField this field uses a slug instead of an id
# Refer https://rsinger86.github.io/drf-access-policy/policy_reuse/
- database = PermittedSlugRelatedField(
+ connection_id = PermittedPkRelatedField(
+ source='database',
access_policy=DatabaseAccessPolicy,
- slug_field='name',
queryset=Database.current_objects.all()
)
description = serializers.CharField(
@@ -31,7 +30,7 @@
class Meta:
model = Schema
fields = [
- 'id', 'name', 'database', 'has_dependents', 'description',
+ 'id', 'name', 'connection_id', 'has_dependents', 'description',
'num_tables', 'num_queries'
]
diff --git a/mathesar/utils/schemas.py b/mathesar/utils/schemas.py
--- a/mathesar/utils/schemas.py
+++ b/mathesar/utils/schemas.py
@@ -7,11 +7,12 @@
from mathesar.models.base import Schema, Database
-def create_schema_and_object(name, database, comment=None):
+def create_schema_and_object(name, connection_id, comment=None):
try:
- database_model = Database.objects.get(name=database)
+ database_model = Database.objects.get(id=connection_id)
+ database_name = database_model.name
except ObjectDoesNotExist:
- raise ValidationError({"database": f"Database '{database}' not found"})
+ raise ValidationError({"database": f"Database '{database_name}' not found"})
engine = create_mathesar_engine(database_model)
| {"golden_diff": "diff --git a/mathesar/api/db/viewsets/schemas.py b/mathesar/api/db/viewsets/schemas.py\n--- a/mathesar/api/db/viewsets/schemas.py\n+++ b/mathesar/api/db/viewsets/schemas.py\n@@ -24,15 +24,18 @@\n \n def get_queryset(self):\n qs = Schema.objects.all().order_by('-created_at')\n+ connection_id = self.request.query_params.get('connection_id')\n+ if connection_id:\n+ qs = qs.filter(database=connection_id)\n return self.access_policy.scope_viewset_queryset(self.request, qs)\n \n def create(self, request):\n serializer = SchemaSerializer(data=request.data, context={'request': request})\n serializer.is_valid(raise_exception=True)\n- database_name = serializer.validated_data['database'].name\n+ connection_id = serializer.validated_data['database'].id\n schema = create_schema_and_object(\n serializer.validated_data['name'],\n- database_name,\n+ connection_id,\n comment=serializer.validated_data.get('description')\n )\n serializer = SchemaSerializer(schema, context={'request': request})\ndiff --git a/mathesar/api/serializers/schemas.py b/mathesar/api/serializers/schemas.py\n--- a/mathesar/api/serializers/schemas.py\n+++ b/mathesar/api/serializers/schemas.py\n@@ -1,4 +1,4 @@\n-from rest_access_policy import PermittedSlugRelatedField\n+from rest_access_policy import PermittedPkRelatedField\n from rest_framework import serializers\n \n from db.identifiers import is_identifier_too_long\n@@ -15,11 +15,10 @@\n class SchemaSerializer(MathesarErrorMessageMixin, serializers.HyperlinkedModelSerializer):\n name = serializers.CharField()\n # Restrict access to databases with create access.\n- # Unlike PermittedPkRelatedField this field uses a slug instead of an id\n # Refer https://rsinger86.github.io/drf-access-policy/policy_reuse/\n- database = PermittedSlugRelatedField(\n+ connection_id = PermittedPkRelatedField(\n+ source='database',\n access_policy=DatabaseAccessPolicy,\n- slug_field='name',\n queryset=Database.current_objects.all()\n )\n description = serializers.CharField(\n@@ -31,7 +30,7 @@\n class Meta:\n model = Schema\n fields = [\n- 'id', 'name', 'database', 'has_dependents', 'description',\n+ 'id', 'name', 'connection_id', 'has_dependents', 'description',\n 'num_tables', 'num_queries'\n ]\n \ndiff --git a/mathesar/utils/schemas.py b/mathesar/utils/schemas.py\n--- a/mathesar/utils/schemas.py\n+++ b/mathesar/utils/schemas.py\n@@ -7,11 +7,12 @@\n from mathesar.models.base import Schema, Database\n \n \n-def create_schema_and_object(name, database, comment=None):\n+def create_schema_and_object(name, connection_id, comment=None):\n try:\n- database_model = Database.objects.get(name=database)\n+ database_model = Database.objects.get(id=connection_id)\n+ database_name = database_model.name\n except ObjectDoesNotExist:\n- raise ValidationError({\"database\": f\"Database '{database}' not found\"})\n+ raise ValidationError({\"database\": f\"Database '{database_name}' not found\"})\n \n engine = create_mathesar_engine(database_model)\n", "issue": "Modify schemas API to accept connection IDs instead of connection nicknames\n## Context\r\n\r\nI'm working on some frontend changes to [allow the user to edit connection names](https://github.com/mathesar-foundation/mathesar/issues/3333), and this work has had some cascading implications. Before we allow the user to edit the connection name, I want to make sure that everywhere we're handling connections on the frontend, we're using ids (not names) to identify them. 
I've started a [PR](https://github.com/mathesar-foundation/mathesar/pull/3341) with that refactoring work, and it's coming along nicely. However, I've hit a small snag.\r\n\r\n## Problem\r\n\r\nOur schemas API at `/api/db/v0/schemas/` requires the client to supply the connection nickname when listing schemas or creating a schema. I'm at a point in this refactor where I _can_ make changes on the front end to supply that nickname to the API, but it's going to be a bit weird on the frontend side because there are places where I need the nickname and only have the id. To me it seems much cleaner to change it on the backend side so that the API accepts ids instead of nicknames.\r\n\r\n## The change I want\r\n\r\nCurrently the API requests look like this:\r\n\r\n- `GET /api/db/v0/schemas/?database=mathesar_tables`\r\n- `POST /api/db/v0/schemas/ { \"name\": \"foo\", \"database\": \"mathesar_tables\" }`\r\n\r\nInstead, I'd like them to look like this:\r\n\r\n- `GET /api/db/v0/schemas/?connection_id=3`\r\n- `POST /api/db/v0/schemas/ { \"name\": \"foo\", \"connection_id\": 3 }`\r\n\r\n\n", "before_files": [{"content": "from rest_access_policy import PermittedSlugRelatedField\nfrom rest_framework import serializers\n\nfrom db.identifiers import is_identifier_too_long\n\nfrom mathesar.api.db.permissions.table import TableAccessPolicy\nfrom mathesar.api.db.permissions.database import DatabaseAccessPolicy\nfrom mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\nfrom mathesar.models.base import Database, Schema, Table\nfrom mathesar.api.exceptions.database_exceptions import (\n exceptions as database_api_exceptions\n)\n\n\nclass SchemaSerializer(MathesarErrorMessageMixin, serializers.HyperlinkedModelSerializer):\n name = serializers.CharField()\n # Restrict access to databases with create access.\n # Unlike PermittedPkRelatedField this field uses a slug instead of an id\n # Refer https://rsinger86.github.io/drf-access-policy/policy_reuse/\n database = PermittedSlugRelatedField(\n access_policy=DatabaseAccessPolicy,\n slug_field='name',\n queryset=Database.current_objects.all()\n )\n description = serializers.CharField(\n required=False, allow_blank=True, default=None, allow_null=True\n )\n num_tables = serializers.SerializerMethodField()\n num_queries = serializers.SerializerMethodField()\n\n class Meta:\n model = Schema\n fields = [\n 'id', 'name', 'database', 'has_dependents', 'description',\n 'num_tables', 'num_queries'\n ]\n\n def get_num_tables(self, obj):\n qs = Table.objects.filter(schema=obj)\n count = TableAccessPolicy.scope_queryset(self.context['request'], qs).count()\n return count\n\n def get_num_queries(self, obj):\n return sum(t.queries.count() for t in obj.tables.all())\n\n def validate_name(self, name):\n if is_identifier_too_long(name):\n raise database_api_exceptions.IdentifierTooLong(field='name')\n return name\n", "path": "mathesar/api/serializers/schemas.py"}, {"content": "from django_filters import rest_framework as filters\nfrom rest_access_policy import AccessViewSetMixin\nfrom rest_framework import status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.mixins import ListModelMixin, RetrieveModelMixin\nfrom rest_framework.response import Response\n\nfrom mathesar.api.db.permissions.schema import SchemaAccessPolicy\nfrom mathesar.api.dj_filters import SchemaFilter\nfrom mathesar.api.pagination import DefaultLimitOffsetPagination\nfrom mathesar.api.serializers.dependents import DependentSerializer, DependentFilterSerializer\nfrom 
mathesar.api.serializers.schemas import SchemaSerializer\nfrom mathesar.models.base import Schema\nfrom mathesar.utils.schemas import create_schema_and_object\nfrom mathesar.api.exceptions.validation_exceptions.exceptions import EditingPublicSchemaIsDisallowed\n\n\nclass SchemaViewSet(AccessViewSetMixin, viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin):\n serializer_class = SchemaSerializer\n pagination_class = DefaultLimitOffsetPagination\n filter_backends = (filters.DjangoFilterBackend,)\n filterset_class = SchemaFilter\n access_policy = SchemaAccessPolicy\n\n def get_queryset(self):\n qs = Schema.objects.all().order_by('-created_at')\n return self.access_policy.scope_viewset_queryset(self.request, qs)\n\n def create(self, request):\n serializer = SchemaSerializer(data=request.data, context={'request': request})\n serializer.is_valid(raise_exception=True)\n database_name = serializer.validated_data['database'].name\n schema = create_schema_and_object(\n serializer.validated_data['name'],\n database_name,\n comment=serializer.validated_data.get('description')\n )\n serializer = SchemaSerializer(schema, context={'request': request})\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n def partial_update(self, request, pk=None):\n serializer = SchemaSerializer(\n data=request.data, context={'request': request}, partial=True\n )\n serializer.is_valid(raise_exception=True)\n\n schema = self.get_object()\n\n # We forbid editing the public schema\n if schema.name == \"public\":\n raise EditingPublicSchemaIsDisallowed()\n\n schema.update_sa_schema(serializer.validated_data)\n\n # Reload the schema to avoid cached properties\n schema = self.get_object()\n schema.clear_name_cache()\n serializer = SchemaSerializer(schema, context={'request': request})\n return Response(serializer.data)\n\n def destroy(self, request, pk=None):\n schema = self.get_object()\n schema.delete_sa_schema()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n @action(methods=['get'], detail=True)\n def dependents(self, request, pk=None):\n serializer = DependentFilterSerializer(data=request.GET)\n serializer.is_valid(raise_exception=True)\n types_exclude = serializer.validated_data['exclude']\n\n schema = self.get_object()\n serializer = DependentSerializer(schema.get_dependents(types_exclude), many=True, context={'request': request})\n return Response(serializer.data)\n", "path": "mathesar/api/db/viewsets/schemas.py"}, {"content": "from django.core.exceptions import ObjectDoesNotExist\nfrom rest_framework.exceptions import ValidationError\n\nfrom db.schemas.operations.create import create_schema\nfrom db.schemas.utils import get_schema_oid_from_name, get_mathesar_schemas\nfrom mathesar.database.base import create_mathesar_engine\nfrom mathesar.models.base import Schema, Database\n\n\ndef create_schema_and_object(name, database, comment=None):\n try:\n database_model = Database.objects.get(name=database)\n except ObjectDoesNotExist:\n raise ValidationError({\"database\": f\"Database '{database}' not found\"})\n\n engine = create_mathesar_engine(database_model)\n\n all_schemas = get_mathesar_schemas(engine)\n if name in all_schemas:\n raise ValidationError({\"name\": f\"Schema name {name} is not unique\"})\n create_schema(name, engine, comment=comment)\n schema_oid = get_schema_oid_from_name(name, engine)\n\n schema = Schema.objects.create(oid=schema_oid, database=database_model)\n return schema\n", "path": "mathesar/utils/schemas.py"}], "after_files": [{"content": "from rest_access_policy 
import PermittedPkRelatedField\nfrom rest_framework import serializers\n\nfrom db.identifiers import is_identifier_too_long\n\nfrom mathesar.api.db.permissions.table import TableAccessPolicy\nfrom mathesar.api.db.permissions.database import DatabaseAccessPolicy\nfrom mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\nfrom mathesar.models.base import Database, Schema, Table\nfrom mathesar.api.exceptions.database_exceptions import (\n exceptions as database_api_exceptions\n)\n\n\nclass SchemaSerializer(MathesarErrorMessageMixin, serializers.HyperlinkedModelSerializer):\n name = serializers.CharField()\n # Restrict access to databases with create access.\n # Refer https://rsinger86.github.io/drf-access-policy/policy_reuse/\n connection_id = PermittedPkRelatedField(\n source='database',\n access_policy=DatabaseAccessPolicy,\n queryset=Database.current_objects.all()\n )\n description = serializers.CharField(\n required=False, allow_blank=True, default=None, allow_null=True\n )\n num_tables = serializers.SerializerMethodField()\n num_queries = serializers.SerializerMethodField()\n\n class Meta:\n model = Schema\n fields = [\n 'id', 'name', 'connection_id', 'has_dependents', 'description',\n 'num_tables', 'num_queries'\n ]\n\n def get_num_tables(self, obj):\n qs = Table.objects.filter(schema=obj)\n count = TableAccessPolicy.scope_queryset(self.context['request'], qs).count()\n return count\n\n def get_num_queries(self, obj):\n return sum(t.queries.count() for t in obj.tables.all())\n\n def validate_name(self, name):\n if is_identifier_too_long(name):\n raise database_api_exceptions.IdentifierTooLong(field='name')\n return name\n", "path": "mathesar/api/serializers/schemas.py"}, {"content": "from django_filters import rest_framework as filters\nfrom rest_access_policy import AccessViewSetMixin\nfrom rest_framework import status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.mixins import ListModelMixin, RetrieveModelMixin\nfrom rest_framework.response import Response\n\nfrom mathesar.api.db.permissions.schema import SchemaAccessPolicy\nfrom mathesar.api.dj_filters import SchemaFilter\nfrom mathesar.api.pagination import DefaultLimitOffsetPagination\nfrom mathesar.api.serializers.dependents import DependentSerializer, DependentFilterSerializer\nfrom mathesar.api.serializers.schemas import SchemaSerializer\nfrom mathesar.models.base import Schema\nfrom mathesar.utils.schemas import create_schema_and_object\nfrom mathesar.api.exceptions.validation_exceptions.exceptions import EditingPublicSchemaIsDisallowed\n\n\nclass SchemaViewSet(AccessViewSetMixin, viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin):\n serializer_class = SchemaSerializer\n pagination_class = DefaultLimitOffsetPagination\n filter_backends = (filters.DjangoFilterBackend,)\n filterset_class = SchemaFilter\n access_policy = SchemaAccessPolicy\n\n def get_queryset(self):\n qs = Schema.objects.all().order_by('-created_at')\n connection_id = self.request.query_params.get('connection_id')\n if connection_id:\n qs = qs.filter(database=connection_id)\n return self.access_policy.scope_viewset_queryset(self.request, qs)\n\n def create(self, request):\n serializer = SchemaSerializer(data=request.data, context={'request': request})\n serializer.is_valid(raise_exception=True)\n connection_id = serializer.validated_data['database'].id\n schema = create_schema_and_object(\n serializer.validated_data['name'],\n connection_id,\n comment=serializer.validated_data.get('description')\n )\n serializer 
= SchemaSerializer(schema, context={'request': request})\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n def partial_update(self, request, pk=None):\n serializer = SchemaSerializer(\n data=request.data, context={'request': request}, partial=True\n )\n serializer.is_valid(raise_exception=True)\n\n schema = self.get_object()\n\n # We forbid editing the public schema\n if schema.name == \"public\":\n raise EditingPublicSchemaIsDisallowed()\n\n schema.update_sa_schema(serializer.validated_data)\n\n # Reload the schema to avoid cached properties\n schema = self.get_object()\n schema.clear_name_cache()\n serializer = SchemaSerializer(schema, context={'request': request})\n return Response(serializer.data)\n\n def destroy(self, request, pk=None):\n schema = self.get_object()\n schema.delete_sa_schema()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n @action(methods=['get'], detail=True)\n def dependents(self, request, pk=None):\n serializer = DependentFilterSerializer(data=request.GET)\n serializer.is_valid(raise_exception=True)\n types_exclude = serializer.validated_data['exclude']\n\n schema = self.get_object()\n serializer = DependentSerializer(schema.get_dependents(types_exclude), many=True, context={'request': request})\n return Response(serializer.data)\n", "path": "mathesar/api/db/viewsets/schemas.py"}, {"content": "from django.core.exceptions import ObjectDoesNotExist\nfrom rest_framework.exceptions import ValidationError\n\nfrom db.schemas.operations.create import create_schema\nfrom db.schemas.utils import get_schema_oid_from_name, get_mathesar_schemas\nfrom mathesar.database.base import create_mathesar_engine\nfrom mathesar.models.base import Schema, Database\n\n\ndef create_schema_and_object(name, connection_id, comment=None):\n try:\n database_model = Database.objects.get(id=connection_id)\n database_name = database_model.name\n except ObjectDoesNotExist:\n raise ValidationError({\"database\": f\"Database '{database_name}' not found\"})\n\n engine = create_mathesar_engine(database_model)\n\n all_schemas = get_mathesar_schemas(engine)\n if name in all_schemas:\n raise ValidationError({\"name\": f\"Schema name {name} is not unique\"})\n create_schema(name, engine, comment=comment)\n schema_oid = get_schema_oid_from_name(name, engine)\n\n schema = Schema.objects.create(oid=schema_oid, database=database_model)\n return schema\n", "path": "mathesar/utils/schemas.py"}]} | 2,162 | 728 |
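As a quick illustration of the reworked schemas API described above (connection IDs in place of connection nicknames), here is a minimal client-side sketch. It is not part of the dataset row: the base URL, the use of `requests`, and the response handling are assumptions; only the query parameter and payload shape come from the issue text and the diff above.

```python
# Sketch only: calls the id-based schemas endpoints described above.
# BASE is an assumed local Mathesar instance; adjust as needed.
import requests

BASE = "http://localhost:8000/api/db/v0/schemas/"

# List schemas for the connection with id 3 (was ?database=<nickname> before the change).
schemas = requests.get(BASE, params={"connection_id": 3}).json()

# Create a schema on that connection (was {"database": "<nickname>"} before the change).
created = requests.post(BASE, json={"name": "foo", "connection_id": 3}).json()
print(created["id"], created["name"])
```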
gh_patches_debug_34001 | rasdani/github-patches | git_diff | kornia__kornia-2526 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use Kornia resize in ResizePreprocessor in ObjectDetector
Use Kornia resize in ResizePreprocessor
_Originally posted by @edgarriba in https://github.com/kornia/kornia/pull/2363#discussion_r1257304346_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kornia/contrib/object_detection.py`
Content:
```
1 from __future__ import annotations
2
3 from typing import Any
4
5 import torch
6 import torch.nn.functional as F
7
8 from kornia.core import Module, Tensor, concatenate
9
10
11 class ResizePreProcessor(Module):
12 """This module resizes a list of image tensors to the given size, and also returns the original image sizes for
13 further post-processing."""
14
15 def __init__(self, size: int | tuple[int, int], interpolation_mode: str = "bilinear") -> None:
16 """
17 Args:
18 size: images will be resized to this value. If a 2-integer tuple is given, it is interpreted as
19 (height, width). If an integer is given, images will be resized to a square.
20 interpolation_mode: interpolation mode for image resizing. Supported values: ``nearest``, ``bilinear``,
21 ``bicubic``, ``area``, and ``nearest-exact``.
22 """
23 super().__init__()
24 self.size = size
25 self.interpolation_mode = interpolation_mode
26
27 def forward(self, imgs: list[Tensor]) -> tuple[Tensor, dict[str, Any]]:
28 # TODO: support other input formats e.g. file path, numpy
29 # NOTE: antialias=False is used in F.interpolate()
30 original_sizes = [(img.shape[1], img.shape[2]) for img in imgs]
31 resized_imgs = [F.interpolate(img.unsqueeze(0), self.size, mode=self.interpolation_mode) for img in imgs]
32 return concatenate(resized_imgs), {"original_size": original_sizes}
33
34
35 class ObjectDetector:
36 """This class wraps an object detection model and performs pre-processing and post-processing."""
37
38 def __init__(self, model: Module, pre_processor: Module, post_processor: Module) -> None:
39 """Construct an Object Detector object.
40
41 Args:
42 model: an object detection model.
43 pre_processor: a pre-processing module
44 post_processor: a post-processing module.
45 """
46 super().__init__()
47 self.model = model.eval()
48 self.pre_processor = pre_processor.eval()
49 self.post_processor = post_processor.eval()
50
51 @torch.inference_mode()
52 def predict(self, imgs: list[Tensor]) -> list[Tensor]:
53 """Detect objects in a given list of images.
54
55 Args:
56 imgs: list of RGB images. Each image is a Tensor with shape :math:`(3, H, W)`.
57
58 Returns:
59 list of detections found in each image. For item in a batch, shape is :math:`(D, 6)`, where :math:`D` is the
60 number of detections in the given image, :math:`6` represents class id, score, and `xywh` bounding box.
61 """
62 imgs, meta = self.pre_processor(imgs)
63 out = self.model(imgs)
64 detections = self.post_processor(out, meta)
65 return detections
66
67 def compile(
68 self,
69 *,
70 fullgraph: bool = False,
71 dynamic: bool = False,
72 backend: str = 'inductor',
73 mode: str | None = None,
74 options: dict[str, str | int | bool] | None = None,
75 disable: bool = False,
76 ) -> None:
77 """Compile the internal object detection model with :py:func:`torch.compile()`."""
78 self.model = torch.compile( # type: ignore
79 self.model,
80 fullgraph=fullgraph,
81 dynamic=dynamic,
82 backend=backend,
83 mode=mode,
84 options=options,
85 disable=disable,
86 )
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kornia/contrib/object_detection.py b/kornia/contrib/object_detection.py
--- a/kornia/contrib/object_detection.py
+++ b/kornia/contrib/object_detection.py
@@ -3,32 +3,32 @@
from typing import Any
import torch
-import torch.nn.functional as F
from kornia.core import Module, Tensor, concatenate
+from kornia.geometry.transform import Resize
class ResizePreProcessor(Module):
"""This module resizes a list of image tensors to the given size, and also returns the original image sizes for
further post-processing."""
- def __init__(self, size: int | tuple[int, int], interpolation_mode: str = "bilinear") -> None:
+ def __init__(self, size: tuple[int, int], interpolation_mode: str = "bilinear") -> None:
"""
Args:
size: images will be resized to this value. If a 2-integer tuple is given, it is interpreted as
- (height, width). If an integer is given, images will be resized to a square.
+ (height, width).
interpolation_mode: interpolation mode for image resizing. Supported values: ``nearest``, ``bilinear``,
``bicubic``, ``area``, and ``nearest-exact``.
"""
super().__init__()
self.size = size
- self.interpolation_mode = interpolation_mode
+ self.resizer = Resize(self.size, interpolation=interpolation_mode)
def forward(self, imgs: list[Tensor]) -> tuple[Tensor, dict[str, Any]]:
# TODO: support other input formats e.g. file path, numpy
# NOTE: antialias=False is used in F.interpolate()
original_sizes = [(img.shape[1], img.shape[2]) for img in imgs]
- resized_imgs = [F.interpolate(img.unsqueeze(0), self.size, mode=self.interpolation_mode) for img in imgs]
+ resized_imgs = [self.resizer(img.unsqueeze(0)) for img in imgs]
return concatenate(resized_imgs), {"original_size": original_sizes}
| {"golden_diff": "diff --git a/kornia/contrib/object_detection.py b/kornia/contrib/object_detection.py\n--- a/kornia/contrib/object_detection.py\n+++ b/kornia/contrib/object_detection.py\n@@ -3,32 +3,32 @@\n from typing import Any\n \n import torch\n-import torch.nn.functional as F\n \n from kornia.core import Module, Tensor, concatenate\n+from kornia.geometry.transform import Resize\n \n \n class ResizePreProcessor(Module):\n \"\"\"This module resizes a list of image tensors to the given size, and also returns the original image sizes for\n further post-processing.\"\"\"\n \n- def __init__(self, size: int | tuple[int, int], interpolation_mode: str = \"bilinear\") -> None:\n+ def __init__(self, size: tuple[int, int], interpolation_mode: str = \"bilinear\") -> None:\n \"\"\"\n Args:\n size: images will be resized to this value. If a 2-integer tuple is given, it is interpreted as\n- (height, width). If an integer is given, images will be resized to a square.\n+ (height, width).\n interpolation_mode: interpolation mode for image resizing. Supported values: ``nearest``, ``bilinear``,\n ``bicubic``, ``area``, and ``nearest-exact``.\n \"\"\"\n super().__init__()\n self.size = size\n- self.interpolation_mode = interpolation_mode\n+ self.resizer = Resize(self.size, interpolation=interpolation_mode)\n \n def forward(self, imgs: list[Tensor]) -> tuple[Tensor, dict[str, Any]]:\n # TODO: support other input formats e.g. file path, numpy\n # NOTE: antialias=False is used in F.interpolate()\n original_sizes = [(img.shape[1], img.shape[2]) for img in imgs]\n- resized_imgs = [F.interpolate(img.unsqueeze(0), self.size, mode=self.interpolation_mode) for img in imgs]\n+ resized_imgs = [self.resizer(img.unsqueeze(0)) for img in imgs]\n return concatenate(resized_imgs), {\"original_size\": original_sizes}\n", "issue": "Use Kornia resize in ResizePreprocessor in ObjectDetector\n Use Kornia resize in ResizePreprocessor\r\n\r\n_Originally posted by @edgarriba in https://github.com/kornia/kornia/pull/2363#discussion_r1257304346_\r\n \n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any\n\nimport torch\nimport torch.nn.functional as F\n\nfrom kornia.core import Module, Tensor, concatenate\n\n\nclass ResizePreProcessor(Module):\n \"\"\"This module resizes a list of image tensors to the given size, and also returns the original image sizes for\n further post-processing.\"\"\"\n\n def __init__(self, size: int | tuple[int, int], interpolation_mode: str = \"bilinear\") -> None:\n \"\"\"\n Args:\n size: images will be resized to this value. If a 2-integer tuple is given, it is interpreted as\n (height, width). If an integer is given, images will be resized to a square.\n interpolation_mode: interpolation mode for image resizing. Supported values: ``nearest``, ``bilinear``,\n ``bicubic``, ``area``, and ``nearest-exact``.\n \"\"\"\n super().__init__()\n self.size = size\n self.interpolation_mode = interpolation_mode\n\n def forward(self, imgs: list[Tensor]) -> tuple[Tensor, dict[str, Any]]:\n # TODO: support other input formats e.g. 
file path, numpy\n # NOTE: antialias=False is used in F.interpolate()\n original_sizes = [(img.shape[1], img.shape[2]) for img in imgs]\n resized_imgs = [F.interpolate(img.unsqueeze(0), self.size, mode=self.interpolation_mode) for img in imgs]\n return concatenate(resized_imgs), {\"original_size\": original_sizes}\n\n\nclass ObjectDetector:\n \"\"\"This class wraps an object detection model and performs pre-processing and post-processing.\"\"\"\n\n def __init__(self, model: Module, pre_processor: Module, post_processor: Module) -> None:\n \"\"\"Construct an Object Detector object.\n\n Args:\n model: an object detection model.\n pre_processor: a pre-processing module\n post_processor: a post-processing module.\n \"\"\"\n super().__init__()\n self.model = model.eval()\n self.pre_processor = pre_processor.eval()\n self.post_processor = post_processor.eval()\n\n @torch.inference_mode()\n def predict(self, imgs: list[Tensor]) -> list[Tensor]:\n \"\"\"Detect objects in a given list of images.\n\n Args:\n imgs: list of RGB images. Each image is a Tensor with shape :math:`(3, H, W)`.\n\n Returns:\n list of detections found in each image. For item in a batch, shape is :math:`(D, 6)`, where :math:`D` is the\n number of detections in the given image, :math:`6` represents class id, score, and `xywh` bounding box.\n \"\"\"\n imgs, meta = self.pre_processor(imgs)\n out = self.model(imgs)\n detections = self.post_processor(out, meta)\n return detections\n\n def compile(\n self,\n *,\n fullgraph: bool = False,\n dynamic: bool = False,\n backend: str = 'inductor',\n mode: str | None = None,\n options: dict[str, str | int | bool] | None = None,\n disable: bool = False,\n ) -> None:\n \"\"\"Compile the internal object detection model with :py:func:`torch.compile()`.\"\"\"\n self.model = torch.compile( # type: ignore\n self.model,\n fullgraph=fullgraph,\n dynamic=dynamic,\n backend=backend,\n mode=mode,\n options=options,\n disable=disable,\n )\n", "path": "kornia/contrib/object_detection.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any\n\nimport torch\n\nfrom kornia.core import Module, Tensor, concatenate\nfrom kornia.geometry.transform import Resize\n\n\nclass ResizePreProcessor(Module):\n \"\"\"This module resizes a list of image tensors to the given size, and also returns the original image sizes for\n further post-processing.\"\"\"\n\n def __init__(self, size: tuple[int, int], interpolation_mode: str = \"bilinear\") -> None:\n \"\"\"\n Args:\n size: images will be resized to this value. If a 2-integer tuple is given, it is interpreted as\n (height, width).\n interpolation_mode: interpolation mode for image resizing. Supported values: ``nearest``, ``bilinear``,\n ``bicubic``, ``area``, and ``nearest-exact``.\n \"\"\"\n super().__init__()\n self.size = size\n self.resizer = Resize(self.size, interpolation=interpolation_mode)\n\n def forward(self, imgs: list[Tensor]) -> tuple[Tensor, dict[str, Any]]:\n # TODO: support other input formats e.g. 
file path, numpy\n # NOTE: antialias=False is used in F.interpolate()\n original_sizes = [(img.shape[1], img.shape[2]) for img in imgs]\n resized_imgs = [self.resizer(img.unsqueeze(0)) for img in imgs]\n return concatenate(resized_imgs), {\"original_size\": original_sizes}\n\n\nclass ObjectDetector:\n \"\"\"This class wraps an object detection model and performs pre-processing and post-processing.\"\"\"\n\n def __init__(self, model: Module, pre_processor: Module, post_processor: Module) -> None:\n \"\"\"Construct an Object Detector object.\n\n Args:\n model: an object detection model.\n pre_processor: a pre-processing module\n post_processor: a post-processing module.\n \"\"\"\n super().__init__()\n self.model = model.eval()\n self.pre_processor = pre_processor.eval()\n self.post_processor = post_processor.eval()\n\n @torch.inference_mode()\n def predict(self, imgs: list[Tensor]) -> list[Tensor]:\n \"\"\"Detect objects in a given list of images.\n\n Args:\n imgs: list of RGB images. Each image is a Tensor with shape :math:`(3, H, W)`.\n\n Returns:\n list of detections found in each image. For item in a batch, shape is :math:`(D, 6)`, where :math:`D` is the\n number of detections in the given image, :math:`6` represents class id, score, and `xywh` bounding box.\n \"\"\"\n imgs, meta = self.pre_processor(imgs)\n out = self.model(imgs)\n detections = self.post_processor(out, meta)\n return detections\n\n def compile(\n self,\n *,\n fullgraph: bool = False,\n dynamic: bool = False,\n backend: str = 'inductor',\n mode: str | None = None,\n options: dict[str, str | int | bool] | None = None,\n disable: bool = False,\n ) -> None:\n \"\"\"Compile the internal object detection model with :py:func:`torch.compile()`.\"\"\"\n self.model = torch.compile( # type: ignore\n self.model,\n fullgraph=fullgraph,\n dynamic=dynamic,\n backend=backend,\n mode=mode,\n options=options,\n disable=disable,\n )\n", "path": "kornia/contrib/object_detection.py"}]} | 1,242 | 452 |
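For orientation, a small usage sketch of the `Resize`-based `ResizePreProcessor` introduced in the patch above. The image shapes and the `(224, 224)` target size are arbitrary assumptions; the behaviour shown (batched output plus the `original_size` metadata) follows the patched code.

```python
# Sketch only: exercises the patched ResizePreProcessor with dummy RGB tensors.
import torch
from kornia.contrib.object_detection import ResizePreProcessor

pre = ResizePreProcessor((224, 224), interpolation_mode="bilinear")
imgs = [torch.rand(3, 480, 640), torch.rand(3, 300, 400)]  # two images, different sizes

batch, meta = pre(imgs)
print(batch.shape)            # torch.Size([2, 3, 224, 224])
print(meta["original_size"])  # [(480, 640), (300, 400)]
```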
gh_patches_debug_5481 | rasdani/github-patches | git_diff | mozmeao__snippets-service-1177 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix ASRSnippetBundle.empty calculation to work with Jobs
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `snippets/base/bundles.py`
Content:
```
1 import hashlib
2 import json
3 from datetime import datetime
4 from urllib.parse import urljoin, urlparse
5
6 from django.apps import apps
7 from django.conf import settings
8 from django.core.cache import cache
9 from django.core.files.base import ContentFile
10 from django.core.files.storage import default_storage
11 from django.template.loader import render_to_string
12 from django.utils.functional import cached_property
13
14 import brotli
15
16 from snippets.base import util
17 from snippets.base import models
18
19
20 ONE_DAY = 60 * 60 * 24
21
22 SNIPPET_FETCH_TEMPLATE_HASH = hashlib.sha1(
23 render_to_string(
24 'base/fetch_snippets.jinja',
25 {
26 'date': '',
27 'snippet_ids': [],
28 'snippets_json': '',
29 'locale': 'xx',
30 'settings': settings,
31 'current_firefox_major_version': '00',
32 'metrics_url': settings.METRICS_URL,
33 }
34 ).encode('utf-8')).hexdigest()
35
36 SNIPPET_FETCH_TEMPLATE_AS_HASH = hashlib.sha1(
37 render_to_string(
38 'base/fetch_snippets_as.jinja',
39 {
40 'date': '',
41 'snippet_ids': [],
42 'snippets_json': '',
43 'locale': 'xx',
44 'settings': settings,
45 'current_firefox_major_version': '00',
46 'metrics_url': settings.METRICS_URL,
47 }
48 ).encode('utf-8')).hexdigest()
49
50 # On application load combine all the version strings of all available
51 # templates into one. To be used in ASRSnippetBundle.key method to calculate
52 # the bundle key. The point is that this string should change when the Template
53 # schema changes.
54 TEMPLATES_NG_VERSIONS = '-'.join([
55 model.VERSION
56 for model in apps.get_models()
57 if issubclass(model, models.Template) and not model.__name__ == 'Template'
58 ])
59
60
61 class SnippetBundle(object):
62 """
63 Group of snippets to be sent to a particular client configuration.
64 """
65 def __init__(self, client):
66 self.client = client
67
68 @cached_property
69 def key(self):
70 """A unique key for this bundle as a sha1 hexdigest."""
71 # Key should consist of snippets that are in the bundle. This part
72 # accounts for all the properties sent by the Client, since the
73 # self.snippets lists snippets are all filters and CMRs have been
74 # applied.
75 key_properties = [
76 '{id}-{date}-{templatedate}'.format(id=snippet.id,
77 date=snippet.modified.isoformat(),
78 templatedate=snippet.template.modified.isoformat())
79 for snippet in self.snippets]
80
81 # Additional values used to calculate the key are the templates and the
82 # variables used to render them besides snippets.
83 key_properties.extend([
84 str(self.client.startpage_version),
85 self.client.locale,
86 util.current_firefox_major_version(),
87 str(settings.BUNDLE_BROTLI_COMPRESS),
88 ])
89 if self.client.startpage_version >= 5:
90 key_properties.append(SNIPPET_FETCH_TEMPLATE_AS_HASH)
91 else:
92 key_properties.append(SNIPPET_FETCH_TEMPLATE_HASH)
93
94 key_string = '_'.join(key_properties)
95 return hashlib.sha1(key_string.encode('utf-8')).hexdigest()
96
97 @property
98 def empty(self):
99 return len(self.snippets) == 0
100
101 @property
102 def cache_key(self):
103 return 'bundle_' + self.key
104
105 @property
106 def cached(self):
107 if cache.get(self.cache_key):
108 return True
109
110 # Check if available on S3 already.
111 if default_storage.exists(self.filename):
112 cache.set(self.cache_key, True, ONE_DAY)
113 return True
114
115 return False
116
117 @property
118 def expired(self):
119 """
120 If True, the code for this bundle should be re-generated before
121 use.
122 """
123 return not cache.get(self.cache_key)
124
125 @property
126 def filename(self):
127 return urljoin(settings.MEDIA_BUNDLES_ROOT, 'bundle_{0}.html'.format(self.key))
128
129 @property
130 def url(self):
131 bundle_url = default_storage.url(self.filename)
132 full_url = urljoin(settings.SITE_URL, bundle_url).split('?')[0]
133 cdn_url = getattr(settings, 'CDN_URL', None)
134 if cdn_url:
135 full_url = urljoin(cdn_url, urlparse(bundle_url).path)
136
137 return full_url
138
139 @cached_property
140 def snippets(self):
141 return (models.Snippet.objects
142 .filter(published=True)
143 .match_client(self.client)
144 .select_related('template')
145 .prefetch_related('countries', 'exclude_from_search_providers')
146 .filter_by_available())
147
148 def generate(self):
149 """Generate and save the code for this snippet bundle."""
150 template = 'base/fetch_snippets.jinja'
151 if self.client.startpage_version == 5:
152 template = 'base/fetch_snippets_as.jinja'
153 bundle_content = render_to_string(template, {
154 'snippet_ids': [snippet.id for snippet in self.snippets],
155 'snippets_json': json.dumps([s.to_dict() for s in self.snippets]),
156 'client': self.client,
157 'locale': self.client.locale,
158 'settings': settings,
159 'current_firefox_major_version': util.current_firefox_major_version(),
160 })
161
162 if isinstance(bundle_content, str):
163 bundle_content = bundle_content.encode('utf-8')
164
165 if (settings.BUNDLE_BROTLI_COMPRESS and self.client.startpage_version >= 5):
166 content_file = ContentFile(brotli.compress(bundle_content))
167 content_file.content_encoding = 'br'
168 else:
169 content_file = ContentFile(bundle_content)
170
171 default_storage.save(self.filename, content_file)
172 cache.set(self.cache_key, True, ONE_DAY)
173
174
175 class ASRSnippetBundle(SnippetBundle):
176
177 @cached_property
178 def key(self):
179 """A unique key for this bundle as a sha1 hexdigest."""
180 # Key should consist of snippets that are in the bundle. This part
181 # accounts for all the properties sent by the Client, since the
182 # self.snippets lists snippets are all filters and CMRs have been
183 # applied.
184 #
185 # Key must change when Snippet or related Template, Campaign or Target
186 # get updated.
187 key_properties = []
188 for job in self.jobs:
189 attributes = [
190 job.id,
191 job.snippet.modified.isoformat(),
192 ]
193
194 key_properties.append('-'.join([str(x) for x in attributes]))
195
196 # Additional values used to calculate the key are the templates and the
197 # variables used to render them besides snippets.
198 key_properties.extend([
199 str(self.client.startpage_version),
200 self.client.locale,
201 str(settings.BUNDLE_BROTLI_COMPRESS),
202 TEMPLATES_NG_VERSIONS,
203 ])
204
205 key_string = '_'.join(key_properties)
206 return hashlib.sha1(key_string.encode('utf-8')).hexdigest()
207
208 @property
209 def filename(self):
210 return urljoin(settings.MEDIA_BUNDLES_ROOT, 'bundle_{0}.json'.format(self.key))
211
212 @cached_property
213 def jobs(self):
214 return (models.Job.objects.filter(status=models.Job.PUBLISHED)
215 .select_related('snippet')
216 .match_client(self.client))
217
218 def generate(self):
219 """Generate and save the code for this snippet bundle."""
220 # Generate the new AS Router bundle format
221 data = [job.render() for job in self.jobs]
222 bundle_content = json.dumps({
223 'messages': data,
224 'metadata': {
225 'generated_at': datetime.utcnow().isoformat(),
226 'number_of_snippets': len(data),
227 }
228 })
229
230 if isinstance(bundle_content, str):
231 bundle_content = bundle_content.encode('utf-8')
232
233 if settings.BUNDLE_BROTLI_COMPRESS:
234 content_file = ContentFile(brotli.compress(bundle_content))
235 content_file.content_encoding = 'br'
236 else:
237 content_file = ContentFile(bundle_content)
238
239 default_storage.save(self.filename, content_file)
240 cache.set(self.cache_key, True, ONE_DAY)
241
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/snippets/base/bundles.py b/snippets/base/bundles.py
--- a/snippets/base/bundles.py
+++ b/snippets/base/bundles.py
@@ -205,6 +205,10 @@
key_string = '_'.join(key_properties)
return hashlib.sha1(key_string.encode('utf-8')).hexdigest()
+ @property
+ def empty(self):
+ return len(self.jobs) == 0
+
@property
def filename(self):
return urljoin(settings.MEDIA_BUNDLES_ROOT, 'bundle_{0}.json'.format(self.key))
| {"golden_diff": "diff --git a/snippets/base/bundles.py b/snippets/base/bundles.py\n--- a/snippets/base/bundles.py\n+++ b/snippets/base/bundles.py\n@@ -205,6 +205,10 @@\n key_string = '_'.join(key_properties)\n return hashlib.sha1(key_string.encode('utf-8')).hexdigest()\n \n+ @property\n+ def empty(self):\n+ return len(self.jobs) == 0\n+\n @property\n def filename(self):\n return urljoin(settings.MEDIA_BUNDLES_ROOT, 'bundle_{0}.json'.format(self.key))\n", "issue": "Fix ASRSnippetBundle.empty calculation to work with Jobs\n\n", "before_files": [{"content": "import hashlib\nimport json\nfrom datetime import datetime\nfrom urllib.parse import urljoin, urlparse\n\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.core.files.base import ContentFile\nfrom django.core.files.storage import default_storage\nfrom django.template.loader import render_to_string\nfrom django.utils.functional import cached_property\n\nimport brotli\n\nfrom snippets.base import util\nfrom snippets.base import models\n\n\nONE_DAY = 60 * 60 * 24\n\nSNIPPET_FETCH_TEMPLATE_HASH = hashlib.sha1(\n render_to_string(\n 'base/fetch_snippets.jinja',\n {\n 'date': '',\n 'snippet_ids': [],\n 'snippets_json': '',\n 'locale': 'xx',\n 'settings': settings,\n 'current_firefox_major_version': '00',\n 'metrics_url': settings.METRICS_URL,\n }\n ).encode('utf-8')).hexdigest()\n\nSNIPPET_FETCH_TEMPLATE_AS_HASH = hashlib.sha1(\n render_to_string(\n 'base/fetch_snippets_as.jinja',\n {\n 'date': '',\n 'snippet_ids': [],\n 'snippets_json': '',\n 'locale': 'xx',\n 'settings': settings,\n 'current_firefox_major_version': '00',\n 'metrics_url': settings.METRICS_URL,\n }\n ).encode('utf-8')).hexdigest()\n\n# On application load combine all the version strings of all available\n# templates into one. To be used in ASRSnippetBundle.key method to calculate\n# the bundle key. The point is that this string should change when the Template\n# schema changes.\nTEMPLATES_NG_VERSIONS = '-'.join([\n model.VERSION\n for model in apps.get_models()\n if issubclass(model, models.Template) and not model.__name__ == 'Template'\n])\n\n\nclass SnippetBundle(object):\n \"\"\"\n Group of snippets to be sent to a particular client configuration.\n \"\"\"\n def __init__(self, client):\n self.client = client\n\n @cached_property\n def key(self):\n \"\"\"A unique key for this bundle as a sha1 hexdigest.\"\"\"\n # Key should consist of snippets that are in the bundle. 
This part\n # accounts for all the properties sent by the Client, since the\n # self.snippets lists snippets are all filters and CMRs have been\n # applied.\n key_properties = [\n '{id}-{date}-{templatedate}'.format(id=snippet.id,\n date=snippet.modified.isoformat(),\n templatedate=snippet.template.modified.isoformat())\n for snippet in self.snippets]\n\n # Additional values used to calculate the key are the templates and the\n # variables used to render them besides snippets.\n key_properties.extend([\n str(self.client.startpage_version),\n self.client.locale,\n util.current_firefox_major_version(),\n str(settings.BUNDLE_BROTLI_COMPRESS),\n ])\n if self.client.startpage_version >= 5:\n key_properties.append(SNIPPET_FETCH_TEMPLATE_AS_HASH)\n else:\n key_properties.append(SNIPPET_FETCH_TEMPLATE_HASH)\n\n key_string = '_'.join(key_properties)\n return hashlib.sha1(key_string.encode('utf-8')).hexdigest()\n\n @property\n def empty(self):\n return len(self.snippets) == 0\n\n @property\n def cache_key(self):\n return 'bundle_' + self.key\n\n @property\n def cached(self):\n if cache.get(self.cache_key):\n return True\n\n # Check if available on S3 already.\n if default_storage.exists(self.filename):\n cache.set(self.cache_key, True, ONE_DAY)\n return True\n\n return False\n\n @property\n def expired(self):\n \"\"\"\n If True, the code for this bundle should be re-generated before\n use.\n \"\"\"\n return not cache.get(self.cache_key)\n\n @property\n def filename(self):\n return urljoin(settings.MEDIA_BUNDLES_ROOT, 'bundle_{0}.html'.format(self.key))\n\n @property\n def url(self):\n bundle_url = default_storage.url(self.filename)\n full_url = urljoin(settings.SITE_URL, bundle_url).split('?')[0]\n cdn_url = getattr(settings, 'CDN_URL', None)\n if cdn_url:\n full_url = urljoin(cdn_url, urlparse(bundle_url).path)\n\n return full_url\n\n @cached_property\n def snippets(self):\n return (models.Snippet.objects\n .filter(published=True)\n .match_client(self.client)\n .select_related('template')\n .prefetch_related('countries', 'exclude_from_search_providers')\n .filter_by_available())\n\n def generate(self):\n \"\"\"Generate and save the code for this snippet bundle.\"\"\"\n template = 'base/fetch_snippets.jinja'\n if self.client.startpage_version == 5:\n template = 'base/fetch_snippets_as.jinja'\n bundle_content = render_to_string(template, {\n 'snippet_ids': [snippet.id for snippet in self.snippets],\n 'snippets_json': json.dumps([s.to_dict() for s in self.snippets]),\n 'client': self.client,\n 'locale': self.client.locale,\n 'settings': settings,\n 'current_firefox_major_version': util.current_firefox_major_version(),\n })\n\n if isinstance(bundle_content, str):\n bundle_content = bundle_content.encode('utf-8')\n\n if (settings.BUNDLE_BROTLI_COMPRESS and self.client.startpage_version >= 5):\n content_file = ContentFile(brotli.compress(bundle_content))\n content_file.content_encoding = 'br'\n else:\n content_file = ContentFile(bundle_content)\n\n default_storage.save(self.filename, content_file)\n cache.set(self.cache_key, True, ONE_DAY)\n\n\nclass ASRSnippetBundle(SnippetBundle):\n\n @cached_property\n def key(self):\n \"\"\"A unique key for this bundle as a sha1 hexdigest.\"\"\"\n # Key should consist of snippets that are in the bundle. 
This part\n # accounts for all the properties sent by the Client, since the\n # self.snippets lists snippets are all filters and CMRs have been\n # applied.\n #\n # Key must change when Snippet or related Template, Campaign or Target\n # get updated.\n key_properties = []\n for job in self.jobs:\n attributes = [\n job.id,\n job.snippet.modified.isoformat(),\n ]\n\n key_properties.append('-'.join([str(x) for x in attributes]))\n\n # Additional values used to calculate the key are the templates and the\n # variables used to render them besides snippets.\n key_properties.extend([\n str(self.client.startpage_version),\n self.client.locale,\n str(settings.BUNDLE_BROTLI_COMPRESS),\n TEMPLATES_NG_VERSIONS,\n ])\n\n key_string = '_'.join(key_properties)\n return hashlib.sha1(key_string.encode('utf-8')).hexdigest()\n\n @property\n def filename(self):\n return urljoin(settings.MEDIA_BUNDLES_ROOT, 'bundle_{0}.json'.format(self.key))\n\n @cached_property\n def jobs(self):\n return (models.Job.objects.filter(status=models.Job.PUBLISHED)\n .select_related('snippet')\n .match_client(self.client))\n\n def generate(self):\n \"\"\"Generate and save the code for this snippet bundle.\"\"\"\n # Generate the new AS Router bundle format\n data = [job.render() for job in self.jobs]\n bundle_content = json.dumps({\n 'messages': data,\n 'metadata': {\n 'generated_at': datetime.utcnow().isoformat(),\n 'number_of_snippets': len(data),\n }\n })\n\n if isinstance(bundle_content, str):\n bundle_content = bundle_content.encode('utf-8')\n\n if settings.BUNDLE_BROTLI_COMPRESS:\n content_file = ContentFile(brotli.compress(bundle_content))\n content_file.content_encoding = 'br'\n else:\n content_file = ContentFile(bundle_content)\n\n default_storage.save(self.filename, content_file)\n cache.set(self.cache_key, True, ONE_DAY)\n", "path": "snippets/base/bundles.py"}], "after_files": [{"content": "import hashlib\nimport json\nfrom datetime import datetime\nfrom urllib.parse import urljoin, urlparse\n\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.core.files.base import ContentFile\nfrom django.core.files.storage import default_storage\nfrom django.template.loader import render_to_string\nfrom django.utils.functional import cached_property\n\nimport brotli\n\nfrom snippets.base import util\nfrom snippets.base import models\n\n\nONE_DAY = 60 * 60 * 24\n\nSNIPPET_FETCH_TEMPLATE_HASH = hashlib.sha1(\n render_to_string(\n 'base/fetch_snippets.jinja',\n {\n 'date': '',\n 'snippet_ids': [],\n 'snippets_json': '',\n 'locale': 'xx',\n 'settings': settings,\n 'current_firefox_major_version': '00',\n 'metrics_url': settings.METRICS_URL,\n }\n ).encode('utf-8')).hexdigest()\n\nSNIPPET_FETCH_TEMPLATE_AS_HASH = hashlib.sha1(\n render_to_string(\n 'base/fetch_snippets_as.jinja',\n {\n 'date': '',\n 'snippet_ids': [],\n 'snippets_json': '',\n 'locale': 'xx',\n 'settings': settings,\n 'current_firefox_major_version': '00',\n 'metrics_url': settings.METRICS_URL,\n }\n ).encode('utf-8')).hexdigest()\n\n# On application load combine all the version strings of all available\n# templates into one. To be used in ASRSnippetBundle.key method to calculate\n# the bundle key. 
The point is that this string should change when the Template\n# schema changes.\nTEMPLATES_NG_VERSIONS = '-'.join([\n model.VERSION\n for model in apps.get_models()\n if issubclass(model, models.Template) and not model.__name__ == 'Template'\n])\n\n\nclass SnippetBundle(object):\n \"\"\"\n Group of snippets to be sent to a particular client configuration.\n \"\"\"\n def __init__(self, client):\n self.client = client\n\n @cached_property\n def key(self):\n \"\"\"A unique key for this bundle as a sha1 hexdigest.\"\"\"\n # Key should consist of snippets that are in the bundle. This part\n # accounts for all the properties sent by the Client, since the\n # self.snippets lists snippets are all filters and CMRs have been\n # applied.\n key_properties = [\n '{id}-{date}-{templatedate}'.format(id=snippet.id,\n date=snippet.modified.isoformat(),\n templatedate=snippet.template.modified.isoformat())\n for snippet in self.snippets]\n\n # Additional values used to calculate the key are the templates and the\n # variables used to render them besides snippets.\n key_properties.extend([\n str(self.client.startpage_version),\n self.client.locale,\n util.current_firefox_major_version(),\n str(settings.BUNDLE_BROTLI_COMPRESS),\n ])\n if self.client.startpage_version >= 5:\n key_properties.append(SNIPPET_FETCH_TEMPLATE_AS_HASH)\n else:\n key_properties.append(SNIPPET_FETCH_TEMPLATE_HASH)\n\n key_string = '_'.join(key_properties)\n return hashlib.sha1(key_string.encode('utf-8')).hexdigest()\n\n @property\n def empty(self):\n return len(self.snippets) == 0\n\n @property\n def cache_key(self):\n return 'bundle_' + self.key\n\n @property\n def cached(self):\n if cache.get(self.cache_key):\n return True\n\n # Check if available on S3 already.\n if default_storage.exists(self.filename):\n cache.set(self.cache_key, True, ONE_DAY)\n return True\n\n return False\n\n @property\n def expired(self):\n \"\"\"\n If True, the code for this bundle should be re-generated before\n use.\n \"\"\"\n return not cache.get(self.cache_key)\n\n @property\n def filename(self):\n return urljoin(settings.MEDIA_BUNDLES_ROOT, 'bundle_{0}.html'.format(self.key))\n\n @property\n def url(self):\n bundle_url = default_storage.url(self.filename)\n full_url = urljoin(settings.SITE_URL, bundle_url).split('?')[0]\n cdn_url = getattr(settings, 'CDN_URL', None)\n if cdn_url:\n full_url = urljoin(cdn_url, urlparse(bundle_url).path)\n\n return full_url\n\n @cached_property\n def snippets(self):\n return (models.Snippet.objects\n .filter(published=True)\n .match_client(self.client)\n .select_related('template')\n .prefetch_related('countries', 'exclude_from_search_providers')\n .filter_by_available())\n\n def generate(self):\n \"\"\"Generate and save the code for this snippet bundle.\"\"\"\n template = 'base/fetch_snippets.jinja'\n if self.client.startpage_version == 5:\n template = 'base/fetch_snippets_as.jinja'\n bundle_content = render_to_string(template, {\n 'snippet_ids': [snippet.id for snippet in self.snippets],\n 'snippets_json': json.dumps([s.to_dict() for s in self.snippets]),\n 'client': self.client,\n 'locale': self.client.locale,\n 'settings': settings,\n 'current_firefox_major_version': util.current_firefox_major_version(),\n })\n\n if isinstance(bundle_content, str):\n bundle_content = bundle_content.encode('utf-8')\n\n if (settings.BUNDLE_BROTLI_COMPRESS and self.client.startpage_version >= 5):\n content_file = ContentFile(brotli.compress(bundle_content))\n content_file.content_encoding = 'br'\n else:\n content_file = 
ContentFile(bundle_content)\n\n default_storage.save(self.filename, content_file)\n cache.set(self.cache_key, True, ONE_DAY)\n\n\nclass ASRSnippetBundle(SnippetBundle):\n\n @cached_property\n def key(self):\n \"\"\"A unique key for this bundle as a sha1 hexdigest.\"\"\"\n # Key should consist of snippets that are in the bundle. This part\n # accounts for all the properties sent by the Client, since the\n # self.snippets lists snippets are all filters and CMRs have been\n # applied.\n #\n # Key must change when Snippet or related Template, Campaign or Target\n # get updated.\n key_properties = []\n for job in self.jobs:\n attributes = [\n job.id,\n job.snippet.modified.isoformat(),\n ]\n\n key_properties.append('-'.join([str(x) for x in attributes]))\n\n # Additional values used to calculate the key are the templates and the\n # variables used to render them besides snippets.\n key_properties.extend([\n str(self.client.startpage_version),\n self.client.locale,\n str(settings.BUNDLE_BROTLI_COMPRESS),\n TEMPLATES_NG_VERSIONS,\n ])\n\n key_string = '_'.join(key_properties)\n return hashlib.sha1(key_string.encode('utf-8')).hexdigest()\n\n @property\n def empty(self):\n return len(self.jobs) == 0\n\n @property\n def filename(self):\n return urljoin(settings.MEDIA_BUNDLES_ROOT, 'bundle_{0}.json'.format(self.key))\n\n @cached_property\n def jobs(self):\n return (models.Job.objects.filter(status=models.Job.PUBLISHED)\n .select_related('snippet')\n .match_client(self.client))\n\n def generate(self):\n \"\"\"Generate and save the code for this snippet bundle.\"\"\"\n # Generate the new AS Router bundle format\n data = [job.render() for job in self.jobs]\n bundle_content = json.dumps({\n 'messages': data,\n 'metadata': {\n 'generated_at': datetime.utcnow().isoformat(),\n 'number_of_snippets': len(data),\n }\n })\n\n if isinstance(bundle_content, str):\n bundle_content = bundle_content.encode('utf-8')\n\n if settings.BUNDLE_BROTLI_COMPRESS:\n content_file = ContentFile(brotli.compress(bundle_content))\n content_file.content_encoding = 'br'\n else:\n content_file = ContentFile(bundle_content)\n\n default_storage.save(self.filename, content_file)\n cache.set(self.cache_key, True, ONE_DAY)\n", "path": "snippets/base/bundles.py"}]} | 2,641 | 131 |
gh_patches_debug_5800 | rasdani/github-patches | git_diff | pymeasure__pymeasure-340 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Python 3.9 compatibility
Python 3.9 [is out](https://docs.python.org/3.9/whatsnew/3.9.html). We should ensure that we are compatible, so there are a couple of things to do
* [x] Create a fresh Python 3.9 environment.yml (with current package versions)
* [x] Update Travis and Appveyor CI setup files
- [x] Check if the Appveyor 3.8 build can use the 3.8 miniconda, not 3.7, now
- [x] I think we should relax the python version specifiers in the environment.yml to major.minor (i.e. python 3.8, not 3.8.1), to also get python bugfixes, even though it's a bit less strict in CI version stability.
   - [x] Check if we should bump Travis ubuntu version to the current LTS focal (20.04)
* The conda-forge package is repackaged automatically, apparently - it's already available.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #
2 # This file is part of the PyMeasure package.
3 #
4 # Copyright (c) 2013-2020 PyMeasure Developers
5 #
6 # Permission is hereby granted, free of charge, to any person obtaining a copy
7 # of this software and associated documentation files (the "Software"), to deal
8 # in the Software without restriction, including without limitation the rights
9 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 # copies of the Software, and to permit persons to whom the Software is
11 # furnished to do so, subject to the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be included in
14 # all copies or substantial portions of the Software.
15 #
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 # THE SOFTWARE.
23 #
24
25
26 from setuptools import setup, find_packages
27
28 setup(
29 name='PyMeasure',
30 version='0.8.0',
31 author='PyMeasure Developers',
32 packages=find_packages(),
33 scripts=[],
34 url='https://github.com/ralph-group/pymeasure',
35 download_url='https://github.com/ralph-group/pymeasure/tarball/v0.8.0',
36 license='MIT License',
37 description='Scientific measurement library for instruments, experiments, and live-plotting',
38 long_description=open('README.rst').read() + "\n\n" + open('CHANGES.txt').read(),
39 install_requires=[
40 "numpy >= 1.6.1",
41 "pandas >= 0.14",
42 "pyvisa >= 1.8",
43 "pyserial >= 2.7",
44 "pyqtgraph >= 0.9.10"
45 ],
46 extras_require={
47 'matplotlib': ['matplotlib >= 2.0.2'],
48 'tcp': [
49 'zmq >= 16.0.2',
50 'cloudpickle >= 0.3.1'
51 ],
52 'python-vxi11': ['python-vxi11 >= 0.9']
53 },
54 setup_requires=[
55 'pytest-runner'
56 ],
57 tests_require=[
58 'pytest >= 2.9.1',
59 'pytest-qt >= 2.4.0'
60 ],
61 classifiers=[
62 "Development Status :: 4 - Beta",
63 "Intended Audience :: Science/Research",
64 "License :: OSI Approved :: MIT License",
65 "Operating System :: MacOS",
66 "Operating System :: Microsoft :: Windows",
67 "Operating System :: POSIX",
68 "Operating System :: Unix",
69 "Programming Language :: Python :: 3 :: Only",
70 "Programming Language :: Python :: 3.6",
71 "Programming Language :: Python :: 3.7",
72 "Programming Language :: Python :: 3.8",
73 "Topic :: Scientific/Engineering",
74 ],
75 keywords="measure instrument experiment control automate graph plot"
76 )
77
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -70,6 +70,7 @@
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
"Topic :: Scientific/Engineering",
],
keywords="measure instrument experiment control automate graph plot"
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -70,6 +70,7 @@\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n+ \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering\",\n ],\n keywords=\"measure instrument experiment control automate graph plot\"\n", "issue": "Python 3.9 compatibility\nPython 3.9 [is out](https://docs.python.org/3.9/whatsnew/3.9.html). We should ensure that we are compatible, so there are a couple of things to do\r\n\r\n* [x] Create a fresh Python 3.9 environment.yml (with current package versions)\r\n* [x] Update Travis and Appveyor CI setup files\r\n - [x] Check if the Appveyor 3.8 build can use the 3.8 miniconda, not 3.7, now\r\n - [x] I think we should relax the python version specifiers in the environment.yml to major.minor (i.e. python 3.8, not 3.8.1), to also get python bugfixes, even though it's a bit less strict in CI version stability.\r\n - [x] Check if we should bump Trais ubuntu version to the current LTS focal (20.04)\r\n* The conda-forge package is repackaged automatically, apparently - it's already available.\n", "before_files": [{"content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2020 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\n\nfrom setuptools import setup, find_packages\n\nsetup(\n name='PyMeasure',\n version='0.8.0',\n author='PyMeasure Developers',\n packages=find_packages(),\n scripts=[],\n url='https://github.com/ralph-group/pymeasure',\n download_url='https://github.com/ralph-group/pymeasure/tarball/v0.8.0',\n license='MIT License',\n description='Scientific measurement library for instruments, experiments, and live-plotting',\n long_description=open('README.rst').read() + \"\\n\\n\" + open('CHANGES.txt').read(),\n install_requires=[\n \"numpy >= 1.6.1\",\n \"pandas >= 0.14\",\n \"pyvisa >= 1.8\",\n \"pyserial >= 2.7\",\n \"pyqtgraph >= 0.9.10\"\n ],\n extras_require={\n 'matplotlib': ['matplotlib >= 2.0.2'],\n 'tcp': [\n 'zmq >= 16.0.2',\n 'cloudpickle >= 0.3.1'\n ],\n 'python-vxi11': ['python-vxi11 >= 0.9']\n },\n setup_requires=[\n 'pytest-runner'\n ],\n tests_require=[\n 'pytest >= 2.9.1',\n 'pytest-qt >= 2.4.0'\n ],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: MacOS\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: POSIX\",\n \"Operating System :: Unix\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Scientific/Engineering\",\n ],\n keywords=\"measure instrument experiment control automate graph plot\"\n)\n", "path": "setup.py"}], "after_files": [{"content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2020 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\n\nfrom setuptools import setup, find_packages\n\nsetup(\n name='PyMeasure',\n version='0.8.0',\n author='PyMeasure Developers',\n packages=find_packages(),\n scripts=[],\n url='https://github.com/ralph-group/pymeasure',\n download_url='https://github.com/ralph-group/pymeasure/tarball/v0.8.0',\n license='MIT License',\n description='Scientific measurement library for instruments, experiments, and live-plotting',\n long_description=open('README.rst').read() + \"\\n\\n\" + open('CHANGES.txt').read(),\n install_requires=[\n \"numpy >= 1.6.1\",\n \"pandas >= 0.14\",\n \"pyvisa >= 1.8\",\n \"pyserial >= 2.7\",\n \"pyqtgraph >= 0.9.10\"\n ],\n extras_require={\n 'matplotlib': ['matplotlib >= 2.0.2'],\n 'tcp': [\n 'zmq >= 16.0.2',\n 'cloudpickle >= 0.3.1'\n ],\n 'python-vxi11': ['python-vxi11 >= 0.9']\n },\n setup_requires=[\n 'pytest-runner'\n ],\n tests_require=[\n 'pytest >= 2.9.1',\n 'pytest-qt >= 2.4.0'\n ],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: MacOS\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: POSIX\",\n \"Operating System :: Unix\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering\",\n ],\n keywords=\"measure instrument experiment control automate graph plot\"\n)\n", "path": "setup.py"}]} | 1,323 | 102 |
gh_patches_debug_37481 | rasdani/github-patches | git_diff | larq__larq-111 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Generic hyperparameter scheduler
The idea is to have a TF scheduler which is optimizer/hyperparameter agnostic. 
As an example, it can be used for gamma, threshold or learning rate of the floating point optimizer in Bop.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `larq/callbacks.py`
Content:
```
1 import tensorflow as tf
2 import numpy as np
3
4
5 class QuantizationLogger(tf.keras.callbacks.Callback):
6 """Callback that adds quantization specific metrics.
7
8 !!! note ""
9 In order for metrics to be picked up by TensorBoard this callback needs to be
10 applied before the TensorBoard callback and use the same update frequency.
11
12 !!! example
13 ```python
14 callbacks = [QuantizationLogger(), tf.keras.callbacks.TensorBoard()]
15 model.fit(X_train, Y_train, callbacks=callbacks)
16 ```
17
18 # Metrics
19 - `changed_quantization_ration`: The ration of quantized weights in each layer that
20 changed during the weight update.
21
22 # Arguments
23 update_freq: `'batch'` or `'epoch'` or integer. When using `'batch'`, computes the
24 metrics after each batch. The same applies for `'epoch'`. If using an integer
25 the callback will compute the metrics every `update_freq` batches.
26 Note that computing too frequently can slow down training.
27 """
28
29 def __init__(self, update_freq="epoch"):
30 super().__init__()
31 self.batch_previous_weights = {}
32 self.epoch_previous_weights = {}
33 self.update_freq = update_freq if update_freq != "batch" else 1
34 self._quantized_weights = []
35 self._quantized_weight_names = []
36
37 def set_model(self, model):
38 super().set_model(model)
39 for layer in model.layers:
40 if hasattr(layer, "quantized_weights"):
41 for i, weight in enumerate(layer.quantized_weights):
42 self._quantized_weights.append(weight)
43 self._quantized_weight_names.append(
44 layer.name if i == 0 else f"{layer.name}_{i}"
45 )
46
47 def _maybe_log_and_store(self, storage, logs, should_log=True, should_store=True):
48 if should_log or should_store:
49 values = tf.keras.backend.batch_get_value(self._quantized_weights)
50 for key, value in zip(self._quantized_weight_names, values):
51 value = value.astype(np.int8)
52 if should_log:
53 logs[f"changed_quantization_ration/{key.replace(':', '_')}"] = 1 - (
54 np.count_nonzero(value == storage[key]) / value.size
55 )
56 if should_store:
57 storage[key] = value
58
59 if should_log and not should_store:
60 # We don't need it in the next batch anymore
61 storage = {}
62
63 def on_batch_end(self, batch, logs=None):
64 if self.update_freq != "epoch":
65 self._maybe_log_and_store(
66 self.batch_previous_weights,
67 logs,
68 should_log=batch > 0 and (batch + 1) % self.update_freq == 0,
69 should_store=(batch + 2) % self.update_freq == 0,
70 )
71
72 def on_train_begin(self, logs=None):
73 self._maybe_log_and_store(self.epoch_previous_weights, logs, should_log=False)
74
75 def on_epoch_end(self, epoch, logs=None):
76 self._maybe_log_and_store(self.epoch_previous_weights, logs)
77
```
Path: `larq/optimizers_v1.py`
Content:
```
1 import tensorflow as tf
2 import numpy as np
3 import larq as lq
4
5 from larq import utils
6 from copy import deepcopy
7
8
9 @utils.register_keras_custom_object
10 class XavierLearningRateScaling(tf.keras.optimizers.Optimizer):
11 """Optimizer wrapper for Xavier Learning Rate Scaling
12
13 Scale the weights learning rates respectively with the weights initialization
14
15 !!! note ""
16 This is a wrapper and does not implement any optimization algorithm.
17
18 !!! example
19 ```python
20 optimizer = lq.optimizers.XavierLearningRateScaling(
21 tf.keras.optimizers.Adam(0.01), model
22 )
23 ```
24
25 # Arguments
26 optimizer: A `tf.keras.optimizers.Optimizer`
27 model: A `tf.keras.Model`
28
29 # References
30 - [BinaryConnect: Training Deep Neural Networks with binary weights during
31 propagations](https://arxiv.org/abs/1511.00363)
32 """
33
34 def __init__(self, optimizer, model):
35 if int(tf.__version__[0]) == 2:
36 raise NotImplementedError(
37 "XavierLearningRateScaling is not supported by Tensorflow 2.0."
38 )
39
40 if not isinstance(optimizer, tf.keras.optimizers.Optimizer):
41 raise ValueError(
42 f"Expected tf.keras.optimizers.Optimizer, received {type(optimizer)}."
43 )
44 self.optimizer = optimizer
45
46 if isinstance(model, tf.keras.Model):
47 self.multipliers = {}
48 for layer in model.layers:
49 if hasattr(layer, "quantized_latent_weights"):
50 for weight in layer.quantized_latent_weights:
51 self.multipliers[weight.name] = self.get_lr_multiplier(weight)
52 elif isinstance(model, dict):
53 self.multipliers = model
54 else:
55 raise ValueError(f"Expected tf.keras.Model or dict, received {type(model)}")
56
57 def get_lr_multiplier(self, weight):
58 shape = weight.get_shape().as_list()
59 n_input = shape[-2]
60 n_output = shape[-1]
61 if len(shape) == 4:
62 kernelsize = np.prod(shape[:-2])
63 coeff = 1.0 / np.sqrt(1.5 / ((kernelsize * (n_input + n_output))))
64 elif len(shape) == 2:
65 coeff = 1.0 / np.sqrt(1.5 / ((1.0 * (n_input + n_output))))
66 else:
67 raise NotImplementedError(
68 "Xavier Learning rate scaling not implimented for this kernelsize"
69 )
70 return coeff
71
72 def get_updates(self, loss, params):
73 mult_lr_params = [p for p in params if p.name in self.multipliers]
74 base_lr_params = [p for p in params if p.name not in self.multipliers]
75
76 updates = []
77 base_lr = self.optimizer.lr
78 for param in mult_lr_params:
79 self.optimizer.lr = base_lr * self.multipliers[param.name]
80 updates.extend(self.optimizer.get_updates(loss, [param]))
81
82 self.optimizer.lr = base_lr
83 updates.extend(self.optimizer.get_updates(loss, base_lr_params))
84
85 return updates
86
87 def __getattr__(self, name):
88 return getattr(self.optimizer, name)
89
90 def get_config(self):
91 return {
92 "optimizer": {
93 "class_name": self.optimizer.__class__.__name__,
94 "config": self.optimizer.get_config(),
95 },
96 "multipliers": self.multipliers,
97 }
98
99 @classmethod
100 def from_config(cls, config, custom_objects=None):
101 optimizer = tf.keras.optimizers.deserialize(
102 config.pop("optimizer"), custom_objects=custom_objects
103 )
104 return cls(optimizer, config["multipliers"])
105
106
107 @utils.register_keras_custom_object
108 class Bop(tf.keras.optimizers.Optimizer):
109 """Binary optimizer (Bop).
110
111 Bop is a latent-free optimizer for Binarized Neural Networks (BNNs).
112
113 !!! example
114 ```python
115 optimizer = lq.optimizers.Bop(fp_optimizer=tf.keras.optimizers.Adam(0.01))
116
117 ```
118
119 # Arguments
120 fp_optimizer: a `tf.keras.optimizers.Optimizer`.
121 threshold: determines to whether to flip each weight.
122 gamma: the adaptivity rate.
123 name: name of the optimizer.
124
125 # References
126 - [Latent Weights Do Not Exist: Rethinking Binarized Neural Network Optimization](https://arxiv.org/abs/1906.02107)
127 """
128
129 def __init__(self, fp_optimizer, threshold=1e-5, gamma=1e-2, name="Bop", **kwargs):
130 super().__init__(**kwargs)
131
132 if not isinstance(fp_optimizer, tf.keras.optimizers.Optimizer):
133 raise TypeError(
134 f"Expected tf.keras.optimizers.Optimizer, received {type(fp_optimizer)}."
135 )
136
137 with tf.keras.backend.name_scope(self.__class__.__name__):
138
139 self.fp_optimizer = fp_optimizer
140 self.threshold = threshold
141 self.gamma = gamma
142
143 def get_updates(self, loss, params):
144 grads = self.get_gradients(loss, params)
145 self.updates = []
146
147 ms = [
148 tf.keras.backend.zeros(
149 tf.keras.backend.int_shape(p), dtype=tf.keras.backend.dtype(p)
150 )
151 for p in params
152 ]
153
154 fp_params = []
155
156 for p, g, m in zip(params, grads, ms):
157 if self.is_binary(p):
158 m_t = (1 - self.gamma) * m + self.gamma * g
159
160 self.updates.append(tf.assign(m, m_t))
161 self.updates.append(
162 tf.assign(
163 p, lq.quantizers.sign(-p * tf.sign(p * m_t - self.threshold))
164 )
165 )
166
167 else:
168 fp_params.append(p)
169
170 return self.updates + self.fp_optimizer.get_updates(loss, fp_params)
171
172 @staticmethod
173 def is_binary(var):
174 return "/kernel" in var.name and "quant_" in var.name
175
176 def __getattr__(self, name):
177 if name == "lr":
178 return self.fp_optimizer.lr
179 return super().__getattr__(name)
180
181 def get_config(self):
182 fp_optimizer_config = self.fp_optimizer.get_config()
183 config = {
184 "threshold": self.threshold,
185 "gamma": self.gamma,
186 "fp_optimizer": {
187 "class_name": self.fp_optimizer.__class__.__name__,
188 "config": fp_optimizer_config,
189 },
190 }
191 return {**super().get_config(), **config}
192
193 @classmethod
194 def from_config(cls, config, custom_objects=None):
195 new_config = deepcopy(config)
196 fp_optimizer = tf.keras.optimizers.deserialize(
197 new_config["fp_optimizer"], custom_objects=custom_objects
198 )
199 new_config.pop("fp_optimizer", None)
200 return cls(fp_optimizer, **new_config)
201
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/larq/callbacks.py b/larq/callbacks.py
--- a/larq/callbacks.py
+++ b/larq/callbacks.py
@@ -74,3 +74,45 @@
def on_epoch_end(self, epoch, logs=None):
self._maybe_log_and_store(self.epoch_previous_weights, logs)
+
+
+class HyperparameterScheduler(tf.keras.callbacks.Callback):
+ """Generic hyperparameter scheduler.
+
+ # Arguments:
+ schedule: a function that takes an epoch index as input
+ (integer, indexed from 0) and returns a new hyperparameter as output.
+ hyperparameter: str. the name of the hyperparameter to be scheduled.
+ verbose: int. 0: quiet, 1: update messages.
+ """
+
+ def __init__(self, schedule, hyperparameter, verbose=0):
+ super(HyperparameterScheduler, self).__init__()
+ self.schedule = schedule
+ self.hyperparameter = hyperparameter
+ self.verbose = verbose
+
+ def on_epoch_begin(self, epoch, logs=None):
+ if not hasattr(self.model.optimizer, self.hyperparameter):
+ raise ValueError(
+ f'Optimizer must have a "{self.hyperparameter}" attribute.'
+ )
+
+ hp = getattr(self.model.optimizer, self.hyperparameter)
+ try: # new API
+ hyperparameter_val = tf.keras.backend.get_value(hp)
+ hyperparameter_val = self.schedule(epoch, hyperparameter_val)
+ except TypeError: # Support for old API for backward compatibility
+ hyperparameter_val = self.schedule(epoch)
+
+ tf.keras.backend.set_value(hp, hyperparameter_val)
+
+ if self.verbose > 0:
+ print(
+ f"Epoch {epoch + 1}: {self.hyperparameter} changning to {tf.keras.backend.get_value(hp)}."
+ )
+
+ def on_epoch_end(self, epoch, logs=None):
+ logs = logs or {}
+ hp = getattr(self.model.optimizer, self.hyperparameter)
+ logs[self.hyperparameter] = tf.keras.backend.get_value(hp)
diff --git a/larq/optimizers_v1.py b/larq/optimizers_v1.py
--- a/larq/optimizers_v1.py
+++ b/larq/optimizers_v1.py
@@ -137,8 +137,8 @@
with tf.keras.backend.name_scope(self.__class__.__name__):
self.fp_optimizer = fp_optimizer
- self.threshold = threshold
- self.gamma = gamma
+ self.threshold = tf.keras.backend.variable(threshold, name="threshold")
+ self.gamma = tf.keras.backend.variable(gamma, name="gamma")
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
@@ -181,8 +181,8 @@
def get_config(self):
fp_optimizer_config = self.fp_optimizer.get_config()
config = {
- "threshold": self.threshold,
- "gamma": self.gamma,
+ "threshold": float(tf.keras.backend.get_value(self.threshold)),
+ "gamma": float(tf.keras.backend.get_value(self.gamma)),
"fp_optimizer": {
"class_name": self.fp_optimizer.__class__.__name__,
"config": fp_optimizer_config,
| {"golden_diff": "diff --git a/larq/callbacks.py b/larq/callbacks.py\n--- a/larq/callbacks.py\n+++ b/larq/callbacks.py\n@@ -74,3 +74,45 @@\n \n def on_epoch_end(self, epoch, logs=None):\n self._maybe_log_and_store(self.epoch_previous_weights, logs)\n+\n+\n+class HyperparameterScheduler(tf.keras.callbacks.Callback):\n+ \"\"\"Generic hyperparameter scheduler.\n+\n+ # Arguments:\n+ schedule: a function that takes an epoch index as input\n+ (integer, indexed from 0) and returns a new hyperparameter as output.\n+ hyperparameter: str. the name of the hyperparameter to be scheduled.\n+ verbose: int. 0: quiet, 1: update messages.\n+ \"\"\"\n+\n+ def __init__(self, schedule, hyperparameter, verbose=0):\n+ super(HyperparameterScheduler, self).__init__()\n+ self.schedule = schedule\n+ self.hyperparameter = hyperparameter\n+ self.verbose = verbose\n+\n+ def on_epoch_begin(self, epoch, logs=None):\n+ if not hasattr(self.model.optimizer, self.hyperparameter):\n+ raise ValueError(\n+ f'Optimizer must have a \"{self.hyperparameter}\" attribute.'\n+ )\n+\n+ hp = getattr(self.model.optimizer, self.hyperparameter)\n+ try: # new API\n+ hyperparameter_val = tf.keras.backend.get_value(hp)\n+ hyperparameter_val = self.schedule(epoch, hyperparameter_val)\n+ except TypeError: # Support for old API for backward compatibility\n+ hyperparameter_val = self.schedule(epoch)\n+\n+ tf.keras.backend.set_value(hp, hyperparameter_val)\n+\n+ if self.verbose > 0:\n+ print(\n+ f\"Epoch {epoch + 1}: {self.hyperparameter} changning to {tf.keras.backend.get_value(hp)}.\"\n+ )\n+\n+ def on_epoch_end(self, epoch, logs=None):\n+ logs = logs or {}\n+ hp = getattr(self.model.optimizer, self.hyperparameter)\n+ logs[self.hyperparameter] = tf.keras.backend.get_value(hp)\ndiff --git a/larq/optimizers_v1.py b/larq/optimizers_v1.py\n--- a/larq/optimizers_v1.py\n+++ b/larq/optimizers_v1.py\n@@ -137,8 +137,8 @@\n with tf.keras.backend.name_scope(self.__class__.__name__):\n \n self.fp_optimizer = fp_optimizer\n- self.threshold = threshold\n- self.gamma = gamma\n+ self.threshold = tf.keras.backend.variable(threshold, name=\"threshold\")\n+ self.gamma = tf.keras.backend.variable(gamma, name=\"gamma\")\n \n def get_updates(self, loss, params):\n grads = self.get_gradients(loss, params)\n@@ -181,8 +181,8 @@\n def get_config(self):\n fp_optimizer_config = self.fp_optimizer.get_config()\n config = {\n- \"threshold\": self.threshold,\n- \"gamma\": self.gamma,\n+ \"threshold\": float(tf.keras.backend.get_value(self.threshold)),\n+ \"gamma\": float(tf.keras.backend.get_value(self.gamma)),\n \"fp_optimizer\": {\n \"class_name\": self.fp_optimizer.__class__.__name__,\n \"config\": fp_optimizer_config,\n", "issue": "Generic hyperparameter scheduler \nThe Idea is to have a TF scheduler which is optimizer/hyperparameter agnostic. \r\nAs an example, it can be used for gamma, threshold or learning rate of the floating point optimizer in Bop.\n", "before_files": [{"content": "import tensorflow as tf\nimport numpy as np\n\n\nclass QuantizationLogger(tf.keras.callbacks.Callback):\n \"\"\"Callback that adds quantization specific metrics.\n\n !!! note \"\"\n In order for metrics to be picked up by TensorBoard this callback needs to be\n applied before the TensorBoard callback and use the same update frequency.\n\n !!! 
example\n ```python\n callbacks = [QuantizationLogger(), tf.keras.callbacks.TensorBoard()]\n model.fit(X_train, Y_train, callbacks=callbacks)\n ```\n\n # Metrics\n - `changed_quantization_ration`: The ration of quantized weights in each layer that\n changed during the weight update.\n\n # Arguments\n update_freq: `'batch'` or `'epoch'` or integer. When using `'batch'`, computes the\n metrics after each batch. The same applies for `'epoch'`. If using an integer\n the callback will compute the metrics every `update_freq` batches.\n Note that computing too frequently can slow down training.\n \"\"\"\n\n def __init__(self, update_freq=\"epoch\"):\n super().__init__()\n self.batch_previous_weights = {}\n self.epoch_previous_weights = {}\n self.update_freq = update_freq if update_freq != \"batch\" else 1\n self._quantized_weights = []\n self._quantized_weight_names = []\n\n def set_model(self, model):\n super().set_model(model)\n for layer in model.layers:\n if hasattr(layer, \"quantized_weights\"):\n for i, weight in enumerate(layer.quantized_weights):\n self._quantized_weights.append(weight)\n self._quantized_weight_names.append(\n layer.name if i == 0 else f\"{layer.name}_{i}\"\n )\n\n def _maybe_log_and_store(self, storage, logs, should_log=True, should_store=True):\n if should_log or should_store:\n values = tf.keras.backend.batch_get_value(self._quantized_weights)\n for key, value in zip(self._quantized_weight_names, values):\n value = value.astype(np.int8)\n if should_log:\n logs[f\"changed_quantization_ration/{key.replace(':', '_')}\"] = 1 - (\n np.count_nonzero(value == storage[key]) / value.size\n )\n if should_store:\n storage[key] = value\n\n if should_log and not should_store:\n # We don't need it in the next batch anymore\n storage = {}\n\n def on_batch_end(self, batch, logs=None):\n if self.update_freq != \"epoch\":\n self._maybe_log_and_store(\n self.batch_previous_weights,\n logs,\n should_log=batch > 0 and (batch + 1) % self.update_freq == 0,\n should_store=(batch + 2) % self.update_freq == 0,\n )\n\n def on_train_begin(self, logs=None):\n self._maybe_log_and_store(self.epoch_previous_weights, logs, should_log=False)\n\n def on_epoch_end(self, epoch, logs=None):\n self._maybe_log_and_store(self.epoch_previous_weights, logs)\n", "path": "larq/callbacks.py"}, {"content": "import tensorflow as tf\nimport numpy as np\nimport larq as lq\n\nfrom larq import utils\nfrom copy import deepcopy\n\n\[email protected]_keras_custom_object\nclass XavierLearningRateScaling(tf.keras.optimizers.Optimizer):\n \"\"\"Optimizer wrapper for Xavier Learning Rate Scaling\n\n Scale the weights learning rates respectively with the weights initialization\n\n !!! note \"\"\n This is a wrapper and does not implement any optimization algorithm.\n\n !!! 
example\n ```python\n optimizer = lq.optimizers.XavierLearningRateScaling(\n tf.keras.optimizers.Adam(0.01), model\n )\n ```\n\n # Arguments\n optimizer: A `tf.keras.optimizers.Optimizer`\n model: A `tf.keras.Model`\n\n # References\n - [BinaryConnect: Training Deep Neural Networks with binary weights during\n propagations](https://arxiv.org/abs/1511.00363)\n \"\"\"\n\n def __init__(self, optimizer, model):\n if int(tf.__version__[0]) == 2:\n raise NotImplementedError(\n \"XavierLearningRateScaling is not supported by Tensorflow 2.0.\"\n )\n\n if not isinstance(optimizer, tf.keras.optimizers.Optimizer):\n raise ValueError(\n f\"Expected tf.keras.optimizers.Optimizer, received {type(optimizer)}.\"\n )\n self.optimizer = optimizer\n\n if isinstance(model, tf.keras.Model):\n self.multipliers = {}\n for layer in model.layers:\n if hasattr(layer, \"quantized_latent_weights\"):\n for weight in layer.quantized_latent_weights:\n self.multipliers[weight.name] = self.get_lr_multiplier(weight)\n elif isinstance(model, dict):\n self.multipliers = model\n else:\n raise ValueError(f\"Expected tf.keras.Model or dict, received {type(model)}\")\n\n def get_lr_multiplier(self, weight):\n shape = weight.get_shape().as_list()\n n_input = shape[-2]\n n_output = shape[-1]\n if len(shape) == 4:\n kernelsize = np.prod(shape[:-2])\n coeff = 1.0 / np.sqrt(1.5 / ((kernelsize * (n_input + n_output))))\n elif len(shape) == 2:\n coeff = 1.0 / np.sqrt(1.5 / ((1.0 * (n_input + n_output))))\n else:\n raise NotImplementedError(\n \"Xavier Learning rate scaling not implimented for this kernelsize\"\n )\n return coeff\n\n def get_updates(self, loss, params):\n mult_lr_params = [p for p in params if p.name in self.multipliers]\n base_lr_params = [p for p in params if p.name not in self.multipliers]\n\n updates = []\n base_lr = self.optimizer.lr\n for param in mult_lr_params:\n self.optimizer.lr = base_lr * self.multipliers[param.name]\n updates.extend(self.optimizer.get_updates(loss, [param]))\n\n self.optimizer.lr = base_lr\n updates.extend(self.optimizer.get_updates(loss, base_lr_params))\n\n return updates\n\n def __getattr__(self, name):\n return getattr(self.optimizer, name)\n\n def get_config(self):\n return {\n \"optimizer\": {\n \"class_name\": self.optimizer.__class__.__name__,\n \"config\": self.optimizer.get_config(),\n },\n \"multipliers\": self.multipliers,\n }\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n optimizer = tf.keras.optimizers.deserialize(\n config.pop(\"optimizer\"), custom_objects=custom_objects\n )\n return cls(optimizer, config[\"multipliers\"])\n\n\[email protected]_keras_custom_object\nclass Bop(tf.keras.optimizers.Optimizer):\n \"\"\"Binary optimizer (Bop).\n\n Bop is a latent-free optimizer for Binarized Neural Networks (BNNs).\n\n !!! 
example\n ```python\n optimizer = lq.optimizers.Bop(fp_optimizer=tf.keras.optimizers.Adam(0.01))\n\n ```\n\n # Arguments\n fp_optimizer: a `tf.keras.optimizers.Optimizer`.\n threshold: determines to whether to flip each weight.\n gamma: the adaptivity rate.\n name: name of the optimizer.\n\n # References\n - [Latent Weights Do Not Exist: Rethinking Binarized Neural Network Optimization](https://arxiv.org/abs/1906.02107)\n \"\"\"\n\n def __init__(self, fp_optimizer, threshold=1e-5, gamma=1e-2, name=\"Bop\", **kwargs):\n super().__init__(**kwargs)\n\n if not isinstance(fp_optimizer, tf.keras.optimizers.Optimizer):\n raise TypeError(\n f\"Expected tf.keras.optimizers.Optimizer, received {type(fp_optimizer)}.\"\n )\n\n with tf.keras.backend.name_scope(self.__class__.__name__):\n\n self.fp_optimizer = fp_optimizer\n self.threshold = threshold\n self.gamma = gamma\n\n def get_updates(self, loss, params):\n grads = self.get_gradients(loss, params)\n self.updates = []\n\n ms = [\n tf.keras.backend.zeros(\n tf.keras.backend.int_shape(p), dtype=tf.keras.backend.dtype(p)\n )\n for p in params\n ]\n\n fp_params = []\n\n for p, g, m in zip(params, grads, ms):\n if self.is_binary(p):\n m_t = (1 - self.gamma) * m + self.gamma * g\n\n self.updates.append(tf.assign(m, m_t))\n self.updates.append(\n tf.assign(\n p, lq.quantizers.sign(-p * tf.sign(p * m_t - self.threshold))\n )\n )\n\n else:\n fp_params.append(p)\n\n return self.updates + self.fp_optimizer.get_updates(loss, fp_params)\n\n @staticmethod\n def is_binary(var):\n return \"/kernel\" in var.name and \"quant_\" in var.name\n\n def __getattr__(self, name):\n if name == \"lr\":\n return self.fp_optimizer.lr\n return super().__getattr__(name)\n\n def get_config(self):\n fp_optimizer_config = self.fp_optimizer.get_config()\n config = {\n \"threshold\": self.threshold,\n \"gamma\": self.gamma,\n \"fp_optimizer\": {\n \"class_name\": self.fp_optimizer.__class__.__name__,\n \"config\": fp_optimizer_config,\n },\n }\n return {**super().get_config(), **config}\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n new_config = deepcopy(config)\n fp_optimizer = tf.keras.optimizers.deserialize(\n new_config[\"fp_optimizer\"], custom_objects=custom_objects\n )\n new_config.pop(\"fp_optimizer\", None)\n return cls(fp_optimizer, **new_config)\n", "path": "larq/optimizers_v1.py"}], "after_files": [{"content": "import tensorflow as tf\nimport numpy as np\n\n\nclass QuantizationLogger(tf.keras.callbacks.Callback):\n \"\"\"Callback that adds quantization specific metrics.\n\n !!! note \"\"\n In order for metrics to be picked up by TensorBoard this callback needs to be\n applied before the TensorBoard callback and use the same update frequency.\n\n !!! example\n ```python\n callbacks = [QuantizationLogger(), tf.keras.callbacks.TensorBoard()]\n model.fit(X_train, Y_train, callbacks=callbacks)\n ```\n\n # Metrics\n - `changed_quantization_ration`: The ration of quantized weights in each layer that\n changed during the weight update.\n\n # Arguments\n update_freq: `'batch'` or `'epoch'` or integer. When using `'batch'`, computes the\n metrics after each batch. The same applies for `'epoch'`. 
If using an integer\n the callback will compute the metrics every `update_freq` batches.\n Note that computing too frequently can slow down training.\n \"\"\"\n\n def __init__(self, update_freq=\"epoch\"):\n super().__init__()\n self.batch_previous_weights = {}\n self.epoch_previous_weights = {}\n self.update_freq = update_freq if update_freq != \"batch\" else 1\n self._quantized_weights = []\n self._quantized_weight_names = []\n\n def set_model(self, model):\n super().set_model(model)\n for layer in model.layers:\n if hasattr(layer, \"quantized_weights\"):\n for i, weight in enumerate(layer.quantized_weights):\n self._quantized_weights.append(weight)\n self._quantized_weight_names.append(\n layer.name if i == 0 else f\"{layer.name}_{i}\"\n )\n\n def _maybe_log_and_store(self, storage, logs, should_log=True, should_store=True):\n if should_log or should_store:\n values = tf.keras.backend.batch_get_value(self._quantized_weights)\n for key, value in zip(self._quantized_weight_names, values):\n value = value.astype(np.int8)\n if should_log:\n logs[f\"changed_quantization_ration/{key.replace(':', '_')}\"] = 1 - (\n np.count_nonzero(value == storage[key]) / value.size\n )\n if should_store:\n storage[key] = value\n\n if should_log and not should_store:\n # We don't need it in the next batch anymore\n storage = {}\n\n def on_batch_end(self, batch, logs=None):\n if self.update_freq != \"epoch\":\n self._maybe_log_and_store(\n self.batch_previous_weights,\n logs,\n should_log=batch > 0 and (batch + 1) % self.update_freq == 0,\n should_store=(batch + 2) % self.update_freq == 0,\n )\n\n def on_train_begin(self, logs=None):\n self._maybe_log_and_store(self.epoch_previous_weights, logs, should_log=False)\n\n def on_epoch_end(self, epoch, logs=None):\n self._maybe_log_and_store(self.epoch_previous_weights, logs)\n\n\nclass HyperparameterScheduler(tf.keras.callbacks.Callback):\n \"\"\"Generic hyperparameter scheduler.\n\n # Arguments:\n schedule: a function that takes an epoch index as input\n (integer, indexed from 0) and returns a new hyperparameter as output.\n hyperparameter: str. the name of the hyperparameter to be scheduled.\n verbose: int. 
0: quiet, 1: update messages.\n \"\"\"\n\n def __init__(self, schedule, hyperparameter, verbose=0):\n super(HyperparameterScheduler, self).__init__()\n self.schedule = schedule\n self.hyperparameter = hyperparameter\n self.verbose = verbose\n\n def on_epoch_begin(self, epoch, logs=None):\n if not hasattr(self.model.optimizer, self.hyperparameter):\n raise ValueError(\n f'Optimizer must have a \"{self.hyperparameter}\" attribute.'\n )\n\n hp = getattr(self.model.optimizer, self.hyperparameter)\n try: # new API\n hyperparameter_val = tf.keras.backend.get_value(hp)\n hyperparameter_val = self.schedule(epoch, hyperparameter_val)\n except TypeError: # Support for old API for backward compatibility\n hyperparameter_val = self.schedule(epoch)\n\n tf.keras.backend.set_value(hp, hyperparameter_val)\n\n if self.verbose > 0:\n print(\n f\"Epoch {epoch + 1}: {self.hyperparameter} changning to {tf.keras.backend.get_value(hp)}.\"\n )\n\n def on_epoch_end(self, epoch, logs=None):\n logs = logs or {}\n hp = getattr(self.model.optimizer, self.hyperparameter)\n logs[self.hyperparameter] = tf.keras.backend.get_value(hp)\n", "path": "larq/callbacks.py"}, {"content": "import tensorflow as tf\nimport numpy as np\nimport larq as lq\n\nfrom larq import utils\nfrom copy import deepcopy\n\n\[email protected]_keras_custom_object\nclass XavierLearningRateScaling(tf.keras.optimizers.Optimizer):\n \"\"\"Optimizer wrapper for Xavier Learning Rate Scaling\n\n Scale the weights learning rates respectively with the weights initialization\n\n !!! note \"\"\n This is a wrapper and does not implement any optimization algorithm.\n\n !!! example\n ```python\n optimizer = lq.optimizers.XavierLearningRateScaling(\n tf.keras.optimizers.Adam(0.01), model\n )\n ```\n\n # Arguments\n optimizer: A `tf.keras.optimizers.Optimizer`\n model: A `tf.keras.Model`\n\n # References\n - [BinaryConnect: Training Deep Neural Networks with binary weights during\n propagations](https://arxiv.org/abs/1511.00363)\n \"\"\"\n\n def __init__(self, optimizer, model):\n if int(tf.__version__[0]) == 2:\n raise NotImplementedError(\n \"XavierLearningRateScaling is not supported by Tensorflow 2.0.\"\n )\n\n if not isinstance(optimizer, tf.keras.optimizers.Optimizer):\n raise ValueError(\n f\"Expected tf.keras.optimizers.Optimizer, received {type(optimizer)}.\"\n )\n self.optimizer = optimizer\n\n if isinstance(model, tf.keras.Model):\n self.multipliers = {}\n for layer in model.layers:\n if hasattr(layer, \"quantized_latent_weights\"):\n for weight in layer.quantized_latent_weights:\n self.multipliers[weight.name] = self.get_lr_multiplier(weight)\n elif isinstance(model, dict):\n self.multipliers = model\n else:\n raise ValueError(f\"Expected tf.keras.Model or dict, received {type(model)}\")\n\n def get_lr_multiplier(self, weight):\n shape = weight.get_shape().as_list()\n n_input = shape[-2]\n n_output = shape[-1]\n if len(shape) == 4:\n kernelsize = np.prod(shape[:-2])\n coeff = 1.0 / np.sqrt(1.5 / ((kernelsize * (n_input + n_output))))\n elif len(shape) == 2:\n coeff = 1.0 / np.sqrt(1.5 / ((1.0 * (n_input + n_output))))\n else:\n raise NotImplementedError(\n \"Xavier Learning rate scaling not implimented for this kernelsize\"\n )\n return coeff\n\n def get_updates(self, loss, params):\n mult_lr_params = [p for p in params if p.name in self.multipliers]\n base_lr_params = [p for p in params if p.name not in self.multipliers]\n\n updates = []\n base_lr = self.optimizer.lr\n for param in mult_lr_params:\n self.optimizer.lr = base_lr * 
self.multipliers[param.name]\n updates.extend(self.optimizer.get_updates(loss, [param]))\n\n self.optimizer.lr = base_lr\n updates.extend(self.optimizer.get_updates(loss, base_lr_params))\n\n return updates\n\n def __getattr__(self, name):\n return getattr(self.optimizer, name)\n\n def get_config(self):\n return {\n \"optimizer\": {\n \"class_name\": self.optimizer.__class__.__name__,\n \"config\": self.optimizer.get_config(),\n },\n \"multipliers\": self.multipliers,\n }\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n optimizer = tf.keras.optimizers.deserialize(\n config.pop(\"optimizer\"), custom_objects=custom_objects\n )\n return cls(optimizer, config[\"multipliers\"])\n\n\[email protected]_keras_custom_object\nclass Bop(tf.keras.optimizers.Optimizer):\n \"\"\"Binary optimizer (Bop).\n\n Bop is a latent-free optimizer for Binarized Neural Networks (BNNs).\n\n !!! example\n ```python\n optimizer = lq.optimizers.Bop(fp_optimizer=tf.keras.optimizers.Adam(0.01))\n\n ```\n\n # Arguments\n fp_optimizer: a `tf.keras.optimizers.Optimizer`.\n threshold: determines to whether to flip each weight.\n gamma: the adaptivity rate.\n name: name of the optimizer.\n\n # References\n - [Latent Weights Do Not Exist: Rethinking Binarized Neural Network Optimization](https://arxiv.org/abs/1906.02107)\n \"\"\"\n\n def __init__(self, fp_optimizer, threshold=1e-5, gamma=1e-2, name=\"Bop\", **kwargs):\n super().__init__(**kwargs)\n\n if not isinstance(fp_optimizer, tf.keras.optimizers.Optimizer):\n raise TypeError(\n f\"Expected tf.keras.optimizers.Optimizer, received {type(fp_optimizer)}.\"\n )\n\n with tf.keras.backend.name_scope(self.__class__.__name__):\n\n self.fp_optimizer = fp_optimizer\n self.threshold = tf.keras.backend.variable(threshold, name=\"threshold\")\n self.gamma = tf.keras.backend.variable(gamma, name=\"gamma\")\n\n def get_updates(self, loss, params):\n grads = self.get_gradients(loss, params)\n self.updates = []\n\n ms = [\n tf.keras.backend.zeros(\n tf.keras.backend.int_shape(p), dtype=tf.keras.backend.dtype(p)\n )\n for p in params\n ]\n\n fp_params = []\n\n for p, g, m in zip(params, grads, ms):\n if self.is_binary(p):\n m_t = (1 - self.gamma) * m + self.gamma * g\n\n self.updates.append(tf.assign(m, m_t))\n self.updates.append(\n tf.assign(\n p, lq.quantizers.sign(-p * tf.sign(p * m_t - self.threshold))\n )\n )\n\n else:\n fp_params.append(p)\n\n return self.updates + self.fp_optimizer.get_updates(loss, fp_params)\n\n @staticmethod\n def is_binary(var):\n return \"/kernel\" in var.name and \"quant_\" in var.name\n\n def __getattr__(self, name):\n if name == \"lr\":\n return self.fp_optimizer.lr\n return super().__getattr__(name)\n\n def get_config(self):\n fp_optimizer_config = self.fp_optimizer.get_config()\n config = {\n \"threshold\": float(tf.keras.backend.get_value(self.threshold)),\n \"gamma\": float(tf.keras.backend.get_value(self.gamma)),\n \"fp_optimizer\": {\n \"class_name\": self.fp_optimizer.__class__.__name__,\n \"config\": fp_optimizer_config,\n },\n }\n return {**super().get_config(), **config}\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n new_config = deepcopy(config)\n fp_optimizer = tf.keras.optimizers.deserialize(\n new_config[\"fp_optimizer\"], custom_objects=custom_objects\n )\n new_config.pop(\"fp_optimizer\", None)\n return cls(fp_optimizer, **new_config)\n", "path": "larq/optimizers_v1.py"}]} | 3,083 | 727 |
gh_patches_debug_57452 | rasdani/github-patches | git_diff | ManageIQ__integration_tests-204 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Model up yaml configs from the DB
Various yaml configs are stored in the database, including the one we'll most likely want to inspect, which is the vmdb config.
The schema, for our purposes, is two fields in the `configurations` table, `typ` and `settings`. The interface that I'm leaning toward is configurations[typ] = dictified_yaml(settings), if that makes sense.
So, for example, if we wanted to see whether or not to get a list of public images from ec2, the lookup would be `configurations['vmdb']['ems_refresh']['ec2']['get_public_images']`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `fixtures/cfmedb.py`
Content:
```
1 '''
2
3
4 Created on Jun 14, 2013
5
6 @author: bcrochet
7
8 '''
9 # -*- coding: utf-8 -*-
10 # pylint: disable=C0103
11 # pylint: disable=E1101
12 import pytest
13 from urlparse import urlparse
14 from sqlalchemy import create_engine
15 from sqlalchemy.orm import sessionmaker
16 import ConfigParser
17
18 def pytest_addoption(parser):
19 '''Create the options for py.test'''
20 config = ConfigParser.ConfigParser(defaults={
21 'cfmedburl': ''
22 })
23 config.read('cfme.cfg')
24
25 group = parser.getgroup('cfme', 'cfme')
26 group.addoption('--cfmedburl',
27 action='store',
28 dest='cfme_db_url',
29 default=config.get('DEFAULT', 'cfmedburl'),
30 metavar='url',
31 help='url for CFME database to connect to')
32
33 def pytest_sessionstart(session):
34 '''Setup run for tests'''
35 import db
36 db.cfme_db_url = session.config.option.cfme_db_url
37 if not db.cfme_db_url:
38 # Let's try to figure it out
39 baseurl = session.config.option.base_url
40 baseip = urlparse(baseurl).hostname
41 db.cfme_db_url = "postgres://root:smartvm@%s:5432/vmdb_production" \
42 % baseip
43 db.engine = create_engine(db.cfme_db_url)
44
45 @pytest.fixture
46 def db_session():
47 '''Creates a database session based on the db url passed on the CLI
48
49 Usage example:
50
51 This is a SQLalchemy (http://www.sqlalchemy.org/) session. You can make
52 queries and create new rows in the database with this session.
53
54 The available classes are dynamically generated from the database. Consult
55 db/__init__.py for a list of available class -> table mappings.
56
57 An example test:
58
59 @pytest.mark.nondestructive
60 def test_that_tries_for_db(db_session):
61 import db
62 session = db_session
63 for instance in session.query(db.ExtManagementSystem).order_by(
64 db.ExtManagementSystem.id):
65 print instance.name, instance.hostname
66
67 This 'test' prints the management systems from the database.
68 '''
69 import db
70 Session = sessionmaker(bind=db.engine)
71 return Session()
72
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/fixtures/cfmedb.py b/fixtures/cfmedb.py
--- a/fixtures/cfmedb.py
+++ b/fixtures/cfmedb.py
@@ -70,3 +70,13 @@
Session = sessionmaker(bind=db.engine)
return Session()
[email protected]
+def db_yamls(db_session):
+ '''Returns the yamls from the db configuration table as a dict'''
+
+ import db
+ import yaml
+ configs = db_session.query(db.Configuration.typ, db.Configuration.settings)
+ data = {name: yaml.load(settings) for name, settings in configs}
+
+ return data
| {"golden_diff": "diff --git a/fixtures/cfmedb.py b/fixtures/cfmedb.py\n--- a/fixtures/cfmedb.py\n+++ b/fixtures/cfmedb.py\n@@ -70,3 +70,13 @@\n Session = sessionmaker(bind=db.engine)\n return Session()\n \[email protected]\n+def db_yamls(db_session):\n+ '''Returns the yamls from the db configuration table as a dict'''\n+\n+ import db\n+ import yaml\n+ configs = db_session.query(db.Configuration.typ, db.Configuration.settings)\n+ data = {name: yaml.load(settings) for name, settings in configs}\n+\n+ return data\n", "issue": "Model up yaml configs from the DB\nVarious yaml configs are stored in the database, including the one we'll most likely want to imspect, which is the vmdb config.\n\nThe schema, for our purposes, is two fields in the `configurations` table, `typ` and `settings`. The interface that I'm leaning toward is configurations[typ] = dictified_yaml(settings), if that makes sense. \n\nSo, for example, if we wanted to see whether or not to get a list of public images from ec2, the lookup would be `configurations['vmdb']['ems_refresh']['ec2']['get_public_images']`\n\n", "before_files": [{"content": "'''\n\n\nCreated on Jun 14, 2013\n\n@author: bcrochet\n\n'''\n# -*- coding: utf-8 -*-\n# pylint: disable=C0103\n# pylint: disable=E1101\nimport pytest\nfrom urlparse import urlparse\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nimport ConfigParser\n\ndef pytest_addoption(parser):\n '''Create the options for py.test'''\n config = ConfigParser.ConfigParser(defaults={\n 'cfmedburl': ''\n })\n config.read('cfme.cfg')\n\n group = parser.getgroup('cfme', 'cfme')\n group.addoption('--cfmedburl',\n action='store',\n dest='cfme_db_url',\n default=config.get('DEFAULT', 'cfmedburl'),\n metavar='url',\n help='url for CFME database to connect to')\n\ndef pytest_sessionstart(session):\n '''Setup run for tests'''\n import db\n db.cfme_db_url = session.config.option.cfme_db_url\n if not db.cfme_db_url:\n # Let's try to figure it out\n baseurl = session.config.option.base_url\n baseip = urlparse(baseurl).hostname\n db.cfme_db_url = \"postgres://root:smartvm@%s:5432/vmdb_production\" \\\n % baseip\n db.engine = create_engine(db.cfme_db_url)\n\[email protected]\ndef db_session():\n '''Creates a database session based on the db url passed on the CLI\n\n Usage example:\n\n This is a SQLalchemy (http://www.sqlalchemy.org/) session. You can make\n queries and create new rows in the database with this session.\n\n The available classes are dynamically generated from the database. 
Consult\n db/__init__.py for a list of available class -> table mappings.\n\n An example test:\n\n @pytest.mark.nondestructive\n def test_that_tries_for_db(db_session):\n import db\n session = db_session\n for instance in session.query(db.ExtManagementSystem).order_by(\n db.ExtManagementSystem.id):\n print instance.name, instance.hostname\n\n This 'test' prints the management systems from the database.\n '''\n import db\n Session = sessionmaker(bind=db.engine)\n return Session()\n\n", "path": "fixtures/cfmedb.py"}], "after_files": [{"content": "'''\n\n\nCreated on Jun 14, 2013\n\n@author: bcrochet\n\n'''\n# -*- coding: utf-8 -*-\n# pylint: disable=C0103\n# pylint: disable=E1101\nimport pytest\nfrom urlparse import urlparse\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nimport ConfigParser\n\ndef pytest_addoption(parser):\n '''Create the options for py.test'''\n config = ConfigParser.ConfigParser(defaults={\n 'cfmedburl': ''\n })\n config.read('cfme.cfg')\n\n group = parser.getgroup('cfme', 'cfme')\n group.addoption('--cfmedburl',\n action='store',\n dest='cfme_db_url',\n default=config.get('DEFAULT', 'cfmedburl'),\n metavar='url',\n help='url for CFME database to connect to')\n\ndef pytest_sessionstart(session):\n '''Setup run for tests'''\n import db\n db.cfme_db_url = session.config.option.cfme_db_url\n if not db.cfme_db_url:\n # Let's try to figure it out\n baseurl = session.config.option.base_url\n baseip = urlparse(baseurl).hostname\n db.cfme_db_url = \"postgres://root:smartvm@%s:5432/vmdb_production\" \\\n % baseip\n db.engine = create_engine(db.cfme_db_url)\n\[email protected]\ndef db_session():\n '''Creates a database session based on the db url passed on the CLI\n\n Usage example:\n\n This is a SQLalchemy (http://www.sqlalchemy.org/) session. You can make\n queries and create new rows in the database with this session.\n\n The available classes are dynamically generated from the database. Consult\n db/__init__.py for a list of available class -> table mappings.\n\n An example test:\n\n @pytest.mark.nondestructive\n def test_that_tries_for_db(db_session):\n import db\n session = db_session\n for instance in session.query(db.ExtManagementSystem).order_by(\n db.ExtManagementSystem.id):\n print instance.name, instance.hostname\n\n This 'test' prints the management systems from the database.\n '''\n import db\n Session = sessionmaker(bind=db.engine)\n return Session()\n\[email protected]\ndef db_yamls(db_session):\n '''Returns the yamls from the db configuration table as a dict'''\n\n import db\n import yaml\n configs = db_session.query(db.Configuration.typ, db.Configuration.settings)\n data = {name: yaml.load(settings) for name, settings in configs}\n\n return data\n", "path": "fixtures/cfmedb.py"}]} | 1,030 | 144 |
gh_patches_debug_21975 | rasdani/github-patches | git_diff | getpelican__pelican-1515 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Built-in server relies on file name suffix to guess Content-Type
Pelican's built-in web server relies on the standard `SimpleHTTPServer` module to guess the appropriate `Content-Type` header for the files it serves. Sadly, that implementation relies on file name suffix to make its guesses. When I configure my site to use URLs without suffixes...
```
'PAGE_URL': 'pages/{slug}'
```
...the server sends `Content-Type: application/octet-stream`, and my browser refuses to render the HTML.
This could be better, at least on systems that have the python-magic package installed.
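For illustration, a minimal sketch of a handler that falls back to content sniffing might look like this (the class name is made up; the only extra assumption is the optional python-magic package providing `magic.from_file`):

```python
try:
    import SimpleHTTPServer as srvmod  # Python 2
except ImportError:
    import http.server as srvmod  # Python 3

try:
    from magic import from_file as magic_from_file
except ImportError:
    magic_from_file = None  # python-magic not installed; keep suffix guessing


class MagicAwareRequestHandler(srvmod.SimpleHTTPRequestHandler):
    def guess_type(self, path):
        # Start with the stock suffix-based guess.
        mimetype = srvmod.SimpleHTTPRequestHandler.guess_type(self, path)
        # If that guess is the generic fallback, sniff the file content instead.
        if mimetype == 'application/octet-stream' and magic_from_file:
            mimetype = magic_from_file(path, mime=True)
        return mimetype
```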
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pelican/server.py`
Content:
```
1 from __future__ import print_function
2 import os
3 import sys
4 import logging
5 try:
6 import SimpleHTTPServer as srvmod
7 except ImportError:
8 import http.server as srvmod # NOQA
9
10 try:
11 import SocketServer as socketserver
12 except ImportError:
13 import socketserver # NOQA
14
15 PORT = len(sys.argv) in (2, 3) and int(sys.argv[1]) or 8000
16 SERVER = len(sys.argv) == 3 and sys.argv[2] or ""
17 SUFFIXES = ['', '.html', '/index.html']
18
19
20 class ComplexHTTPRequestHandler(srvmod.SimpleHTTPRequestHandler):
21 def do_GET(self):
22 # Try to detect file by applying various suffixes
23 for suffix in SUFFIXES:
24 if not hasattr(self, 'original_path'):
25 self.original_path = self.path
26
27 self.path = self.original_path + suffix
28 path = self.translate_path(self.path)
29
30 if os.path.exists(path):
31 srvmod.SimpleHTTPRequestHandler.do_GET(self)
32 logging.info("Found `%s`." % self.path)
33 break
34
35 logging.info("Tried to find `%s`, but it doesn't exist.",
36 self.path)
37 else:
38 # Fallback if there were no matches
39 logging.warning("Unable to find `%s` or variations.",
40 self.original_path)
41
42 Handler = ComplexHTTPRequestHandler
43
44 socketserver.TCPServer.allow_reuse_address = True
45 try:
46 httpd = socketserver.TCPServer((SERVER, PORT), Handler)
47 except OSError as e:
48 logging.error("Could not listen on port %s, server %s.", PORT, SERVER)
49 sys.exit(getattr(e, 'exitcode', 1))
50
51
52 logging.info("Serving at port %s, server %s.", PORT, SERVER)
53 try:
54 httpd.serve_forever()
55 except KeyboardInterrupt as e:
56 logging.info("Shutting down server.")
57 httpd.socket.close()
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pelican/server.py b/pelican/server.py
--- a/pelican/server.py
+++ b/pelican/server.py
@@ -12,6 +12,11 @@
except ImportError:
import socketserver # NOQA
+try:
+ from magic import from_file as magic_from_file
+except ImportError:
+ magic_from_file = None
+
PORT = len(sys.argv) in (2, 3) and int(sys.argv[1]) or 8000
SERVER = len(sys.argv) == 3 and sys.argv[2] or ""
SUFFIXES = ['', '.html', '/index.html']
@@ -39,6 +44,18 @@
logging.warning("Unable to find `%s` or variations.",
self.original_path)
+ def guess_type(self, path):
+ """Guess at the mime type for the specified file.
+ """
+ mimetype = srvmod.SimpleHTTPRequestHandler.guess_type(self, path)
+
+ # If the default guess is too generic, try the python-magic library
+ if mimetype == 'application/octet-stream' and magic_from_file:
+ mimetype = magic_from_file(path, mime=True)
+
+ return mimetype
+
+
Handler = ComplexHTTPRequestHandler
socketserver.TCPServer.allow_reuse_address = True
| {"golden_diff": "diff --git a/pelican/server.py b/pelican/server.py\n--- a/pelican/server.py\n+++ b/pelican/server.py\n@@ -12,6 +12,11 @@\n except ImportError:\n import socketserver # NOQA\n \n+try:\n+ from magic import from_file as magic_from_file\n+except ImportError:\n+ magic_from_file = None\n+\n PORT = len(sys.argv) in (2, 3) and int(sys.argv[1]) or 8000\n SERVER = len(sys.argv) == 3 and sys.argv[2] or \"\"\n SUFFIXES = ['', '.html', '/index.html']\n@@ -39,6 +44,18 @@\n logging.warning(\"Unable to find `%s` or variations.\",\n self.original_path)\n \n+ def guess_type(self, path):\n+ \"\"\"Guess at the mime type for the specified file.\n+ \"\"\"\n+ mimetype = srvmod.SimpleHTTPRequestHandler.guess_type(self, path)\n+\n+ # If the default guess is too generic, try the python-magic library\n+ if mimetype == 'application/octet-stream' and magic_from_file:\n+ mimetype = magic_from_file(path, mime=True)\n+\n+ return mimetype\n+\n+\n Handler = ComplexHTTPRequestHandler\n \n socketserver.TCPServer.allow_reuse_address = True\n", "issue": "Built-in server relies on file name suffix to guess Content-Type\nPelican's built-in web server relies on the standard `SimpleHTTPServer` module to guess the appropriate `Content-Type` header for the files it serves. Sadly, that implementation relies on file name suffix to make its guesses. When I configure my site to use URLs without suffixes...\n\n```\n'PAGE_URL': 'pages/{slug}'\n```\n\n...the server sends `Content-Type: application/octet-stream`, and my browser refuses to render the HTML.\n\nThis could be better, at least on systems that have the python-magic package installed.\n\n", "before_files": [{"content": "from __future__ import print_function\nimport os\nimport sys\nimport logging\ntry:\n import SimpleHTTPServer as srvmod\nexcept ImportError:\n import http.server as srvmod # NOQA\n\ntry:\n import SocketServer as socketserver\nexcept ImportError:\n import socketserver # NOQA\n\nPORT = len(sys.argv) in (2, 3) and int(sys.argv[1]) or 8000\nSERVER = len(sys.argv) == 3 and sys.argv[2] or \"\"\nSUFFIXES = ['', '.html', '/index.html']\n\n\nclass ComplexHTTPRequestHandler(srvmod.SimpleHTTPRequestHandler):\n def do_GET(self):\n # Try to detect file by applying various suffixes\n for suffix in SUFFIXES:\n if not hasattr(self, 'original_path'):\n self.original_path = self.path\n\n self.path = self.original_path + suffix\n path = self.translate_path(self.path)\n\n if os.path.exists(path):\n srvmod.SimpleHTTPRequestHandler.do_GET(self)\n logging.info(\"Found `%s`.\" % self.path)\n break\n\n logging.info(\"Tried to find `%s`, but it doesn't exist.\",\n self.path)\n else:\n # Fallback if there were no matches\n logging.warning(\"Unable to find `%s` or variations.\",\n self.original_path)\n\nHandler = ComplexHTTPRequestHandler\n\nsocketserver.TCPServer.allow_reuse_address = True\ntry:\n httpd = socketserver.TCPServer((SERVER, PORT), Handler)\nexcept OSError as e:\n logging.error(\"Could not listen on port %s, server %s.\", PORT, SERVER)\n sys.exit(getattr(e, 'exitcode', 1))\n\n\nlogging.info(\"Serving at port %s, server %s.\", PORT, SERVER)\ntry:\n httpd.serve_forever()\nexcept KeyboardInterrupt as e:\n logging.info(\"Shutting down server.\")\n httpd.socket.close()\n", "path": "pelican/server.py"}], "after_files": [{"content": "from __future__ import print_function\nimport os\nimport sys\nimport logging\ntry:\n import SimpleHTTPServer as srvmod\nexcept ImportError:\n import http.server as srvmod # NOQA\n\ntry:\n import SocketServer as socketserver\nexcept 
ImportError:\n import socketserver # NOQA\n\ntry:\n from magic import from_file as magic_from_file\nexcept ImportError:\n magic_from_file = None\n\nPORT = len(sys.argv) in (2, 3) and int(sys.argv[1]) or 8000\nSERVER = len(sys.argv) == 3 and sys.argv[2] or \"\"\nSUFFIXES = ['', '.html', '/index.html']\n\n\nclass ComplexHTTPRequestHandler(srvmod.SimpleHTTPRequestHandler):\n def do_GET(self):\n # Try to detect file by applying various suffixes\n for suffix in SUFFIXES:\n if not hasattr(self, 'original_path'):\n self.original_path = self.path\n\n self.path = self.original_path + suffix\n path = self.translate_path(self.path)\n\n if os.path.exists(path):\n srvmod.SimpleHTTPRequestHandler.do_GET(self)\n logging.info(\"Found `%s`.\" % self.path)\n break\n\n logging.info(\"Tried to find `%s`, but it doesn't exist.\",\n self.path)\n else:\n # Fallback if there were no matches\n logging.warning(\"Unable to find `%s` or variations.\",\n self.original_path)\n\n def guess_type(self, path):\n \"\"\"Guess at the mime type for the specified file.\n \"\"\"\n mimetype = srvmod.SimpleHTTPRequestHandler.guess_type(self, path)\n\n # If the default guess is too generic, try the python-magic library\n if mimetype == 'application/octet-stream' and magic_from_file:\n mimetype = magic_from_file(path, mime=True)\n\n return mimetype\n\n\nHandler = ComplexHTTPRequestHandler\n\nsocketserver.TCPServer.allow_reuse_address = True\ntry:\n httpd = socketserver.TCPServer((SERVER, PORT), Handler)\nexcept OSError as e:\n logging.error(\"Could not listen on port %s, server %s.\", PORT, SERVER)\n sys.exit(getattr(e, 'exitcode', 1))\n\n\nlogging.info(\"Serving at port %s, server %s.\", PORT, SERVER)\ntry:\n httpd.serve_forever()\nexcept KeyboardInterrupt as e:\n logging.info(\"Shutting down server.\")\n httpd.socket.close()\n", "path": "pelican/server.py"}]} | 904 | 287 |
gh_patches_debug_39402 | rasdani/github-patches | git_diff | openai__gym-1013 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MuJoCo render incompatible with newer versions of MuJoCo Py
MujocoEnv's render method assumes the viewer has a 500x500 display. If it is larger than this, it will select pixels from the bottom-left corner when called with `mode='rgb_array'`. There's a comment which suggests this size used to be the default for mujoco-py, but with the latest mujoco-py it defaults to the screen resolution: see opengl_context.pyx:89 in _create_window.
This issue can be fixed by adding the line `self.viewer.opengl_context.set_buffer_size(W, H)` in `_get_viewer` to manually specify the desired frame size. (Oddly, I need to set both dimensions to twice the desired size, I think due to an overzealous workaround for Macs at opengl_context.pyx:113.) But this is pretty hacky.
I'm a little confused why we're rendering anything on the screen for `render(mode='rgb_array')`, which is typically used for video capture in a batch setting. In particular, MjViewer has the annoyance of showing an overlay menu by default, rarely desired for video capture. It might be better to use `mujoco_py.MjRenderContextOffscreen` instead for this purpose. If we allow render to be called with different modes, however, this would require making a breaking change to the interface for `viewer_setup` (or some hacking to make `self.viewer` point to different objects depending on the last call).
I'd be happy to work on a pull request to address these issues if we can agree on a good way forward for this.
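To make the offscreen suggestion concrete, here is a rough sketch of the direction I have in mind (the mixin name, the per-mode viewer cache, and the device id passed to `MjRenderContextOffscreen` are placeholders, not a finished patch; `self.sim` and `viewer_setup` are assumed to come from the MujocoEnv subclass):

```python
import mujoco_py


class PerModeViewerMixin:
    """Sketch for a MujocoEnv-like class: keep one viewer per render mode."""

    def _get_viewer(self, mode):
        self._viewers = getattr(self, '_viewers', {})
        viewer = self._viewers.get(mode)
        if viewer is None:
            if mode == 'human':
                viewer = mujoco_py.MjViewer(self.sim)
            elif mode == 'rgb_array':
                # Offscreen context: no on-screen window, no overlay menu.
                viewer = mujoco_py.MjRenderContextOffscreen(self.sim, 0)
            self.viewer = viewer
            self.viewer_setup()
            self._viewers[mode] = viewer
        return viewer

    def render(self, mode='human', width=500, height=500):
        if mode == 'rgb_array':
            viewer = self._get_viewer(mode)
            viewer.render(width, height)
            data = viewer.read_pixels(width, height, depth=False)
            return data[::-1, :, :]  # frame comes back upside-down
        elif mode == 'human':
            self._get_viewer(mode).render()
```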
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gym/envs/mujoco/mujoco_env.py`
Content:
```
1 import os
2
3 from gym import error, spaces
4 from gym.utils import seeding
5 import numpy as np
6 from os import path
7 import gym
8 import six
9
10 try:
11 import mujoco_py
12 except ImportError as e:
13 raise error.DependencyNotInstalled("{}. (HINT: you need to install mujoco_py, and also perform the setup instructions here: https://github.com/openai/mujoco-py/.)".format(e))
14
15 class MujocoEnv(gym.Env):
16 """Superclass for all MuJoCo environments.
17 """
18
19 def __init__(self, model_path, frame_skip):
20 if model_path.startswith("/"):
21 fullpath = model_path
22 else:
23 fullpath = os.path.join(os.path.dirname(__file__), "assets", model_path)
24 if not path.exists(fullpath):
25 raise IOError("File %s does not exist" % fullpath)
26 self.frame_skip = frame_skip
27 self.model = mujoco_py.load_model_from_path(fullpath)
28 self.sim = mujoco_py.MjSim(self.model)
29 self.data = self.sim.data
30 self.viewer = None
31
32 self.metadata = {
33 'render.modes': ['human', 'rgb_array'],
34 'video.frames_per_second': int(np.round(1.0 / self.dt))
35 }
36
37 self.init_qpos = self.sim.data.qpos.ravel().copy()
38 self.init_qvel = self.sim.data.qvel.ravel().copy()
39 observation, _reward, done, _info = self.step(np.zeros(self.model.nu))
40 assert not done
41 self.obs_dim = observation.size
42
43 bounds = self.model.actuator_ctrlrange.copy()
44 low = bounds[:, 0]
45 high = bounds[:, 1]
46 self.action_space = spaces.Box(low=low, high=high)
47
48 high = np.inf*np.ones(self.obs_dim)
49 low = -high
50 self.observation_space = spaces.Box(low, high)
51
52 self.seed()
53
54 def seed(self, seed=None):
55 self.np_random, seed = seeding.np_random(seed)
56 return [seed]
57
58 # methods to override:
59 # ----------------------------
60
61 def reset_model(self):
62 """
63 Reset the robot degrees of freedom (qpos and qvel).
64 Implement this in each subclass.
65 """
66 raise NotImplementedError
67
68 def viewer_setup(self):
69 """
70 This method is called when the viewer is initialized and after every reset
71 Optionally implement this method, if you need to tinker with camera position
72 and so forth.
73 """
74 pass
75
76 # -----------------------------
77
78 def reset(self):
79 self.sim.reset()
80 ob = self.reset_model()
81 if self.viewer is not None:
82 self.viewer_setup()
83 return ob
84
85 def set_state(self, qpos, qvel):
86 assert qpos.shape == (self.model.nq,) and qvel.shape == (self.model.nv,)
87 old_state = self.sim.get_state()
88 new_state = mujoco_py.MjSimState(old_state.time, qpos, qvel,
89 old_state.act, old_state.udd_state)
90 self.sim.set_state(new_state)
91 self.sim.forward()
92
93 @property
94 def dt(self):
95 return self.model.opt.timestep * self.frame_skip
96
97 def do_simulation(self, ctrl, n_frames):
98 self.sim.data.ctrl[:] = ctrl
99 for _ in range(n_frames):
100 self.sim.step()
101
102 def render(self, mode='human'):
103 if mode == 'rgb_array':
104 self._get_viewer().render()
105 # window size used for old mujoco-py:
106 width, height = 500, 500
107 data = self._get_viewer().read_pixels(width, height, depth=False)
108 # original image is upside-down, so flip it
109 return data[::-1, :, :]
110 elif mode == 'human':
111 self._get_viewer().render()
112
113 def close(self):
114 if self.viewer is not None:
115 # self.viewer.finish()
116 self.viewer = None
117
118 def _get_viewer(self):
119 if self.viewer is None:
120 self.viewer = mujoco_py.MjViewer(self.sim)
121 self.viewer_setup()
122 return self.viewer
123
124 def get_body_com(self, body_name):
125 return self.data.get_body_xpos(body_name)
126
127 def state_vector(self):
128 return np.concatenate([
129 self.sim.data.qpos.flat,
130 self.sim.data.qvel.flat
131 ])
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gym/envs/mujoco/mujoco_env.py b/gym/envs/mujoco/mujoco_env.py
--- a/gym/envs/mujoco/mujoco_env.py
+++ b/gym/envs/mujoco/mujoco_env.py
@@ -12,6 +12,8 @@
except ImportError as e:
raise error.DependencyNotInstalled("{}. (HINT: you need to install mujoco_py, and also perform the setup instructions here: https://github.com/openai/mujoco-py/.)".format(e))
+DEFAULT_SIZE = 500
+
class MujocoEnv(gym.Env):
"""Superclass for all MuJoCo environments.
"""
@@ -28,6 +30,7 @@
self.sim = mujoco_py.MjSim(self.model)
self.data = self.sim.data
self.viewer = None
+ self._viewers = {}
self.metadata = {
'render.modes': ['human', 'rgb_array'],
@@ -78,8 +81,11 @@
def reset(self):
self.sim.reset()
ob = self.reset_model()
- if self.viewer is not None:
+ old_viewer = self.viewer
+ for v in self._viewers.values():
+ self.viewer = v
self.viewer_setup()
+ self.viewer = old_viewer
return ob
def set_state(self, qpos, qvel):
@@ -99,26 +105,31 @@
for _ in range(n_frames):
self.sim.step()
- def render(self, mode='human'):
+ def render(self, mode='human', width=DEFAULT_SIZE, height=DEFAULT_SIZE):
if mode == 'rgb_array':
- self._get_viewer().render()
+ self._get_viewer(mode).render(width, height)
# window size used for old mujoco-py:
- width, height = 500, 500
- data = self._get_viewer().read_pixels(width, height, depth=False)
+ data = self._get_viewer(mode).read_pixels(width, height, depth=False)
# original image is upside-down, so flip it
return data[::-1, :, :]
elif mode == 'human':
- self._get_viewer().render()
+ self._get_viewer(mode).render()
def close(self):
if self.viewer is not None:
# self.viewer.finish()
self.viewer = None
+ self._viewers = {}
- def _get_viewer(self):
+ def _get_viewer(self, mode):
+ self.viewer = self._viewers.get(mode)
if self.viewer is None:
- self.viewer = mujoco_py.MjViewer(self.sim)
+ if mode == 'human':
+ self.viewer = mujoco_py.MjViewer(self.sim)
+ elif mode == 'rgb_array':
+ self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, 0)
self.viewer_setup()
+ self._viewers[mode] = self.viewer
return self.viewer
def get_body_com(self, body_name):
| {"golden_diff": "diff --git a/gym/envs/mujoco/mujoco_env.py b/gym/envs/mujoco/mujoco_env.py\n--- a/gym/envs/mujoco/mujoco_env.py\n+++ b/gym/envs/mujoco/mujoco_env.py\n@@ -12,6 +12,8 @@\n except ImportError as e:\n raise error.DependencyNotInstalled(\"{}. (HINT: you need to install mujoco_py, and also perform the setup instructions here: https://github.com/openai/mujoco-py/.)\".format(e))\n \n+DEFAULT_SIZE = 500\n+\n class MujocoEnv(gym.Env):\n \"\"\"Superclass for all MuJoCo environments.\n \"\"\"\n@@ -28,6 +30,7 @@\n self.sim = mujoco_py.MjSim(self.model)\n self.data = self.sim.data\n self.viewer = None\n+ self._viewers = {}\n \n self.metadata = {\n 'render.modes': ['human', 'rgb_array'],\n@@ -78,8 +81,11 @@\n def reset(self):\n self.sim.reset()\n ob = self.reset_model()\n- if self.viewer is not None:\n+ old_viewer = self.viewer\n+ for v in self._viewers.values():\n+ self.viewer = v\n self.viewer_setup()\n+ self.viewer = old_viewer\n return ob\n \n def set_state(self, qpos, qvel):\n@@ -99,26 +105,31 @@\n for _ in range(n_frames):\n self.sim.step()\n \n- def render(self, mode='human'):\n+ def render(self, mode='human', width=DEFAULT_SIZE, height=DEFAULT_SIZE):\n if mode == 'rgb_array':\n- self._get_viewer().render()\n+ self._get_viewer(mode).render(width, height)\n # window size used for old mujoco-py:\n- width, height = 500, 500\n- data = self._get_viewer().read_pixels(width, height, depth=False)\n+ data = self._get_viewer(mode).read_pixels(width, height, depth=False)\n # original image is upside-down, so flip it\n return data[::-1, :, :]\n elif mode == 'human':\n- self._get_viewer().render()\n+ self._get_viewer(mode).render()\n \n def close(self):\n if self.viewer is not None:\n # self.viewer.finish()\n self.viewer = None\n+ self._viewers = {}\n \n- def _get_viewer(self):\n+ def _get_viewer(self, mode):\n+ self.viewer = self._viewers.get(mode)\n if self.viewer is None:\n- self.viewer = mujoco_py.MjViewer(self.sim)\n+ if mode == 'human':\n+ self.viewer = mujoco_py.MjViewer(self.sim)\n+ elif mode == 'rgb_array':\n+ self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, 0)\n self.viewer_setup()\n+ self._viewers[mode] = self.viewer\n return self.viewer\n \n def get_body_com(self, body_name):\n", "issue": "MuJoCo render incompatible with newer versions of MuJoCo Py\nMujucoEnv's render method assumes the viewer has a 500x500 display. If it is larger than this, it will select pixels from the bottom-left corner when called with `mode='rgb_array'`. There's a comment which suggests this size used to be the default for mujoco-py, but with the latest mujoco-py it defaults to the screen resolution: see opengl_context.pyx:89 in _create_window.\r\n\r\nThis issue can be fixed by adding the line `self.viewer.opengl_context.set_buffer_size(W, H)` in `_get_viewer` to manually specify the desired frame size. (Oddly, I need to set both dimensions to twice the desired size, I think due to an overzealous workaround for Mac's on opengl_context.pyx:113.) But this is pretty hacky. \r\n\r\nI'm a little confused why we're rendering anything on the screen for `render(mode='rgb_array')`, which is typically used for video capture in a batch setting. In particular, MjViewer has the annoyance of showing an overlay menu by default, rarely desired for video capture. It might be better to use `mujoco_py.MjRenderContextOffscreen` instead for this purpose. 
If we allow render to be called with different modes, however, this would require making a breaking change to the interface for `viewer_setup` (or some hacking to make `self.viewer` point to different objects depending on the last call).\r\n\r\nI'd be happy to work on a pull request to address these issues if we can agree on a good way forward for this.\nMuJoCo render incompatible with newer versions of MuJoCo Py\nMujucoEnv's render method assumes the viewer has a 500x500 display. If it is larger than this, it will select pixels from the bottom-left corner when called with `mode='rgb_array'`. There's a comment which suggests this size used to be the default for mujoco-py, but with the latest mujoco-py it defaults to the screen resolution: see opengl_context.pyx:89 in _create_window.\r\n\r\nThis issue can be fixed by adding the line `self.viewer.opengl_context.set_buffer_size(W, H)` in `_get_viewer` to manually specify the desired frame size. (Oddly, I need to set both dimensions to twice the desired size, I think due to an overzealous workaround for Mac's on opengl_context.pyx:113.) But this is pretty hacky. \r\n\r\nI'm a little confused why we're rendering anything on the screen for `render(mode='rgb_array')`, which is typically used for video capture in a batch setting. In particular, MjViewer has the annoyance of showing an overlay menu by default, rarely desired for video capture. It might be better to use `mujoco_py.MjRenderContextOffscreen` instead for this purpose. If we allow render to be called with different modes, however, this would require making a breaking change to the interface for `viewer_setup` (or some hacking to make `self.viewer` point to different objects depending on the last call).\r\n\r\nI'd be happy to work on a pull request to address these issues if we can agree on a good way forward for this.\n", "before_files": [{"content": "import os\n\nfrom gym import error, spaces\nfrom gym.utils import seeding\nimport numpy as np\nfrom os import path\nimport gym\nimport six\n\ntry:\n import mujoco_py\nexcept ImportError as e:\n raise error.DependencyNotInstalled(\"{}. 
(HINT: you need to install mujoco_py, and also perform the setup instructions here: https://github.com/openai/mujoco-py/.)\".format(e))\n\nclass MujocoEnv(gym.Env):\n \"\"\"Superclass for all MuJoCo environments.\n \"\"\"\n\n def __init__(self, model_path, frame_skip):\n if model_path.startswith(\"/\"):\n fullpath = model_path\n else:\n fullpath = os.path.join(os.path.dirname(__file__), \"assets\", model_path)\n if not path.exists(fullpath):\n raise IOError(\"File %s does not exist\" % fullpath)\n self.frame_skip = frame_skip\n self.model = mujoco_py.load_model_from_path(fullpath)\n self.sim = mujoco_py.MjSim(self.model)\n self.data = self.sim.data\n self.viewer = None\n\n self.metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second': int(np.round(1.0 / self.dt))\n }\n\n self.init_qpos = self.sim.data.qpos.ravel().copy()\n self.init_qvel = self.sim.data.qvel.ravel().copy()\n observation, _reward, done, _info = self.step(np.zeros(self.model.nu))\n assert not done\n self.obs_dim = observation.size\n\n bounds = self.model.actuator_ctrlrange.copy()\n low = bounds[:, 0]\n high = bounds[:, 1]\n self.action_space = spaces.Box(low=low, high=high)\n\n high = np.inf*np.ones(self.obs_dim)\n low = -high\n self.observation_space = spaces.Box(low, high)\n\n self.seed()\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n # methods to override:\n # ----------------------------\n\n def reset_model(self):\n \"\"\"\n Reset the robot degrees of freedom (qpos and qvel).\n Implement this in each subclass.\n \"\"\"\n raise NotImplementedError\n\n def viewer_setup(self):\n \"\"\"\n This method is called when the viewer is initialized and after every reset\n Optionally implement this method, if you need to tinker with camera position\n and so forth.\n \"\"\"\n pass\n\n # -----------------------------\n\n def reset(self):\n self.sim.reset()\n ob = self.reset_model()\n if self.viewer is not None:\n self.viewer_setup()\n return ob\n\n def set_state(self, qpos, qvel):\n assert qpos.shape == (self.model.nq,) and qvel.shape == (self.model.nv,)\n old_state = self.sim.get_state()\n new_state = mujoco_py.MjSimState(old_state.time, qpos, qvel,\n old_state.act, old_state.udd_state)\n self.sim.set_state(new_state)\n self.sim.forward()\n\n @property\n def dt(self):\n return self.model.opt.timestep * self.frame_skip\n\n def do_simulation(self, ctrl, n_frames):\n self.sim.data.ctrl[:] = ctrl\n for _ in range(n_frames):\n self.sim.step()\n\n def render(self, mode='human'):\n if mode == 'rgb_array':\n self._get_viewer().render()\n # window size used for old mujoco-py:\n width, height = 500, 500\n data = self._get_viewer().read_pixels(width, height, depth=False)\n # original image is upside-down, so flip it\n return data[::-1, :, :]\n elif mode == 'human':\n self._get_viewer().render()\n\n def close(self):\n if self.viewer is not None:\n # self.viewer.finish()\n self.viewer = None\n\n def _get_viewer(self):\n if self.viewer is None:\n self.viewer = mujoco_py.MjViewer(self.sim)\n self.viewer_setup()\n return self.viewer\n\n def get_body_com(self, body_name):\n return self.data.get_body_xpos(body_name)\n\n def state_vector(self):\n return np.concatenate([\n self.sim.data.qpos.flat,\n self.sim.data.qvel.flat\n ])\n", "path": "gym/envs/mujoco/mujoco_env.py"}], "after_files": [{"content": "import os\n\nfrom gym import error, spaces\nfrom gym.utils import seeding\nimport numpy as np\nfrom os import path\nimport gym\nimport six\n\ntry:\n import mujoco_py\nexcept 
ImportError as e:\n raise error.DependencyNotInstalled(\"{}. (HINT: you need to install mujoco_py, and also perform the setup instructions here: https://github.com/openai/mujoco-py/.)\".format(e))\n\nDEFAULT_SIZE = 500\n\nclass MujocoEnv(gym.Env):\n \"\"\"Superclass for all MuJoCo environments.\n \"\"\"\n\n def __init__(self, model_path, frame_skip):\n if model_path.startswith(\"/\"):\n fullpath = model_path\n else:\n fullpath = os.path.join(os.path.dirname(__file__), \"assets\", model_path)\n if not path.exists(fullpath):\n raise IOError(\"File %s does not exist\" % fullpath)\n self.frame_skip = frame_skip\n self.model = mujoco_py.load_model_from_path(fullpath)\n self.sim = mujoco_py.MjSim(self.model)\n self.data = self.sim.data\n self.viewer = None\n self._viewers = {}\n\n self.metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second': int(np.round(1.0 / self.dt))\n }\n\n self.init_qpos = self.sim.data.qpos.ravel().copy()\n self.init_qvel = self.sim.data.qvel.ravel().copy()\n observation, _reward, done, _info = self.step(np.zeros(self.model.nu))\n assert not done\n self.obs_dim = observation.size\n\n bounds = self.model.actuator_ctrlrange.copy()\n low = bounds[:, 0]\n high = bounds[:, 1]\n self.action_space = spaces.Box(low=low, high=high)\n\n high = np.inf*np.ones(self.obs_dim)\n low = -high\n self.observation_space = spaces.Box(low, high)\n\n self.seed()\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n # methods to override:\n # ----------------------------\n\n def reset_model(self):\n \"\"\"\n Reset the robot degrees of freedom (qpos and qvel).\n Implement this in each subclass.\n \"\"\"\n raise NotImplementedError\n\n def viewer_setup(self):\n \"\"\"\n This method is called when the viewer is initialized and after every reset\n Optionally implement this method, if you need to tinker with camera position\n and so forth.\n \"\"\"\n pass\n\n # -----------------------------\n\n def reset(self):\n self.sim.reset()\n ob = self.reset_model()\n old_viewer = self.viewer\n for v in self._viewers.values():\n self.viewer = v\n self.viewer_setup()\n self.viewer = old_viewer\n return ob\n\n def set_state(self, qpos, qvel):\n assert qpos.shape == (self.model.nq,) and qvel.shape == (self.model.nv,)\n old_state = self.sim.get_state()\n new_state = mujoco_py.MjSimState(old_state.time, qpos, qvel,\n old_state.act, old_state.udd_state)\n self.sim.set_state(new_state)\n self.sim.forward()\n\n @property\n def dt(self):\n return self.model.opt.timestep * self.frame_skip\n\n def do_simulation(self, ctrl, n_frames):\n self.sim.data.ctrl[:] = ctrl\n for _ in range(n_frames):\n self.sim.step()\n\n def render(self, mode='human', width=DEFAULT_SIZE, height=DEFAULT_SIZE):\n if mode == 'rgb_array':\n self._get_viewer(mode).render(width, height)\n # window size used for old mujoco-py:\n data = self._get_viewer(mode).read_pixels(width, height, depth=False)\n # original image is upside-down, so flip it\n return data[::-1, :, :]\n elif mode == 'human':\n self._get_viewer(mode).render()\n\n def close(self):\n if self.viewer is not None:\n # self.viewer.finish()\n self.viewer = None\n self._viewers = {}\n\n def _get_viewer(self, mode):\n self.viewer = self._viewers.get(mode)\n if self.viewer is None:\n if mode == 'human':\n self.viewer = mujoco_py.MjViewer(self.sim)\n elif mode == 'rgb_array':\n self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, 0)\n self.viewer_setup()\n self._viewers[mode] = self.viewer\n return self.viewer\n\n def 
get_body_com(self, body_name):\n return self.data.get_body_xpos(body_name)\n\n def state_vector(self):\n return np.concatenate([\n self.sim.data.qpos.flat,\n self.sim.data.qvel.flat\n ])\n", "path": "gym/envs/mujoco/mujoco_env.py"}]} | 2,195 | 684 |
gh_patches_debug_15805 | rasdani/github-patches | git_diff | networkx__networkx-4655 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incorrect link to the paper in katz_centrality documentation
### Current Behavior
Link [2] in katz_centrality documentation directs to the incorrect paper: 'Universal Behavior of Load Distribution in Scale-Free Networks'
### Expected Behavior
Link [2] in katz_centrality documentation directs to the correct paper: 'A New Status Index Derived from Sociometric Index'
### Steps to Reproduce
Open the page https://networkx.org/documentation/stable/reference/algorithms/generated/networkx.algorithms.centrality.katz_centrality.html
### Additional context
I guess the correct link is https://link.springer.com/content/pdf/10.1007/BF02289026.pdf
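For completeness, the corrected reference entry in both `katz_centrality` docstrings would then read roughly:

```
.. [2] Leo Katz:
   A New Status Index Derived from Sociometric Index.
   Psychometrika 18(1):39–43, 1953
   https://link.springer.com/content/pdf/10.1007/BF02289026.pdf
```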
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `networkx/algorithms/centrality/katz.py`
Content:
```
1 """Katz centrality."""
2 from math import sqrt
3
4 import networkx as nx
5 from networkx.utils import not_implemented_for
6
7 __all__ = ["katz_centrality", "katz_centrality_numpy"]
8
9
10 @not_implemented_for("multigraph")
11 def katz_centrality(
12 G,
13 alpha=0.1,
14 beta=1.0,
15 max_iter=1000,
16 tol=1.0e-6,
17 nstart=None,
18 normalized=True,
19 weight=None,
20 ):
21 r"""Compute the Katz centrality for the nodes of the graph G.
22
23 Katz centrality computes the centrality for a node based on the centrality
24 of its neighbors. It is a generalization of the eigenvector centrality. The
25 Katz centrality for node $i$ is
26
27 .. math::
28
29 x_i = \alpha \sum_{j} A_{ij} x_j + \beta,
30
31 where $A$ is the adjacency matrix of graph G with eigenvalues $\lambda$.
32
33 The parameter $\beta$ controls the initial centrality and
34
35 .. math::
36
37 \alpha < \frac{1}{\lambda_{\max}}.
38
39 Katz centrality computes the relative influence of a node within a
40 network by measuring the number of the immediate neighbors (first
41 degree nodes) and also all other nodes in the network that connect
42 to the node under consideration through these immediate neighbors.
43
44 Extra weight can be provided to immediate neighbors through the
45 parameter $\beta$. Connections made with distant neighbors
46 are, however, penalized by an attenuation factor $\alpha$ which
47 should be strictly less than the inverse largest eigenvalue of the
48 adjacency matrix in order for the Katz centrality to be computed
49 correctly. More information is provided in [1]_.
50
51 Parameters
52 ----------
53 G : graph
54 A NetworkX graph.
55
56 alpha : float
57 Attenuation factor
58
59 beta : scalar or dictionary, optional (default=1.0)
60 Weight attributed to the immediate neighborhood. If not a scalar, the
61 dictionary must have an value for every node.
62
63 max_iter : integer, optional (default=1000)
64 Maximum number of iterations in power method.
65
66 tol : float, optional (default=1.0e-6)
67 Error tolerance used to check convergence in power method iteration.
68
69 nstart : dictionary, optional
70 Starting value of Katz iteration for each node.
71
72 normalized : bool, optional (default=True)
73 If True normalize the resulting values.
74
75 weight : None or string, optional (default=None)
76 If None, all edge weights are considered equal.
77 Otherwise holds the name of the edge attribute used as weight.
78 In this measure the weight is interpreted as the connection strength.
79
80 Returns
81 -------
82 nodes : dictionary
83 Dictionary of nodes with Katz centrality as the value.
84
85 Raises
86 ------
87 NetworkXError
88 If the parameter `beta` is not a scalar but lacks a value for at least
89 one node
90
91 PowerIterationFailedConvergence
92 If the algorithm fails to converge to the specified tolerance
93 within the specified number of iterations of the power iteration
94 method.
95
96 Examples
97 --------
98 >>> import math
99 >>> G = nx.path_graph(4)
100 >>> phi = (1 + math.sqrt(5)) / 2.0 # largest eigenvalue of adj matrix
101 >>> centrality = nx.katz_centrality(G, 1 / phi - 0.01)
102 >>> for n, c in sorted(centrality.items()):
103 ... print(f"{n} {c:.2f}")
104 0 0.37
105 1 0.60
106 2 0.60
107 3 0.37
108
109 See Also
110 --------
111 katz_centrality_numpy
112 eigenvector_centrality
113 eigenvector_centrality_numpy
114 pagerank
115 hits
116
117 Notes
118 -----
119 Katz centrality was introduced by [2]_.
120
121 This algorithm it uses the power method to find the eigenvector
122 corresponding to the largest eigenvalue of the adjacency matrix of ``G``.
123 The parameter ``alpha`` should be strictly less than the inverse of largest
124 eigenvalue of the adjacency matrix for the algorithm to converge.
125 You can use ``max(nx.adjacency_spectrum(G))`` to get $\lambda_{\max}$ the largest
126 eigenvalue of the adjacency matrix.
127 The iteration will stop after ``max_iter`` iterations or an error tolerance of
128 ``number_of_nodes(G) * tol`` has been reached.
129
130 When $\alpha = 1/\lambda_{\max}$ and $\beta=0$, Katz centrality is the same
131 as eigenvector centrality.
132
133 For directed graphs this finds "left" eigenvectors which corresponds
134 to the in-edges in the graph. For out-edges Katz centrality
135 first reverse the graph with ``G.reverse()``.
136
137 References
138 ----------
139 .. [1] Mark E. J. Newman:
140 Networks: An Introduction.
141 Oxford University Press, USA, 2010, p. 720.
142 .. [2] Leo Katz:
143 A New Status Index Derived from Sociometric Index.
144 Psychometrika 18(1):39–43, 1953
145 http://phya.snu.ac.kr/~dkim/PRL87278701.pdf
146 """
147 if len(G) == 0:
148 return {}
149
150 nnodes = G.number_of_nodes()
151
152 if nstart is None:
153 # choose starting vector with entries of 0
154 x = {n: 0 for n in G}
155 else:
156 x = nstart
157
158 try:
159 b = dict.fromkeys(G, float(beta))
160 except (TypeError, ValueError, AttributeError) as e:
161 b = beta
162 if set(beta) != set(G):
163 raise nx.NetworkXError(
164 "beta dictionary " "must have a value for every node"
165 ) from e
166
167 # make up to max_iter iterations
168 for i in range(max_iter):
169 xlast = x
170 x = dict.fromkeys(xlast, 0)
171 # do the multiplication y^T = Alpha * x^T A - Beta
172 for n in x:
173 for nbr in G[n]:
174 x[nbr] += xlast[n] * G[n][nbr].get(weight, 1)
175 for n in x:
176 x[n] = alpha * x[n] + b[n]
177
178 # check convergence
179 err = sum([abs(x[n] - xlast[n]) for n in x])
180 if err < nnodes * tol:
181 if normalized:
182 # normalize vector
183 try:
184 s = 1.0 / sqrt(sum(v ** 2 for v in x.values()))
185 # this should never be zero?
186 except ZeroDivisionError:
187 s = 1.0
188 else:
189 s = 1
190 for n in x:
191 x[n] *= s
192 return x
193 raise nx.PowerIterationFailedConvergence(max_iter)
194
195
196 @not_implemented_for("multigraph")
197 def katz_centrality_numpy(G, alpha=0.1, beta=1.0, normalized=True, weight=None):
198 r"""Compute the Katz centrality for the graph G.
199
200 Katz centrality computes the centrality for a node based on the centrality
201 of its neighbors. It is a generalization of the eigenvector centrality. The
202 Katz centrality for node $i$ is
203
204 .. math::
205
206 x_i = \alpha \sum_{j} A_{ij} x_j + \beta,
207
208 where $A$ is the adjacency matrix of graph G with eigenvalues $\lambda$.
209
210 The parameter $\beta$ controls the initial centrality and
211
212 .. math::
213
214 \alpha < \frac{1}{\lambda_{\max}}.
215
216 Katz centrality computes the relative influence of a node within a
217 network by measuring the number of the immediate neighbors (first
218 degree nodes) and also all other nodes in the network that connect
219 to the node under consideration through these immediate neighbors.
220
221 Extra weight can be provided to immediate neighbors through the
222 parameter $\beta$. Connections made with distant neighbors
223 are, however, penalized by an attenuation factor $\alpha$ which
224 should be strictly less than the inverse largest eigenvalue of the
225 adjacency matrix in order for the Katz centrality to be computed
226 correctly. More information is provided in [1]_.
227
228 Parameters
229 ----------
230 G : graph
231 A NetworkX graph
232
233 alpha : float
234 Attenuation factor
235
236 beta : scalar or dictionary, optional (default=1.0)
237 Weight attributed to the immediate neighborhood. If not a scalar the
238 dictionary must have an value for every node.
239
240 normalized : bool
241 If True normalize the resulting values.
242
243 weight : None or string, optional
244 If None, all edge weights are considered equal.
245 Otherwise holds the name of the edge attribute used as weight.
246 In this measure the weight is interpreted as the connection strength.
247
248 Returns
249 -------
250 nodes : dictionary
251 Dictionary of nodes with Katz centrality as the value.
252
253 Raises
254 ------
255 NetworkXError
256 If the parameter `beta` is not a scalar but lacks a value for at least
257 one node
258
259 Examples
260 --------
261 >>> import math
262 >>> G = nx.path_graph(4)
263 >>> phi = (1 + math.sqrt(5)) / 2.0 # largest eigenvalue of adj matrix
264 >>> centrality = nx.katz_centrality_numpy(G, 1 / phi)
265 >>> for n, c in sorted(centrality.items()):
266 ... print(f"{n} {c:.2f}")
267 0 0.37
268 1 0.60
269 2 0.60
270 3 0.37
271
272 See Also
273 --------
274 katz_centrality
275 eigenvector_centrality_numpy
276 eigenvector_centrality
277 pagerank
278 hits
279
280 Notes
281 -----
282 Katz centrality was introduced by [2]_.
283
284 This algorithm uses a direct linear solver to solve the above equation.
285 The parameter ``alpha`` should be strictly less than the inverse of largest
286 eigenvalue of the adjacency matrix for there to be a solution.
287 You can use ``max(nx.adjacency_spectrum(G))`` to get $\lambda_{\max}$ the largest
288 eigenvalue of the adjacency matrix.
289
290 When $\alpha = 1/\lambda_{\max}$ and $\beta=0$, Katz centrality is the same
291 as eigenvector centrality.
292
293 For directed graphs this finds "left" eigenvectors which corresponds
294 to the in-edges in the graph. For out-edges Katz centrality
295 first reverse the graph with ``G.reverse()``.
296
297 References
298 ----------
299 .. [1] Mark E. J. Newman:
300 Networks: An Introduction.
301 Oxford University Press, USA, 2010, p. 720.
302 .. [2] Leo Katz:
303 A New Status Index Derived from Sociometric Index.
304 Psychometrika 18(1):39–43, 1953
305 http://phya.snu.ac.kr/~dkim/PRL87278701.pdf
306 """
307 import numpy as np
308
309 if len(G) == 0:
310 return {}
311 try:
312 nodelist = beta.keys()
313 if set(nodelist) != set(G):
314 raise nx.NetworkXError(
315 "beta dictionary " "must have a value for every node"
316 )
317 b = np.array(list(beta.values()), dtype=float)
318 except AttributeError:
319 nodelist = list(G)
320 try:
321 b = np.ones((len(nodelist), 1)) * float(beta)
322 except (TypeError, ValueError, AttributeError) as e:
323 raise nx.NetworkXError("beta must be a number") from e
324
325 A = nx.adj_matrix(G, nodelist=nodelist, weight=weight).todense().T
326 n = A.shape[0]
327 centrality = np.linalg.solve(np.eye(n, n) - (alpha * A), b)
328 if normalized:
329 norm = np.sign(sum(centrality)) * np.linalg.norm(centrality)
330 else:
331 norm = 1.0
332 centrality = dict(zip(nodelist, map(float, centrality / norm)))
333 return centrality
334
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/networkx/algorithms/centrality/katz.py b/networkx/algorithms/centrality/katz.py
--- a/networkx/algorithms/centrality/katz.py
+++ b/networkx/algorithms/centrality/katz.py
@@ -142,7 +142,7 @@
.. [2] Leo Katz:
A New Status Index Derived from Sociometric Index.
Psychometrika 18(1):39–43, 1953
- http://phya.snu.ac.kr/~dkim/PRL87278701.pdf
+ https://link.springer.com/content/pdf/10.1007/BF02289026.pdf
"""
if len(G) == 0:
return {}
@@ -302,7 +302,7 @@
.. [2] Leo Katz:
A New Status Index Derived from Sociometric Index.
Psychometrika 18(1):39–43, 1953
- http://phya.snu.ac.kr/~dkim/PRL87278701.pdf
+ https://link.springer.com/content/pdf/10.1007/BF02289026.pdf
"""
import numpy as np
| {"golden_diff": "diff --git a/networkx/algorithms/centrality/katz.py b/networkx/algorithms/centrality/katz.py\n--- a/networkx/algorithms/centrality/katz.py\n+++ b/networkx/algorithms/centrality/katz.py\n@@ -142,7 +142,7 @@\n .. [2] Leo Katz:\n A New Status Index Derived from Sociometric Index.\n Psychometrika 18(1):39\u201343, 1953\n- http://phya.snu.ac.kr/~dkim/PRL87278701.pdf\n+ https://link.springer.com/content/pdf/10.1007/BF02289026.pdf\n \"\"\"\n if len(G) == 0:\n return {}\n@@ -302,7 +302,7 @@\n .. [2] Leo Katz:\n A New Status Index Derived from Sociometric Index.\n Psychometrika 18(1):39\u201343, 1953\n- http://phya.snu.ac.kr/~dkim/PRL87278701.pdf\n+ https://link.springer.com/content/pdf/10.1007/BF02289026.pdf\n \"\"\"\n import numpy as np\n", "issue": "Incorrect link to the paper in katz_centrality documentation\n### Current Behavior\r\n\r\nLink [2] in katz_centrality documentation directs to the incorrect paper: 'Universal Behavior of Load Distribution in Scale-Free Networks'\r\n\r\n### Expected Behavior\r\nLink [2] in katz_centrality documentation directs to the correct paper: 'A New Status Index Derived from Sociometric Index'\r\n\r\n### Steps to Reproduce\r\nOpen the page https://networkx.org/documentation/stable/reference/algorithms/generated/networkx.algorithms.centrality.katz_centrality.html\r\n\r\n### Additional context\r\nI guess the correct link is https://link.springer.com/content/pdf/10.1007/BF02289026.pdf\r\n\n", "before_files": [{"content": "\"\"\"Katz centrality.\"\"\"\nfrom math import sqrt\n\nimport networkx as nx\nfrom networkx.utils import not_implemented_for\n\n__all__ = [\"katz_centrality\", \"katz_centrality_numpy\"]\n\n\n@not_implemented_for(\"multigraph\")\ndef katz_centrality(\n G,\n alpha=0.1,\n beta=1.0,\n max_iter=1000,\n tol=1.0e-6,\n nstart=None,\n normalized=True,\n weight=None,\n):\n r\"\"\"Compute the Katz centrality for the nodes of the graph G.\n\n Katz centrality computes the centrality for a node based on the centrality\n of its neighbors. It is a generalization of the eigenvector centrality. The\n Katz centrality for node $i$ is\n\n .. math::\n\n x_i = \\alpha \\sum_{j} A_{ij} x_j + \\beta,\n\n where $A$ is the adjacency matrix of graph G with eigenvalues $\\lambda$.\n\n The parameter $\\beta$ controls the initial centrality and\n\n .. math::\n\n \\alpha < \\frac{1}{\\lambda_{\\max}}.\n\n Katz centrality computes the relative influence of a node within a\n network by measuring the number of the immediate neighbors (first\n degree nodes) and also all other nodes in the network that connect\n to the node under consideration through these immediate neighbors.\n\n Extra weight can be provided to immediate neighbors through the\n parameter $\\beta$. Connections made with distant neighbors\n are, however, penalized by an attenuation factor $\\alpha$ which\n should be strictly less than the inverse largest eigenvalue of the\n adjacency matrix in order for the Katz centrality to be computed\n correctly. More information is provided in [1]_.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph.\n\n alpha : float\n Attenuation factor\n\n beta : scalar or dictionary, optional (default=1.0)\n Weight attributed to the immediate neighborhood. 
If not a scalar, the\n dictionary must have an value for every node.\n\n max_iter : integer, optional (default=1000)\n Maximum number of iterations in power method.\n\n tol : float, optional (default=1.0e-6)\n Error tolerance used to check convergence in power method iteration.\n\n nstart : dictionary, optional\n Starting value of Katz iteration for each node.\n\n normalized : bool, optional (default=True)\n If True normalize the resulting values.\n\n weight : None or string, optional (default=None)\n If None, all edge weights are considered equal.\n Otherwise holds the name of the edge attribute used as weight.\n In this measure the weight is interpreted as the connection strength.\n\n Returns\n -------\n nodes : dictionary\n Dictionary of nodes with Katz centrality as the value.\n\n Raises\n ------\n NetworkXError\n If the parameter `beta` is not a scalar but lacks a value for at least\n one node\n\n PowerIterationFailedConvergence\n If the algorithm fails to converge to the specified tolerance\n within the specified number of iterations of the power iteration\n method.\n\n Examples\n --------\n >>> import math\n >>> G = nx.path_graph(4)\n >>> phi = (1 + math.sqrt(5)) / 2.0 # largest eigenvalue of adj matrix\n >>> centrality = nx.katz_centrality(G, 1 / phi - 0.01)\n >>> for n, c in sorted(centrality.items()):\n ... print(f\"{n} {c:.2f}\")\n 0 0.37\n 1 0.60\n 2 0.60\n 3 0.37\n\n See Also\n --------\n katz_centrality_numpy\n eigenvector_centrality\n eigenvector_centrality_numpy\n pagerank\n hits\n\n Notes\n -----\n Katz centrality was introduced by [2]_.\n\n This algorithm it uses the power method to find the eigenvector\n corresponding to the largest eigenvalue of the adjacency matrix of ``G``.\n The parameter ``alpha`` should be strictly less than the inverse of largest\n eigenvalue of the adjacency matrix for the algorithm to converge.\n You can use ``max(nx.adjacency_spectrum(G))`` to get $\\lambda_{\\max}$ the largest\n eigenvalue of the adjacency matrix.\n The iteration will stop after ``max_iter`` iterations or an error tolerance of\n ``number_of_nodes(G) * tol`` has been reached.\n\n When $\\alpha = 1/\\lambda_{\\max}$ and $\\beta=0$, Katz centrality is the same\n as eigenvector centrality.\n\n For directed graphs this finds \"left\" eigenvectors which corresponds\n to the in-edges in the graph. For out-edges Katz centrality\n first reverse the graph with ``G.reverse()``.\n\n References\n ----------\n .. [1] Mark E. J. Newman:\n Networks: An Introduction.\n Oxford University Press, USA, 2010, p. 720.\n .. 
[2] Leo Katz:\n A New Status Index Derived from Sociometric Index.\n Psychometrika 18(1):39\u201343, 1953\n http://phya.snu.ac.kr/~dkim/PRL87278701.pdf\n \"\"\"\n if len(G) == 0:\n return {}\n\n nnodes = G.number_of_nodes()\n\n if nstart is None:\n # choose starting vector with entries of 0\n x = {n: 0 for n in G}\n else:\n x = nstart\n\n try:\n b = dict.fromkeys(G, float(beta))\n except (TypeError, ValueError, AttributeError) as e:\n b = beta\n if set(beta) != set(G):\n raise nx.NetworkXError(\n \"beta dictionary \" \"must have a value for every node\"\n ) from e\n\n # make up to max_iter iterations\n for i in range(max_iter):\n xlast = x\n x = dict.fromkeys(xlast, 0)\n # do the multiplication y^T = Alpha * x^T A - Beta\n for n in x:\n for nbr in G[n]:\n x[nbr] += xlast[n] * G[n][nbr].get(weight, 1)\n for n in x:\n x[n] = alpha * x[n] + b[n]\n\n # check convergence\n err = sum([abs(x[n] - xlast[n]) for n in x])\n if err < nnodes * tol:\n if normalized:\n # normalize vector\n try:\n s = 1.0 / sqrt(sum(v ** 2 for v in x.values()))\n # this should never be zero?\n except ZeroDivisionError:\n s = 1.0\n else:\n s = 1\n for n in x:\n x[n] *= s\n return x\n raise nx.PowerIterationFailedConvergence(max_iter)\n\n\n@not_implemented_for(\"multigraph\")\ndef katz_centrality_numpy(G, alpha=0.1, beta=1.0, normalized=True, weight=None):\n r\"\"\"Compute the Katz centrality for the graph G.\n\n Katz centrality computes the centrality for a node based on the centrality\n of its neighbors. It is a generalization of the eigenvector centrality. The\n Katz centrality for node $i$ is\n\n .. math::\n\n x_i = \\alpha \\sum_{j} A_{ij} x_j + \\beta,\n\n where $A$ is the adjacency matrix of graph G with eigenvalues $\\lambda$.\n\n The parameter $\\beta$ controls the initial centrality and\n\n .. math::\n\n \\alpha < \\frac{1}{\\lambda_{\\max}}.\n\n Katz centrality computes the relative influence of a node within a\n network by measuring the number of the immediate neighbors (first\n degree nodes) and also all other nodes in the network that connect\n to the node under consideration through these immediate neighbors.\n\n Extra weight can be provided to immediate neighbors through the\n parameter $\\beta$. Connections made with distant neighbors\n are, however, penalized by an attenuation factor $\\alpha$ which\n should be strictly less than the inverse largest eigenvalue of the\n adjacency matrix in order for the Katz centrality to be computed\n correctly. More information is provided in [1]_.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n\n alpha : float\n Attenuation factor\n\n beta : scalar or dictionary, optional (default=1.0)\n Weight attributed to the immediate neighborhood. If not a scalar the\n dictionary must have an value for every node.\n\n normalized : bool\n If True normalize the resulting values.\n\n weight : None or string, optional\n If None, all edge weights are considered equal.\n Otherwise holds the name of the edge attribute used as weight.\n In this measure the weight is interpreted as the connection strength.\n\n Returns\n -------\n nodes : dictionary\n Dictionary of nodes with Katz centrality as the value.\n\n Raises\n ------\n NetworkXError\n If the parameter `beta` is not a scalar but lacks a value for at least\n one node\n\n Examples\n --------\n >>> import math\n >>> G = nx.path_graph(4)\n >>> phi = (1 + math.sqrt(5)) / 2.0 # largest eigenvalue of adj matrix\n >>> centrality = nx.katz_centrality_numpy(G, 1 / phi)\n >>> for n, c in sorted(centrality.items()):\n ... 
print(f\"{n} {c:.2f}\")\n 0 0.37\n 1 0.60\n 2 0.60\n 3 0.37\n\n See Also\n --------\n katz_centrality\n eigenvector_centrality_numpy\n eigenvector_centrality\n pagerank\n hits\n\n Notes\n -----\n Katz centrality was introduced by [2]_.\n\n This algorithm uses a direct linear solver to solve the above equation.\n The parameter ``alpha`` should be strictly less than the inverse of largest\n eigenvalue of the adjacency matrix for there to be a solution.\n You can use ``max(nx.adjacency_spectrum(G))`` to get $\\lambda_{\\max}$ the largest\n eigenvalue of the adjacency matrix.\n\n When $\\alpha = 1/\\lambda_{\\max}$ and $\\beta=0$, Katz centrality is the same\n as eigenvector centrality.\n\n For directed graphs this finds \"left\" eigenvectors which corresponds\n to the in-edges in the graph. For out-edges Katz centrality\n first reverse the graph with ``G.reverse()``.\n\n References\n ----------\n .. [1] Mark E. J. Newman:\n Networks: An Introduction.\n Oxford University Press, USA, 2010, p. 720.\n .. [2] Leo Katz:\n A New Status Index Derived from Sociometric Index.\n Psychometrika 18(1):39\u201343, 1953\n http://phya.snu.ac.kr/~dkim/PRL87278701.pdf\n \"\"\"\n import numpy as np\n\n if len(G) == 0:\n return {}\n try:\n nodelist = beta.keys()\n if set(nodelist) != set(G):\n raise nx.NetworkXError(\n \"beta dictionary \" \"must have a value for every node\"\n )\n b = np.array(list(beta.values()), dtype=float)\n except AttributeError:\n nodelist = list(G)\n try:\n b = np.ones((len(nodelist), 1)) * float(beta)\n except (TypeError, ValueError, AttributeError) as e:\n raise nx.NetworkXError(\"beta must be a number\") from e\n\n A = nx.adj_matrix(G, nodelist=nodelist, weight=weight).todense().T\n n = A.shape[0]\n centrality = np.linalg.solve(np.eye(n, n) - (alpha * A), b)\n if normalized:\n norm = np.sign(sum(centrality)) * np.linalg.norm(centrality)\n else:\n norm = 1.0\n centrality = dict(zip(nodelist, map(float, centrality / norm)))\n return centrality\n", "path": "networkx/algorithms/centrality/katz.py"}], "after_files": [{"content": "\"\"\"Katz centrality.\"\"\"\nfrom math import sqrt\n\nimport networkx as nx\nfrom networkx.utils import not_implemented_for\n\n__all__ = [\"katz_centrality\", \"katz_centrality_numpy\"]\n\n\n@not_implemented_for(\"multigraph\")\ndef katz_centrality(\n G,\n alpha=0.1,\n beta=1.0,\n max_iter=1000,\n tol=1.0e-6,\n nstart=None,\n normalized=True,\n weight=None,\n):\n r\"\"\"Compute the Katz centrality for the nodes of the graph G.\n\n Katz centrality computes the centrality for a node based on the centrality\n of its neighbors. It is a generalization of the eigenvector centrality. The\n Katz centrality for node $i$ is\n\n .. math::\n\n x_i = \\alpha \\sum_{j} A_{ij} x_j + \\beta,\n\n where $A$ is the adjacency matrix of graph G with eigenvalues $\\lambda$.\n\n The parameter $\\beta$ controls the initial centrality and\n\n .. math::\n\n \\alpha < \\frac{1}{\\lambda_{\\max}}.\n\n Katz centrality computes the relative influence of a node within a\n network by measuring the number of the immediate neighbors (first\n degree nodes) and also all other nodes in the network that connect\n to the node under consideration through these immediate neighbors.\n\n Extra weight can be provided to immediate neighbors through the\n parameter $\\beta$. 
Connections made with distant neighbors\n are, however, penalized by an attenuation factor $\\alpha$ which\n should be strictly less than the inverse largest eigenvalue of the\n adjacency matrix in order for the Katz centrality to be computed\n correctly. More information is provided in [1]_.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph.\n\n alpha : float\n Attenuation factor\n\n beta : scalar or dictionary, optional (default=1.0)\n Weight attributed to the immediate neighborhood. If not a scalar, the\n dictionary must have an value for every node.\n\n max_iter : integer, optional (default=1000)\n Maximum number of iterations in power method.\n\n tol : float, optional (default=1.0e-6)\n Error tolerance used to check convergence in power method iteration.\n\n nstart : dictionary, optional\n Starting value of Katz iteration for each node.\n\n normalized : bool, optional (default=True)\n If True normalize the resulting values.\n\n weight : None or string, optional (default=None)\n If None, all edge weights are considered equal.\n Otherwise holds the name of the edge attribute used as weight.\n In this measure the weight is interpreted as the connection strength.\n\n Returns\n -------\n nodes : dictionary\n Dictionary of nodes with Katz centrality as the value.\n\n Raises\n ------\n NetworkXError\n If the parameter `beta` is not a scalar but lacks a value for at least\n one node\n\n PowerIterationFailedConvergence\n If the algorithm fails to converge to the specified tolerance\n within the specified number of iterations of the power iteration\n method.\n\n Examples\n --------\n >>> import math\n >>> G = nx.path_graph(4)\n >>> phi = (1 + math.sqrt(5)) / 2.0 # largest eigenvalue of adj matrix\n >>> centrality = nx.katz_centrality(G, 1 / phi - 0.01)\n >>> for n, c in sorted(centrality.items()):\n ... print(f\"{n} {c:.2f}\")\n 0 0.37\n 1 0.60\n 2 0.60\n 3 0.37\n\n See Also\n --------\n katz_centrality_numpy\n eigenvector_centrality\n eigenvector_centrality_numpy\n pagerank\n hits\n\n Notes\n -----\n Katz centrality was introduced by [2]_.\n\n This algorithm it uses the power method to find the eigenvector\n corresponding to the largest eigenvalue of the adjacency matrix of ``G``.\n The parameter ``alpha`` should be strictly less than the inverse of largest\n eigenvalue of the adjacency matrix for the algorithm to converge.\n You can use ``max(nx.adjacency_spectrum(G))`` to get $\\lambda_{\\max}$ the largest\n eigenvalue of the adjacency matrix.\n The iteration will stop after ``max_iter`` iterations or an error tolerance of\n ``number_of_nodes(G) * tol`` has been reached.\n\n When $\\alpha = 1/\\lambda_{\\max}$ and $\\beta=0$, Katz centrality is the same\n as eigenvector centrality.\n\n For directed graphs this finds \"left\" eigenvectors which corresponds\n to the in-edges in the graph. For out-edges Katz centrality\n first reverse the graph with ``G.reverse()``.\n\n References\n ----------\n .. [1] Mark E. J. Newman:\n Networks: An Introduction.\n Oxford University Press, USA, 2010, p. 720.\n .. 
[2] Leo Katz:\n A New Status Index Derived from Sociometric Index.\n Psychometrika 18(1):39\u201343, 1953\n https://link.springer.com/content/pdf/10.1007/BF02289026.pdf\n \"\"\"\n if len(G) == 0:\n return {}\n\n nnodes = G.number_of_nodes()\n\n if nstart is None:\n # choose starting vector with entries of 0\n x = {n: 0 for n in G}\n else:\n x = nstart\n\n try:\n b = dict.fromkeys(G, float(beta))\n except (TypeError, ValueError, AttributeError) as e:\n b = beta\n if set(beta) != set(G):\n raise nx.NetworkXError(\n \"beta dictionary \" \"must have a value for every node\"\n ) from e\n\n # make up to max_iter iterations\n for i in range(max_iter):\n xlast = x\n x = dict.fromkeys(xlast, 0)\n # do the multiplication y^T = Alpha * x^T A - Beta\n for n in x:\n for nbr in G[n]:\n x[nbr] += xlast[n] * G[n][nbr].get(weight, 1)\n for n in x:\n x[n] = alpha * x[n] + b[n]\n\n # check convergence\n err = sum([abs(x[n] - xlast[n]) for n in x])\n if err < nnodes * tol:\n if normalized:\n # normalize vector\n try:\n s = 1.0 / sqrt(sum(v ** 2 for v in x.values()))\n # this should never be zero?\n except ZeroDivisionError:\n s = 1.0\n else:\n s = 1\n for n in x:\n x[n] *= s\n return x\n raise nx.PowerIterationFailedConvergence(max_iter)\n\n\n@not_implemented_for(\"multigraph\")\ndef katz_centrality_numpy(G, alpha=0.1, beta=1.0, normalized=True, weight=None):\n r\"\"\"Compute the Katz centrality for the graph G.\n\n Katz centrality computes the centrality for a node based on the centrality\n of its neighbors. It is a generalization of the eigenvector centrality. The\n Katz centrality for node $i$ is\n\n .. math::\n\n x_i = \\alpha \\sum_{j} A_{ij} x_j + \\beta,\n\n where $A$ is the adjacency matrix of graph G with eigenvalues $\\lambda$.\n\n The parameter $\\beta$ controls the initial centrality and\n\n .. math::\n\n \\alpha < \\frac{1}{\\lambda_{\\max}}.\n\n Katz centrality computes the relative influence of a node within a\n network by measuring the number of the immediate neighbors (first\n degree nodes) and also all other nodes in the network that connect\n to the node under consideration through these immediate neighbors.\n\n Extra weight can be provided to immediate neighbors through the\n parameter $\\beta$. Connections made with distant neighbors\n are, however, penalized by an attenuation factor $\\alpha$ which\n should be strictly less than the inverse largest eigenvalue of the\n adjacency matrix in order for the Katz centrality to be computed\n correctly. More information is provided in [1]_.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n\n alpha : float\n Attenuation factor\n\n beta : scalar or dictionary, optional (default=1.0)\n Weight attributed to the immediate neighborhood. 
If not a scalar the\n dictionary must have an value for every node.\n\n normalized : bool\n If True normalize the resulting values.\n\n weight : None or string, optional\n If None, all edge weights are considered equal.\n Otherwise holds the name of the edge attribute used as weight.\n In this measure the weight is interpreted as the connection strength.\n\n Returns\n -------\n nodes : dictionary\n Dictionary of nodes with Katz centrality as the value.\n\n Raises\n ------\n NetworkXError\n If the parameter `beta` is not a scalar but lacks a value for at least\n one node\n\n Examples\n --------\n >>> import math\n >>> G = nx.path_graph(4)\n >>> phi = (1 + math.sqrt(5)) / 2.0 # largest eigenvalue of adj matrix\n >>> centrality = nx.katz_centrality_numpy(G, 1 / phi)\n >>> for n, c in sorted(centrality.items()):\n ... print(f\"{n} {c:.2f}\")\n 0 0.37\n 1 0.60\n 2 0.60\n 3 0.37\n\n See Also\n --------\n katz_centrality\n eigenvector_centrality_numpy\n eigenvector_centrality\n pagerank\n hits\n\n Notes\n -----\n Katz centrality was introduced by [2]_.\n\n This algorithm uses a direct linear solver to solve the above equation.\n The parameter ``alpha`` should be strictly less than the inverse of largest\n eigenvalue of the adjacency matrix for there to be a solution.\n You can use ``max(nx.adjacency_spectrum(G))`` to get $\\lambda_{\\max}$ the largest\n eigenvalue of the adjacency matrix.\n\n When $\\alpha = 1/\\lambda_{\\max}$ and $\\beta=0$, Katz centrality is the same\n as eigenvector centrality.\n\n For directed graphs this finds \"left\" eigenvectors which corresponds\n to the in-edges in the graph. For out-edges Katz centrality\n first reverse the graph with ``G.reverse()``.\n\n References\n ----------\n .. [1] Mark E. J. Newman:\n Networks: An Introduction.\n Oxford University Press, USA, 2010, p. 720.\n .. [2] Leo Katz:\n A New Status Index Derived from Sociometric Index.\n Psychometrika 18(1):39\u201343, 1953\n https://link.springer.com/content/pdf/10.1007/BF02289026.pdf\n \"\"\"\n import numpy as np\n\n if len(G) == 0:\n return {}\n try:\n nodelist = beta.keys()\n if set(nodelist) != set(G):\n raise nx.NetworkXError(\n \"beta dictionary \" \"must have a value for every node\"\n )\n b = np.array(list(beta.values()), dtype=float)\n except AttributeError:\n nodelist = list(G)\n try:\n b = np.ones((len(nodelist), 1)) * float(beta)\n except (TypeError, ValueError, AttributeError) as e:\n raise nx.NetworkXError(\"beta must be a number\") from e\n\n A = nx.adj_matrix(G, nodelist=nodelist, weight=weight).todense().T\n n = A.shape[0]\n centrality = np.linalg.solve(np.eye(n, n) - (alpha * A), b)\n if normalized:\n norm = np.sign(sum(centrality)) * np.linalg.norm(centrality)\n else:\n norm = 1.0\n centrality = dict(zip(nodelist, map(float, centrality / norm)))\n return centrality\n", "path": "networkx/algorithms/centrality/katz.py"}]} | 4,091 | 298 |
gh_patches_debug_21276 | rasdani/github-patches | git_diff | privacyidea__privacyidea-1981 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add some diagrams to the documentation
It would be nice to have a few diagrams in the documentation. Specifically, I think a deployment diagram giving an example of the components involved in a typical use of push tokens, as well as sequence diagrams for the three authentication modes, would be helpful.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `doc/conf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # privacyIDEA documentation build configuration file, created by
4 # sphinx-quickstart on Fri Jun 13 07:31:01 2014.
5 #
6 # This file is execfile()d with the current directory set to its containing dir.
7 #
8 # Note that not all possible configuration values are present in this
9 # autogenerated file.
10 #
11 # All configuration values have a default; values that are commented out
12 # serve to show the default.
13
14
15 # The version info for the project you're documenting, acts as replacement for
16 # |version| and |release|, also used in various other places throughout the
17 # built documents.
18 #
19 # The short X.Y version.
20 version = '3.2'
21 # The full version, including alpha/beta/rc tags.
22 #release = '2.16dev5'
23 release = version
24
25
26 import sys
27 import os
28
29 # Monkey-patch functools.wraps
30 # http://stackoverflow.com/questions/28366818/preserve-default-arguments-of-wrapped-decorated-python-function-in-sphinx-docume
31 import functools
32
33 def no_op_wraps(func, assigned=None, updated=None):
34 """Replaces functools.wraps in order to undo wrapping.
35
36 Can be used to preserve the decorated function's signature
37 in the documentation generated by Sphinx.
38
39 """
40 def wrapper(decorator):
41 return func
42 return wrapper
43
44 functools.wraps = no_op_wraps
45
46 # If extensions (or modules to document with autodoc) are in another directory,
47 # add these directories to sys.path here. If the directory is relative to the
48 # documentation root, use os.path.abspath to make it absolute, like shown here.
49 sys.path.insert(0, os.path.abspath('..'))
50 sys.path.append(os.path.abspath('_themes/flask-sphinx-themes'))
51 sys.path.insert(0, os.path.abspath('../privacyidea'))
52
53 # -- General configuration -----------------------------------------------------
54
55 # If your documentation needs a minimal Sphinx version, state it here.
56 #needs_sphinx = '1.0'
57
58 # Add any Sphinx extension module names here, as strings. They can be extensions
59 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
60 extensions = ['sphinx.ext.autodoc', 'sphinx.ext.imgmath', 'sphinx.ext.viewcode',
61 'sphinxcontrib.autohttp.flask']
62 http_index_ignore_prefixes = ['/token']
63
64 # Add any paths that contain templates here, relative to this directory.
65 templates_path = ['_templates']
66
67 # The suffix of source filenames.
68 source_suffix = '.rst'
69
70 # The encoding of source files.
71 #source_encoding = 'utf-8-sig'
72
73 # The master toctree document.
74 master_doc = 'index'
75
76 # General information about the project.
77 project = u'privacyIDEA'
78 copyright = u'2014-2019, Cornelius Kölbel'
79
80 # The language for content autogenerated by Sphinx. Refer to documentation
81 # for a list of supported languages.
82 #language = None
83
84 # There are two options for replacing |today|: either, you set today to some
85 # non-false value, then it is used:
86 #today = ''
87 # Else, today_fmt is used as the format for a strftime call.
88 #today_fmt = '%B %d, %Y'
89
90 # List of patterns, relative to source directory, that match files and
91 # directories to ignore when looking for source files.
92 exclude_patterns = ['_build']
93
94 # The reST default role (used for this markup: `text`) to use for all documents.
95 #default_role = None
96
97 # If true, '()' will be appended to :func: etc. cross-reference text.
98 add_function_parentheses = True
99
100 # If true, the current module name will be prepended to all description
101 # unit titles (such as .. function::).
102 #add_module_names = True
103
104 # If true, sectionauthor and moduleauthor directives will be shown in the
105 # output. They are ignored by default.
106 #show_authors = False
107
108 # The name of the Pygments (syntax highlighting) style to use.
109 pygments_style = 'sphinx'
110
111 # A list of ignored prefixes for module index sorting.
112 #modindex_common_prefix = []
113
114
115 # -- Options for HTML output ---------------------------------------------------
116
117 # The theme to use for HTML and HTML Help pages. See the documentation for
118 # a list of builtin themes.
119 #html_theme = 'sphinxdoc'
120 #html_theme = 'sphinx_rtd_theme'
121 #html_theme = 'agogo'
122 html_theme = 'flask'
123
124 # Theme options are theme-specific and customize the look and feel of a theme
125 # further. For a list of options available for each theme, see the
126 # documentation.
127 #html_theme_options = {}
128
129 # Add any paths that contain custom themes here, relative to this directory.
130 html_theme_path = ['_themes/flask-sphinx-themes']
131
132
133 # The name for this set of Sphinx documents. If None, it defaults to
134 # "<project> v<release> documentation".
135 #html_title = None
136
137 # A shorter title for the navigation bar. Default is the same as html_title.
138 #html_short_title = None
139
140 # The name of an image file (relative to this directory) to place at the top
141 # of the sidebar.
142 html_logo = "images/privacyidea-color.png"
143
144 # The name of an image file (within the static path) to use as favicon of the
145 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
146 # pixels large.
147 #html_favicon = None
148
149 # Add any paths that contain custom static files (such as style sheets) here,
150 # relative to this directory. They are copied after the builtin static files,
151 # so a file named "default.css" will overwrite the builtin "default.css".
152 html_static_path = ['_static']
153
154 html_css_files = [
155 'css/custom.css',
156 ]
157
158 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
159 # using the given strftime format.
160 #html_last_updated_fmt = '%b %d, %Y'
161
162 # If true, SmartyPants will be used to convert quotes and dashes to
163 # typographically correct entities.
164 #html_use_smartypants = True
165
166 # Custom sidebar templates, maps document names to template names.
167 #html_sidebars = {}
168
169 # Additional templates that should be rendered to pages, maps page names to
170 # template names.
171 #html_additional_pages = {}
172
173 # If false, no module index is generated.
174 #html_domain_indices = True
175
176 # If false, no index is generated.
177 #html_use_index = True
178
179 # If true, the index is split into individual pages for each letter.
180 #html_split_index = False
181
182 # If true, links to the reST sources are added to the pages.
183 #html_show_sourcelink = True
184
185 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
186 #html_show_sphinx = True
187
188 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
189 #html_show_copyright = True
190
191 # If true, an OpenSearch description file will be output, and all pages will
192 # contain a <link> tag referring to it. The value of this option must be the
193 # base URL from which the finished HTML is served.
194 #html_use_opensearch = ''
195
196 # This is the file name suffix for HTML files (e.g. ".xhtml").
197 #html_file_suffix = None
198
199 # Output file base name for HTML help builder.
200 htmlhelp_basename = 'privacyIDEAdoc'
201
202
203 # -- Options for LaTeX output --------------------------------------------------
204
205 latex_elements = {
206 # The paper size ('letterpaper' or 'a4paper').
207 #'papersize': 'letterpaper',
208
209 # The font size ('10pt', '11pt' or '12pt').
210 #'pointsize': '10pt',
211
212 # Additional stuff for the LaTeX preamble.
213 #'preamble': '',
214 }
215
216 # Grouping the document tree into LaTeX files. List of tuples
217 # (source start file, target name, title, author, documentclass [howto/manual]).
218 latex_documents = [
219 ('index', 'privacyIDEA.tex', u'privacyIDEA Authentication System',
220 u'Cornelius Kölbel', 'manual'),
221 ]
222
223 # The name of an image file (relative to this directory) to place at the top of
224 # the title page.
225 #latex_logo = None
226
227 # For "manual" documents, if this is true, then toplevel headings are parts,
228 # not chapters.
229 #latex_use_parts = False
230
231 # If true, show page references after internal links.
232 #latex_show_pagerefs = False
233
234 # If true, show URL addresses after external links.
235 #latex_show_urls = False
236
237 # Documents to append as an appendix to all manuals.
238 #latex_appendices = []
239
240 # If false, no module index is generated.
241 #latex_domain_indices = True
242
243
244 # -- Options for manual page output --------------------------------------------
245
246 # One entry per manual page. List of tuples
247 # (source start file, name, description, authors, manual section).
248 man_pages = [
249 ('index', 'privacyidea-server', u'privacyIDEA Authentication System',
250 [u'Cornelius Kölbel'], 1)
251 ]
252
253 # If true, show URL addresses after external links.
254 #man_show_urls = False
255
256
257 # -- Options for Texinfo output ------------------------------------------------
258
259 # Grouping the document tree into Texinfo files. List of tuples
260 # (source start file, target name, title, author,
261 # dir menu entry, description, category)
262 texinfo_documents = [
263 ('index', 'privacyIDEA', u'privacyIDEA AUthentication System',
264 u'Cornelius Kölbel', 'privacyIDEA', 'One line description of project.',
265 'Miscellaneous'),
266 ]
267
268 # Documents to append as an appendix to all manuals.
269 #texinfo_appendices = []
270
271 # If false, no module index is generated.
272 #texinfo_domain_indices = True
273
274 # How to display URL addresses: 'footnote', 'no', or 'inline'.
275 #texinfo_show_urls = 'footnote'
276
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/doc/conf.py b/doc/conf.py
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -58,7 +58,7 @@
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.imgmath', 'sphinx.ext.viewcode',
- 'sphinxcontrib.autohttp.flask']
+ 'sphinxcontrib.autohttp.flask', 'sphinxcontrib.plantuml']
http_index_ignore_prefixes = ['/token']
# Add any paths that contain templates here, relative to this directory.
@@ -273,3 +273,13 @@
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
+
+#
+# PlantUML
+#
+
+# Run plantUML under Java in headless mode. This is needed for compatibility with readthedocs.io.
+plantuml = 'java -Djava.awt.headless=true -jar /usr/share/plantuml/plantuml.jar'
+
+# Use SVG inside <object> in supported browsers (all except IE8), falling back to PNG.
+plantuml_output_format = 'svg'
| {"golden_diff": "diff --git a/doc/conf.py b/doc/conf.py\n--- a/doc/conf.py\n+++ b/doc/conf.py\n@@ -58,7 +58,7 @@\n # Add any Sphinx extension module names here, as strings. They can be extensions\n # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\n extensions = ['sphinx.ext.autodoc', 'sphinx.ext.imgmath', 'sphinx.ext.viewcode', \n- 'sphinxcontrib.autohttp.flask']\n+ 'sphinxcontrib.autohttp.flask', 'sphinxcontrib.plantuml']\n http_index_ignore_prefixes = ['/token']\n \n # Add any paths that contain templates here, relative to this directory.\n@@ -273,3 +273,13 @@\n \n # How to display URL addresses: 'footnote', 'no', or 'inline'.\n #texinfo_show_urls = 'footnote'\n+\n+#\n+# PlantUML\n+#\n+\n+# Run plantUML under Java in headless mode. This is needed for compatibility with readthedocs.io.\n+plantuml = 'java -Djava.awt.headless=true -jar /usr/share/plantuml/plantuml.jar'\n+\n+# Use SVG inside <object> in supported browsers (all except IE8), falling back to PNG.\n+plantuml_output_format = 'svg'\n", "issue": "Add some diagrams to the documentation\nIt would be nice to have a few diagrams in the documentation. Specifically I think a deployment diagram giving an example for the components involved in a typical use of push tokens, as well as sequence diagrams for the three authentication modes would be nice.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# privacyIDEA documentation build configuration file, created by\n# sphinx-quickstart on Fri Jun 13 07:31:01 2014.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '3.2'\n# The full version, including alpha/beta/rc tags.\n#release = '2.16dev5'\nrelease = version\n\n\nimport sys\nimport os\n\n# Monkey-patch functools.wraps\n# http://stackoverflow.com/questions/28366818/preserve-default-arguments-of-wrapped-decorated-python-function-in-sphinx-docume\nimport functools\n\ndef no_op_wraps(func, assigned=None, updated=None):\n \"\"\"Replaces functools.wraps in order to undo wrapping.\n\n Can be used to preserve the decorated function's signature\n in the documentation generated by Sphinx.\n\n \"\"\"\n def wrapper(decorator):\n return func\n return wrapper\n\nfunctools.wraps = no_op_wraps\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('..'))\nsys.path.append(os.path.abspath('_themes/flask-sphinx-themes'))\nsys.path.insert(0, os.path.abspath('../privacyidea'))\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = ['sphinx.ext.autodoc', 'sphinx.ext.imgmath', 'sphinx.ext.viewcode', \n 'sphinxcontrib.autohttp.flask']\nhttp_index_ignore_prefixes = ['/token']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'privacyIDEA'\ncopyright = u'2014-2019, Cornelius K\u00f6lbel'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\nadd_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#html_theme = 'sphinxdoc'\n#html_theme = 'sphinx_rtd_theme'\n#html_theme = 'agogo'\nhtml_theme = 'flask'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\nhtml_theme_path = ['_themes/flask-sphinx-themes']\n\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = \"images/privacyidea-color.png\"\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\nhtml_css_files = [\n 'css/custom.css',\n]\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'privacyIDEAdoc'\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'privacyIDEA.tex', u'privacyIDEA Authentication System',\n u'Cornelius K\u00f6lbel', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'privacyidea-server', u'privacyIDEA Authentication System',\n [u'Cornelius K\u00f6lbel'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'privacyIDEA', u'privacyIDEA AUthentication System',\n u'Cornelius K\u00f6lbel', 'privacyIDEA', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n", "path": "doc/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# privacyIDEA documentation build configuration file, created by\n# sphinx-quickstart on Fri Jun 13 07:31:01 2014.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '3.2'\n# The full version, including alpha/beta/rc tags.\n#release = '2.16dev5'\nrelease = version\n\n\nimport sys\nimport os\n\n# Monkey-patch functools.wraps\n# http://stackoverflow.com/questions/28366818/preserve-default-arguments-of-wrapped-decorated-python-function-in-sphinx-docume\nimport functools\n\ndef no_op_wraps(func, assigned=None, updated=None):\n \"\"\"Replaces functools.wraps in order to undo wrapping.\n\n Can be used to preserve the decorated function's signature\n in the documentation generated by Sphinx.\n\n \"\"\"\n def wrapper(decorator):\n return func\n return wrapper\n\nfunctools.wraps = no_op_wraps\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('..'))\nsys.path.append(os.path.abspath('_themes/flask-sphinx-themes'))\nsys.path.insert(0, os.path.abspath('../privacyidea'))\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = ['sphinx.ext.autodoc', 'sphinx.ext.imgmath', 'sphinx.ext.viewcode', \n 'sphinxcontrib.autohttp.flask', 'sphinxcontrib.plantuml']\nhttp_index_ignore_prefixes = ['/token']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'privacyIDEA'\ncopyright = u'2014-2019, Cornelius K\u00f6lbel'\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\nadd_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#html_theme = 'sphinxdoc'\n#html_theme = 'sphinx_rtd_theme'\n#html_theme = 'agogo'\nhtml_theme = 'flask'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\nhtml_theme_path = ['_themes/flask-sphinx-themes']\n\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = \"images/privacyidea-color.png\"\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\nhtml_css_files = [\n 'css/custom.css',\n]\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'privacyIDEAdoc'\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'privacyIDEA.tex', u'privacyIDEA Authentication System',\n u'Cornelius K\u00f6lbel', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'privacyidea-server', u'privacyIDEA Authentication System',\n [u'Cornelius K\u00f6lbel'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'privacyIDEA', u'privacyIDEA AUthentication System',\n u'Cornelius K\u00f6lbel', 'privacyIDEA', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n#\n# PlantUML\n#\n\n# Run plantUML under Java in headless mode. This is needed for compatibility with readthedocs.io.\nplantuml = 'java -Djava.awt.headless=true -jar /usr/share/plantuml/plantuml.jar'\n\n# Use SVG inside <object> in supported browsers (all except IE8), falling back to PNG.\nplantuml_output_format = 'svg'\n", "path": "doc/conf.py"}]} | 3,245 | 284 |
gh_patches_debug_7246 | rasdani/github-patches | git_diff | digitalfabrik__integreat-cms-1285 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Push notifications can be sent multiple times
### Describe the Bug
It should not be possible to send push notifications multiple times to prevent the accidental re-sending of news.
Instead, only the "save" button should be enabled which can be used to update the messages that are delivered via the API endpoint.
### Steps to Reproduce
1. Go to News
2. Send new push notification
3. Send again
### Expected Behavior
The news should only be sent the first time and only saved on all subsequent submissions
### Actual Behavior
The news is sent twice
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `integreat_cms/cms/views/push_notifications/push_notification_form_view.py`
Content:
```
1 import logging
2
3 from datetime import datetime
4
5 from django.contrib import messages
6 from django.core.exceptions import PermissionDenied
7 from django.shortcuts import render, redirect
8 from django.utils.decorators import method_decorator
9 from django.utils.translation import ugettext as _
10 from django.views.generic import TemplateView
11 from django.forms import modelformset_factory
12
13 from .push_notification_sender import PushNotificationSender
14 from ...decorators import permission_required
15 from ...forms import (
16 PushNotificationForm,
17 PushNotificationTranslationForm,
18 )
19 from ...models import Language, PushNotification, PushNotificationTranslation
20
21 logger = logging.getLogger(__name__)
22
23
24 @method_decorator(permission_required("cms.view_pushnotification"), name="dispatch")
25 @method_decorator(permission_required("cms.change_pushnotification"), name="post")
26 class PushNotificationFormView(TemplateView):
27 """
28 Class that handles HTTP POST and GET requests for editing push notifications
29 """
30
31 #: The template to render (see :class:`~django.views.generic.base.TemplateResponseMixin`)
32 template_name = "push_notifications/push_notification_form.html"
33 #: The context dict passed to the template (see :class:`~django.views.generic.base.ContextMixin`)
34 extra_context = {"current_menu_item": "push_notifications_form"}
35
36 def get(self, request, *args, **kwargs):
37 r"""
38 Open form for creating or editing a push notification
39
40 :param request: Object representing the user call
41 :type request: ~django.http.HttpRequest
42
43 :param \*args: The supplied arguments
44 :type \*args: list
45
46 :param \**kwargs: The supplied keyword arguments
47 :type \**kwargs: dict
48
49 :return: The rendered template response
50 :rtype: ~django.template.response.TemplateResponse
51 """
52
53 region = request.region
54 language = region.get_language_or_404(
55 kwargs.get("language_slug"), only_active=True
56 )
57
58 push_notification = PushNotification.objects.filter(
59 id=kwargs.get("push_notification_id")
60 ).first()
61 push_notification_translations = PushNotificationTranslation.objects.filter(
62 push_notification=push_notification
63 )
64
65 push_notification_form = PushNotificationForm(instance=push_notification)
66
67 num_languages = len(region.active_languages)
68 PNTFormset = modelformset_factory(
69 PushNotificationTranslation,
70 form=PushNotificationTranslationForm,
71 max_num=num_languages,
72 extra=num_languages - push_notification_translations.count(),
73 )
74 existing_languages = push_notification.languages if push_notification else []
75 pnt_formset = PNTFormset(
76 # Add queryset for all translations which exist already
77 queryset=push_notification_translations,
78 # Add initial data for all languages which do not yet have a translation
79 initial=[
80 {"language": language}
81 for language in region.active_languages
82 if language not in existing_languages
83 ],
84 )
85
86 return render(
87 request,
88 self.template_name,
89 {
90 **self.get_context_data(**kwargs),
91 "push_notification_form": push_notification_form,
92 "pnt_formset": pnt_formset,
93 "language": language,
94 "languages": region.active_languages,
95 },
96 )
97
98 # pylint: disable=too-many-branches,unused-argument
99 def post(self, request, *args, **kwargs):
100 r"""
101 Save and show form for creating or editing a push notification. Send push notification
102 if asked for by user.
103
104 :param request: Object representing the user call
105 :type request: ~django.http.HttpRequest
106
107 :param \*args: The supplied arguments
108 :type \*args: list
109
110 :param \**kwargs: The supplied keyword arguments
111 :type \**kwargs: dict
112
113 :raises ~django.core.exceptions.PermissionDenied: If user does not have the permission to send push notifications
114
115 :return: The rendered template response
116 :rtype: ~django.template.response.TemplateResponse
117 """
118
119 region = request.region
120 language = Language.objects.get(slug=kwargs.get("language_slug"))
121
122 push_notification_instance = PushNotification.objects.filter(
123 id=kwargs.get("push_notification_id")
124 ).first()
125 push_notification_translations = PushNotificationTranslation.objects.filter(
126 push_notification=push_notification_instance
127 )
128
129 if not request.user.has_perm("cms.change_pushnotification"):
130 logger.warning(
131 "%r tried to edit %r",
132 request.user,
133 push_notification_instance,
134 )
135 raise PermissionDenied
136
137 pn_form = PushNotificationForm(
138 data=request.POST,
139 instance=push_notification_instance,
140 additional_instance_attributes={
141 "region": region,
142 },
143 )
144
145 num_languages = len(region.active_languages)
146 PNTFormset = modelformset_factory(
147 PushNotificationTranslation,
148 form=PushNotificationTranslationForm,
149 max_num=num_languages,
150 extra=num_languages - push_notification_translations.count(),
151 )
152 existing_languages = (
153 push_notification_instance.languages if push_notification_instance else []
154 )
155 pnt_formset = PNTFormset(
156 data=request.POST,
157 # Add queryset for all translations which exist already
158 queryset=push_notification_translations,
159 # Add initial data for all languages which do not yet have a translation
160 initial=[
161 {"language": language}
162 for language in region.active_languages
163 if language not in existing_languages
164 ],
165 )
166
167 if not pn_form.is_valid() or not pnt_formset.is_valid():
168 # Add error messages
169 pn_form.add_error_messages(request)
170 for form in pnt_formset:
171 form.add_error_messages(request)
172 else:
173 # Save forms
174 pn_form.save()
175 for form in pnt_formset:
176 form.instance.push_notification = pn_form.instance
177 pnt_formset.save()
178
179 # Add the success message
180 if not push_notification_instance:
181 messages.success(
182 request,
183 _('News message "{}" was successfully created').format(
184 pn_form.instance
185 ),
186 )
187 else:
188 messages.success(
189 request,
190 _('News message "{}" was successfully saved').format(
191 pn_form.instance
192 ),
193 )
194
195 if "submit_send" in request.POST:
196 if not request.user.has_perm("cms.send_push_notification"):
197 logger.warning(
198 "%r does not have the permission to send %r",
199 request.user,
200 push_notification_instance,
201 )
202 raise PermissionDenied
203 push_sender = PushNotificationSender(pn_form.instance)
204 if not push_sender.is_valid():
205 messages.warning(
206 request,
207 _(
208 "News message cannot be sent because required texts are missing"
209 ),
210 )
211 else:
212 if push_sender.send_all():
213 messages.success(
214 request, _("News message was successfully sent")
215 )
216 pn_form.instance.sent_date = datetime.now()
217 pn_form.instance.save()
218 else:
219 messages.error(request, _("News message could not be sent"))
220
221 # Redirect to the edit page
222 return redirect(
223 "edit_push_notification",
224 **{
225 "push_notification_id": pn_form.instance.id,
226 "region_slug": region.slug,
227 "language_slug": language.slug,
228 },
229 )
230
231 return render(
232 request,
233 self.template_name,
234 {
235 **self.get_context_data(**kwargs),
236 "push_notification_form": pn_form,
237 "pnt_formset": pnt_formset,
238 "language": language,
239 "languages": region.active_languages,
240 },
241 )
242
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/integreat_cms/cms/views/push_notifications/push_notification_form_view.py b/integreat_cms/cms/views/push_notifications/push_notification_form_view.py
--- a/integreat_cms/cms/views/push_notifications/push_notification_form_view.py
+++ b/integreat_cms/cms/views/push_notifications/push_notification_form_view.py
@@ -192,7 +192,7 @@
),
)
- if "submit_send" in request.POST:
+ if "submit_send" in request.POST and not pn_form.instance.sent_date:
if not request.user.has_perm("cms.send_push_notification"):
logger.warning(
"%r does not have the permission to send %r",
| {"golden_diff": "diff --git a/integreat_cms/cms/views/push_notifications/push_notification_form_view.py b/integreat_cms/cms/views/push_notifications/push_notification_form_view.py\n--- a/integreat_cms/cms/views/push_notifications/push_notification_form_view.py\n+++ b/integreat_cms/cms/views/push_notifications/push_notification_form_view.py\n@@ -192,7 +192,7 @@\n ),\n )\n \n- if \"submit_send\" in request.POST:\n+ if \"submit_send\" in request.POST and not pn_form.instance.sent_date:\n if not request.user.has_perm(\"cms.send_push_notification\"):\n logger.warning(\n \"%r does not have the permission to send %r\",\n", "issue": "Push notifications can be sent multiple times\n### Describe the Bug\r\n<!-- A clear and concise description of what the bug is. -->\r\nIt should not be possible to send push notifications multiple times to prevent the accidental re-sending of news.\r\nInstead, only the \"save\" button should be enabled which can be used to update the messages that are delivered via the API endpoint.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Go to News\r\n2. Send new push notification\r\n3. Send again\r\n\r\n### Expected Behavior\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nThe news should only be sent the first time and only saved on all subsequent submissions\r\n\r\n### Actual Behavior\r\n<!-- A clear and concise description of what actually happened. -->\r\nThe news is sent twice\r\n\n", "before_files": [{"content": "import logging\n\nfrom datetime import datetime\n\nfrom django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import render, redirect\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic import TemplateView\nfrom django.forms import modelformset_factory\n\nfrom .push_notification_sender import PushNotificationSender\nfrom ...decorators import permission_required\nfrom ...forms import (\n PushNotificationForm,\n PushNotificationTranslationForm,\n)\nfrom ...models import Language, PushNotification, PushNotificationTranslation\n\nlogger = logging.getLogger(__name__)\n\n\n@method_decorator(permission_required(\"cms.view_pushnotification\"), name=\"dispatch\")\n@method_decorator(permission_required(\"cms.change_pushnotification\"), name=\"post\")\nclass PushNotificationFormView(TemplateView):\n \"\"\"\n Class that handles HTTP POST and GET requests for editing push notifications\n \"\"\"\n\n #: The template to render (see :class:`~django.views.generic.base.TemplateResponseMixin`)\n template_name = \"push_notifications/push_notification_form.html\"\n #: The context dict passed to the template (see :class:`~django.views.generic.base.ContextMixin`)\n extra_context = {\"current_menu_item\": \"push_notifications_form\"}\n\n def get(self, request, *args, **kwargs):\n r\"\"\"\n Open form for creating or editing a push notification\n\n :param request: Object representing the user call\n :type request: ~django.http.HttpRequest\n\n :param \\*args: The supplied arguments\n :type \\*args: list\n\n :param \\**kwargs: The supplied keyword arguments\n :type \\**kwargs: dict\n\n :return: The rendered template response\n :rtype: ~django.template.response.TemplateResponse\n \"\"\"\n\n region = request.region\n language = region.get_language_or_404(\n kwargs.get(\"language_slug\"), only_active=True\n )\n\n push_notification = PushNotification.objects.filter(\n id=kwargs.get(\"push_notification_id\")\n ).first()\n 
push_notification_translations = PushNotificationTranslation.objects.filter(\n push_notification=push_notification\n )\n\n push_notification_form = PushNotificationForm(instance=push_notification)\n\n num_languages = len(region.active_languages)\n PNTFormset = modelformset_factory(\n PushNotificationTranslation,\n form=PushNotificationTranslationForm,\n max_num=num_languages,\n extra=num_languages - push_notification_translations.count(),\n )\n existing_languages = push_notification.languages if push_notification else []\n pnt_formset = PNTFormset(\n # Add queryset for all translations which exist already\n queryset=push_notification_translations,\n # Add initial data for all languages which do not yet have a translation\n initial=[\n {\"language\": language}\n for language in region.active_languages\n if language not in existing_languages\n ],\n )\n\n return render(\n request,\n self.template_name,\n {\n **self.get_context_data(**kwargs),\n \"push_notification_form\": push_notification_form,\n \"pnt_formset\": pnt_formset,\n \"language\": language,\n \"languages\": region.active_languages,\n },\n )\n\n # pylint: disable=too-many-branches,unused-argument\n def post(self, request, *args, **kwargs):\n r\"\"\"\n Save and show form for creating or editing a push notification. Send push notification\n if asked for by user.\n\n :param request: Object representing the user call\n :type request: ~django.http.HttpRequest\n\n :param \\*args: The supplied arguments\n :type \\*args: list\n\n :param \\**kwargs: The supplied keyword arguments\n :type \\**kwargs: dict\n\n :raises ~django.core.exceptions.PermissionDenied: If user does not have the permission to send push notifications\n\n :return: The rendered template response\n :rtype: ~django.template.response.TemplateResponse\n \"\"\"\n\n region = request.region\n language = Language.objects.get(slug=kwargs.get(\"language_slug\"))\n\n push_notification_instance = PushNotification.objects.filter(\n id=kwargs.get(\"push_notification_id\")\n ).first()\n push_notification_translations = PushNotificationTranslation.objects.filter(\n push_notification=push_notification_instance\n )\n\n if not request.user.has_perm(\"cms.change_pushnotification\"):\n logger.warning(\n \"%r tried to edit %r\",\n request.user,\n push_notification_instance,\n )\n raise PermissionDenied\n\n pn_form = PushNotificationForm(\n data=request.POST,\n instance=push_notification_instance,\n additional_instance_attributes={\n \"region\": region,\n },\n )\n\n num_languages = len(region.active_languages)\n PNTFormset = modelformset_factory(\n PushNotificationTranslation,\n form=PushNotificationTranslationForm,\n max_num=num_languages,\n extra=num_languages - push_notification_translations.count(),\n )\n existing_languages = (\n push_notification_instance.languages if push_notification_instance else []\n )\n pnt_formset = PNTFormset(\n data=request.POST,\n # Add queryset for all translations which exist already\n queryset=push_notification_translations,\n # Add initial data for all languages which do not yet have a translation\n initial=[\n {\"language\": language}\n for language in region.active_languages\n if language not in existing_languages\n ],\n )\n\n if not pn_form.is_valid() or not pnt_formset.is_valid():\n # Add error messages\n pn_form.add_error_messages(request)\n for form in pnt_formset:\n form.add_error_messages(request)\n else:\n # Save forms\n pn_form.save()\n for form in pnt_formset:\n form.instance.push_notification = pn_form.instance\n pnt_formset.save()\n\n # Add the 
success message\n if not push_notification_instance:\n messages.success(\n request,\n _('News message \"{}\" was successfully created').format(\n pn_form.instance\n ),\n )\n else:\n messages.success(\n request,\n _('News message \"{}\" was successfully saved').format(\n pn_form.instance\n ),\n )\n\n if \"submit_send\" in request.POST:\n if not request.user.has_perm(\"cms.send_push_notification\"):\n logger.warning(\n \"%r does not have the permission to send %r\",\n request.user,\n push_notification_instance,\n )\n raise PermissionDenied\n push_sender = PushNotificationSender(pn_form.instance)\n if not push_sender.is_valid():\n messages.warning(\n request,\n _(\n \"News message cannot be sent because required texts are missing\"\n ),\n )\n else:\n if push_sender.send_all():\n messages.success(\n request, _(\"News message was successfully sent\")\n )\n pn_form.instance.sent_date = datetime.now()\n pn_form.instance.save()\n else:\n messages.error(request, _(\"News message could not be sent\"))\n\n # Redirect to the edit page\n return redirect(\n \"edit_push_notification\",\n **{\n \"push_notification_id\": pn_form.instance.id,\n \"region_slug\": region.slug,\n \"language_slug\": language.slug,\n },\n )\n\n return render(\n request,\n self.template_name,\n {\n **self.get_context_data(**kwargs),\n \"push_notification_form\": pn_form,\n \"pnt_formset\": pnt_formset,\n \"language\": language,\n \"languages\": region.active_languages,\n },\n )\n", "path": "integreat_cms/cms/views/push_notifications/push_notification_form_view.py"}], "after_files": [{"content": "import logging\n\nfrom datetime import datetime\n\nfrom django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import render, redirect\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic import TemplateView\nfrom django.forms import modelformset_factory\n\nfrom .push_notification_sender import PushNotificationSender\nfrom ...decorators import permission_required\nfrom ...forms import (\n PushNotificationForm,\n PushNotificationTranslationForm,\n)\nfrom ...models import Language, PushNotification, PushNotificationTranslation\n\nlogger = logging.getLogger(__name__)\n\n\n@method_decorator(permission_required(\"cms.view_pushnotification\"), name=\"dispatch\")\n@method_decorator(permission_required(\"cms.change_pushnotification\"), name=\"post\")\nclass PushNotificationFormView(TemplateView):\n \"\"\"\n Class that handles HTTP POST and GET requests for editing push notifications\n \"\"\"\n\n #: The template to render (see :class:`~django.views.generic.base.TemplateResponseMixin`)\n template_name = \"push_notifications/push_notification_form.html\"\n #: The context dict passed to the template (see :class:`~django.views.generic.base.ContextMixin`)\n extra_context = {\"current_menu_item\": \"push_notifications_form\"}\n\n def get(self, request, *args, **kwargs):\n r\"\"\"\n Open form for creating or editing a push notification\n\n :param request: Object representing the user call\n :type request: ~django.http.HttpRequest\n\n :param \\*args: The supplied arguments\n :type \\*args: list\n\n :param \\**kwargs: The supplied keyword arguments\n :type \\**kwargs: dict\n\n :return: The rendered template response\n :rtype: ~django.template.response.TemplateResponse\n \"\"\"\n\n region = request.region\n language = region.get_language_or_404(\n kwargs.get(\"language_slug\"), only_active=True\n )\n\n push_notification = 
PushNotification.objects.filter(\n id=kwargs.get(\"push_notification_id\")\n ).first()\n push_notification_translations = PushNotificationTranslation.objects.filter(\n push_notification=push_notification\n )\n\n push_notification_form = PushNotificationForm(instance=push_notification)\n\n num_languages = len(region.active_languages)\n PNTFormset = modelformset_factory(\n PushNotificationTranslation,\n form=PushNotificationTranslationForm,\n max_num=num_languages,\n extra=num_languages - push_notification_translations.count(),\n )\n existing_languages = push_notification.languages if push_notification else []\n pnt_formset = PNTFormset(\n # Add queryset for all translations which exist already\n queryset=push_notification_translations,\n # Add initial data for all languages which do not yet have a translation\n initial=[\n {\"language\": language}\n for language in region.active_languages\n if language not in existing_languages\n ],\n )\n\n return render(\n request,\n self.template_name,\n {\n **self.get_context_data(**kwargs),\n \"push_notification_form\": push_notification_form,\n \"pnt_formset\": pnt_formset,\n \"language\": language,\n \"languages\": region.active_languages,\n },\n )\n\n # pylint: disable=too-many-branches,unused-argument\n def post(self, request, *args, **kwargs):\n r\"\"\"\n Save and show form for creating or editing a push notification. Send push notification\n if asked for by user.\n\n :param request: Object representing the user call\n :type request: ~django.http.HttpRequest\n\n :param \\*args: The supplied arguments\n :type \\*args: list\n\n :param \\**kwargs: The supplied keyword arguments\n :type \\**kwargs: dict\n\n :raises ~django.core.exceptions.PermissionDenied: If user does not have the permission to send push notifications\n\n :return: The rendered template response\n :rtype: ~django.template.response.TemplateResponse\n \"\"\"\n\n region = request.region\n language = Language.objects.get(slug=kwargs.get(\"language_slug\"))\n\n push_notification_instance = PushNotification.objects.filter(\n id=kwargs.get(\"push_notification_id\")\n ).first()\n push_notification_translations = PushNotificationTranslation.objects.filter(\n push_notification=push_notification_instance\n )\n\n if not request.user.has_perm(\"cms.change_pushnotification\"):\n logger.warning(\n \"%r tried to edit %r\",\n request.user,\n push_notification_instance,\n )\n raise PermissionDenied\n\n pn_form = PushNotificationForm(\n data=request.POST,\n instance=push_notification_instance,\n additional_instance_attributes={\n \"region\": region,\n },\n )\n\n num_languages = len(region.active_languages)\n PNTFormset = modelformset_factory(\n PushNotificationTranslation,\n form=PushNotificationTranslationForm,\n max_num=num_languages,\n extra=num_languages - push_notification_translations.count(),\n )\n existing_languages = (\n push_notification_instance.languages if push_notification_instance else []\n )\n pnt_formset = PNTFormset(\n data=request.POST,\n # Add queryset for all translations which exist already\n queryset=push_notification_translations,\n # Add initial data for all languages which do not yet have a translation\n initial=[\n {\"language\": language}\n for language in region.active_languages\n if language not in existing_languages\n ],\n )\n\n if not pn_form.is_valid() or not pnt_formset.is_valid():\n # Add error messages\n pn_form.add_error_messages(request)\n for form in pnt_formset:\n form.add_error_messages(request)\n else:\n # Save forms\n pn_form.save()\n for form in pnt_formset:\n 
form.instance.push_notification = pn_form.instance\n pnt_formset.save()\n\n # Add the success message\n if not push_notification_instance:\n messages.success(\n request,\n _('News message \"{}\" was successfully created').format(\n pn_form.instance\n ),\n )\n else:\n messages.success(\n request,\n _('News message \"{}\" was successfully saved').format(\n pn_form.instance\n ),\n )\n\n if \"submit_send\" in request.POST and not pn_form.instance.sent_date:\n if not request.user.has_perm(\"cms.send_push_notification\"):\n logger.warning(\n \"%r does not have the permission to send %r\",\n request.user,\n push_notification_instance,\n )\n raise PermissionDenied\n push_sender = PushNotificationSender(pn_form.instance)\n if not push_sender.is_valid():\n messages.warning(\n request,\n _(\n \"News message cannot be sent because required texts are missing\"\n ),\n )\n else:\n if push_sender.send_all():\n messages.success(\n request, _(\"News message was successfully sent\")\n )\n pn_form.instance.sent_date = datetime.now()\n pn_form.instance.save()\n else:\n messages.error(request, _(\"News message could not be sent\"))\n\n # Redirect to the edit page\n return redirect(\n \"edit_push_notification\",\n **{\n \"push_notification_id\": pn_form.instance.id,\n \"region_slug\": region.slug,\n \"language_slug\": language.slug,\n },\n )\n\n return render(\n request,\n self.template_name,\n {\n **self.get_context_data(**kwargs),\n \"push_notification_form\": pn_form,\n \"pnt_formset\": pnt_formset,\n \"language\": language,\n \"languages\": region.active_languages,\n },\n )\n", "path": "integreat_cms/cms/views/push_notifications/push_notification_form_view.py"}]} | 2,613 | 154 |
gh_patches_debug_13081 | rasdani/github-patches | git_diff | docker__docker-py-1694 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
client.networks.create check_duplicates docs not reflective of behavior
Docs say it does, but it's actually set to `None`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/api/network.py`
Content:
```
1 from ..errors import InvalidVersion
2 from ..utils import check_resource, minimum_version
3 from ..utils import version_lt
4 from .. import utils
5
6
7 class NetworkApiMixin(object):
8 @minimum_version('1.21')
9 def networks(self, names=None, ids=None, filters=None):
10 """
11 List networks. Similar to the ``docker networks ls`` command.
12
13 Args:
14 names (:py:class:`list`): List of names to filter by
15 ids (:py:class:`list`): List of ids to filter by
16 filters (dict): Filters to be processed on the network list.
17 Available filters:
18 - ``driver=[<driver-name>]`` Matches a network's driver.
19 - ``label=[<key>]`` or ``label=[<key>=<value>]``.
20 - ``type=["custom"|"builtin"]`` Filters networks by type.
21
22 Returns:
23 (dict): List of network objects.
24
25 Raises:
26 :py:class:`docker.errors.APIError`
27 If the server returns an error.
28 """
29
30 if filters is None:
31 filters = {}
32 if names:
33 filters['name'] = names
34 if ids:
35 filters['id'] = ids
36 params = {'filters': utils.convert_filters(filters)}
37 url = self._url("/networks")
38 res = self._get(url, params=params)
39 return self._result(res, json=True)
40
41 @minimum_version('1.21')
42 def create_network(self, name, driver=None, options=None, ipam=None,
43 check_duplicate=None, internal=False, labels=None,
44 enable_ipv6=False, attachable=None, scope=None,
45 ingress=None):
46 """
47 Create a network. Similar to the ``docker network create``.
48
49 Args:
50 name (str): Name of the network
51 driver (str): Name of the driver used to create the network
52 options (dict): Driver options as a key-value dictionary
53 ipam (IPAMConfig): Optional custom IP scheme for the network.
54 check_duplicate (bool): Request daemon to check for networks with
55 same name. Default: ``True``.
56 internal (bool): Restrict external access to the network. Default
57 ``False``.
58 labels (dict): Map of labels to set on the network. Default
59 ``None``.
60 enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.
61 attachable (bool): If enabled, and the network is in the global
62 scope, non-service containers on worker nodes will be able to
63 connect to the network.
64 ingress (bool): If set, create an ingress network which provides
65 the routing-mesh in swarm mode.
66
67 Returns:
68 (dict): The created network reference object
69
70 Raises:
71 :py:class:`docker.errors.APIError`
72 If the server returns an error.
73
74 Example:
75 A network using the bridge driver:
76
77 >>> client.create_network("network1", driver="bridge")
78
79 You can also create more advanced networks with custom IPAM
80 configurations. For example, setting the subnet to
81 ``192.168.52.0/24`` and gateway address to ``192.168.52.254``.
82
83 .. code-block:: python
84
85 >>> ipam_pool = docker.types.IPAMPool(
86 subnet='192.168.52.0/24',
87 gateway='192.168.52.254'
88 )
89 >>> ipam_config = docker.types.IPAMConfig(
90 pool_configs=[ipam_pool]
91 )
92 >>> docker_client.create_network("network1", driver="bridge",
93 ipam=ipam_config)
94 """
95 if options is not None and not isinstance(options, dict):
96 raise TypeError('options must be a dictionary')
97
98 data = {
99 'Name': name,
100 'Driver': driver,
101 'Options': options,
102 'IPAM': ipam,
103 'CheckDuplicate': check_duplicate,
104 }
105
106 if labels is not None:
107 if version_lt(self._version, '1.23'):
108 raise InvalidVersion(
109 'network labels were introduced in API 1.23'
110 )
111 if not isinstance(labels, dict):
112 raise TypeError('labels must be a dictionary')
113 data["Labels"] = labels
114
115 if enable_ipv6:
116 if version_lt(self._version, '1.23'):
117 raise InvalidVersion(
118 'enable_ipv6 was introduced in API 1.23'
119 )
120 data['EnableIPv6'] = True
121
122 if internal:
123 if version_lt(self._version, '1.22'):
124 raise InvalidVersion('Internal networks are not '
125 'supported in API version < 1.22')
126 data['Internal'] = True
127
128 if attachable is not None:
129 if version_lt(self._version, '1.24'):
130 raise InvalidVersion(
131 'attachable is not supported in API version < 1.24'
132 )
133 data['Attachable'] = attachable
134
135 if ingress is not None:
136 if version_lt(self._version, '1.29'):
137 raise InvalidVersion(
138 'ingress is not supported in API version < 1.29'
139 )
140
141 data['Ingress'] = ingress
142
143 url = self._url("/networks/create")
144 res = self._post_json(url, data=data)
145 return self._result(res, json=True)
146
147 @minimum_version('1.25')
148 def prune_networks(self, filters=None):
149 """
150 Delete unused networks
151
152 Args:
153 filters (dict): Filters to process on the prune list.
154
155 Returns:
156 (dict): A dict containing a list of deleted network names and
157 the amount of disk space reclaimed in bytes.
158
159 Raises:
160 :py:class:`docker.errors.APIError`
161 If the server returns an error.
162 """
163 params = {}
164 if filters:
165 params['filters'] = utils.convert_filters(filters)
166 url = self._url('/networks/prune')
167 return self._result(self._post(url, params=params), True)
168
169 @minimum_version('1.21')
170 @check_resource('net_id')
171 def remove_network(self, net_id):
172 """
173 Remove a network. Similar to the ``docker network rm`` command.
174
175 Args:
176 net_id (str): The network's id
177 """
178 url = self._url("/networks/{0}", net_id)
179 res = self._delete(url)
180 self._raise_for_status(res)
181
182 @minimum_version('1.21')
183 @check_resource('net_id')
184 def inspect_network(self, net_id, verbose=None):
185 """
186 Get detailed information about a network.
187
188 Args:
189 net_id (str): ID of network
190 verbose (bool): Show the service details across the cluster in
191 swarm mode.
192 """
193 params = {}
194 if verbose is not None:
195 if version_lt(self._version, '1.28'):
196 raise InvalidVersion('verbose was introduced in API 1.28')
197 params['verbose'] = verbose
198
199 url = self._url("/networks/{0}", net_id)
200 res = self._get(url, params=params)
201 return self._result(res, json=True)
202
203 @check_resource('container')
204 @minimum_version('1.21')
205 def connect_container_to_network(self, container, net_id,
206 ipv4_address=None, ipv6_address=None,
207 aliases=None, links=None,
208 link_local_ips=None):
209 """
210 Connect a container to a network.
211
212 Args:
213 container (str): container-id/name to be connected to the network
214 net_id (str): network id
215 aliases (:py:class:`list`): A list of aliases for this endpoint.
216 Names in that list can be used within the network to reach the
217 container. Defaults to ``None``.
218 links (:py:class:`list`): A list of links for this endpoint.
219 Containers declared in this list will be linked to this
220 container. Defaults to ``None``.
221 ipv4_address (str): The IP address of this container on the
222 network, using the IPv4 protocol. Defaults to ``None``.
223 ipv6_address (str): The IP address of this container on the
224 network, using the IPv6 protocol. Defaults to ``None``.
225 link_local_ips (:py:class:`list`): A list of link-local
226 (IPv4/IPv6) addresses.
227 """
228 data = {
229 "Container": container,
230 "EndpointConfig": self.create_endpoint_config(
231 aliases=aliases, links=links, ipv4_address=ipv4_address,
232 ipv6_address=ipv6_address, link_local_ips=link_local_ips
233 ),
234 }
235
236 url = self._url("/networks/{0}/connect", net_id)
237 res = self._post_json(url, data=data)
238 self._raise_for_status(res)
239
240 @check_resource('container')
241 @minimum_version('1.21')
242 def disconnect_container_from_network(self, container, net_id,
243 force=False):
244 """
245 Disconnect a container from a network.
246
247 Args:
248 container (str): container ID or name to be disconnected from the
249 network
250 net_id (str): network ID
251 force (bool): Force the container to disconnect from a network.
252 Default: ``False``
253 """
254 data = {"Container": container}
255 if force:
256 if version_lt(self._version, '1.22'):
257 raise InvalidVersion(
258 'Forced disconnect was introduced in API 1.22'
259 )
260 data['Force'] = force
261 url = self._url("/networks/{0}/disconnect", net_id)
262 res = self._post_json(url, data=data)
263 self._raise_for_status(res)
264
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docker/api/network.py b/docker/api/network.py
--- a/docker/api/network.py
+++ b/docker/api/network.py
@@ -52,7 +52,7 @@
options (dict): Driver options as a key-value dictionary
ipam (IPAMConfig): Optional custom IP scheme for the network.
check_duplicate (bool): Request daemon to check for networks with
- same name. Default: ``True``.
+ same name. Default: ``None``.
internal (bool): Restrict external access to the network. Default
``False``.
labels (dict): Map of labels to set on the network. Default
| {"golden_diff": "diff --git a/docker/api/network.py b/docker/api/network.py\n--- a/docker/api/network.py\n+++ b/docker/api/network.py\n@@ -52,7 +52,7 @@\n options (dict): Driver options as a key-value dictionary\n ipam (IPAMConfig): Optional custom IP scheme for the network.\n check_duplicate (bool): Request daemon to check for networks with\n- same name. Default: ``True``.\n+ same name. Default: ``None``.\n internal (bool): Restrict external access to the network. Default\n ``False``.\n labels (dict): Map of labels to set on the network. Default\n", "issue": "client.networks.create check_duplicates docs not reflective of behavior\nDocs say it does, but it's actually set to `None`.\n", "before_files": [{"content": "from ..errors import InvalidVersion\nfrom ..utils import check_resource, minimum_version\nfrom ..utils import version_lt\nfrom .. import utils\n\n\nclass NetworkApiMixin(object):\n @minimum_version('1.21')\n def networks(self, names=None, ids=None, filters=None):\n \"\"\"\n List networks. Similar to the ``docker networks ls`` command.\n\n Args:\n names (:py:class:`list`): List of names to filter by\n ids (:py:class:`list`): List of ids to filter by\n filters (dict): Filters to be processed on the network list.\n Available filters:\n - ``driver=[<driver-name>]`` Matches a network's driver.\n - ``label=[<key>]`` or ``label=[<key>=<value>]``.\n - ``type=[\"custom\"|\"builtin\"]`` Filters networks by type.\n\n Returns:\n (dict): List of network objects.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n\n if filters is None:\n filters = {}\n if names:\n filters['name'] = names\n if ids:\n filters['id'] = ids\n params = {'filters': utils.convert_filters(filters)}\n url = self._url(\"/networks\")\n res = self._get(url, params=params)\n return self._result(res, json=True)\n\n @minimum_version('1.21')\n def create_network(self, name, driver=None, options=None, ipam=None,\n check_duplicate=None, internal=False, labels=None,\n enable_ipv6=False, attachable=None, scope=None,\n ingress=None):\n \"\"\"\n Create a network. Similar to the ``docker network create``.\n\n Args:\n name (str): Name of the network\n driver (str): Name of the driver used to create the network\n options (dict): Driver options as a key-value dictionary\n ipam (IPAMConfig): Optional custom IP scheme for the network.\n check_duplicate (bool): Request daemon to check for networks with\n same name. Default: ``True``.\n internal (bool): Restrict external access to the network. Default\n ``False``.\n labels (dict): Map of labels to set on the network. Default\n ``None``.\n enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.\n attachable (bool): If enabled, and the network is in the global\n scope, non-service containers on worker nodes will be able to\n connect to the network.\n ingress (bool): If set, create an ingress network which provides\n the routing-mesh in swarm mode.\n\n Returns:\n (dict): The created network reference object\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Example:\n A network using the bridge driver:\n\n >>> client.create_network(\"network1\", driver=\"bridge\")\n\n You can also create more advanced networks with custom IPAM\n configurations. For example, setting the subnet to\n ``192.168.52.0/24`` and gateway address to ``192.168.52.254``.\n\n .. 
code-block:: python\n\n >>> ipam_pool = docker.types.IPAMPool(\n subnet='192.168.52.0/24',\n gateway='192.168.52.254'\n )\n >>> ipam_config = docker.types.IPAMConfig(\n pool_configs=[ipam_pool]\n )\n >>> docker_client.create_network(\"network1\", driver=\"bridge\",\n ipam=ipam_config)\n \"\"\"\n if options is not None and not isinstance(options, dict):\n raise TypeError('options must be a dictionary')\n\n data = {\n 'Name': name,\n 'Driver': driver,\n 'Options': options,\n 'IPAM': ipam,\n 'CheckDuplicate': check_duplicate,\n }\n\n if labels is not None:\n if version_lt(self._version, '1.23'):\n raise InvalidVersion(\n 'network labels were introduced in API 1.23'\n )\n if not isinstance(labels, dict):\n raise TypeError('labels must be a dictionary')\n data[\"Labels\"] = labels\n\n if enable_ipv6:\n if version_lt(self._version, '1.23'):\n raise InvalidVersion(\n 'enable_ipv6 was introduced in API 1.23'\n )\n data['EnableIPv6'] = True\n\n if internal:\n if version_lt(self._version, '1.22'):\n raise InvalidVersion('Internal networks are not '\n 'supported in API version < 1.22')\n data['Internal'] = True\n\n if attachable is not None:\n if version_lt(self._version, '1.24'):\n raise InvalidVersion(\n 'attachable is not supported in API version < 1.24'\n )\n data['Attachable'] = attachable\n\n if ingress is not None:\n if version_lt(self._version, '1.29'):\n raise InvalidVersion(\n 'ingress is not supported in API version < 1.29'\n )\n\n data['Ingress'] = ingress\n\n url = self._url(\"/networks/create\")\n res = self._post_json(url, data=data)\n return self._result(res, json=True)\n\n @minimum_version('1.25')\n def prune_networks(self, filters=None):\n \"\"\"\n Delete unused networks\n\n Args:\n filters (dict): Filters to process on the prune list.\n\n Returns:\n (dict): A dict containing a list of deleted network names and\n the amount of disk space reclaimed in bytes.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n params = {}\n if filters:\n params['filters'] = utils.convert_filters(filters)\n url = self._url('/networks/prune')\n return self._result(self._post(url, params=params), True)\n\n @minimum_version('1.21')\n @check_resource('net_id')\n def remove_network(self, net_id):\n \"\"\"\n Remove a network. 
Similar to the ``docker network rm`` command.\n\n Args:\n net_id (str): The network's id\n \"\"\"\n url = self._url(\"/networks/{0}\", net_id)\n res = self._delete(url)\n self._raise_for_status(res)\n\n @minimum_version('1.21')\n @check_resource('net_id')\n def inspect_network(self, net_id, verbose=None):\n \"\"\"\n Get detailed information about a network.\n\n Args:\n net_id (str): ID of network\n verbose (bool): Show the service details across the cluster in\n swarm mode.\n \"\"\"\n params = {}\n if verbose is not None:\n if version_lt(self._version, '1.28'):\n raise InvalidVersion('verbose was introduced in API 1.28')\n params['verbose'] = verbose\n\n url = self._url(\"/networks/{0}\", net_id)\n res = self._get(url, params=params)\n return self._result(res, json=True)\n\n @check_resource('container')\n @minimum_version('1.21')\n def connect_container_to_network(self, container, net_id,\n ipv4_address=None, ipv6_address=None,\n aliases=None, links=None,\n link_local_ips=None):\n \"\"\"\n Connect a container to a network.\n\n Args:\n container (str): container-id/name to be connected to the network\n net_id (str): network id\n aliases (:py:class:`list`): A list of aliases for this endpoint.\n Names in that list can be used within the network to reach the\n container. Defaults to ``None``.\n links (:py:class:`list`): A list of links for this endpoint.\n Containers declared in this list will be linked to this\n container. Defaults to ``None``.\n ipv4_address (str): The IP address of this container on the\n network, using the IPv4 protocol. Defaults to ``None``.\n ipv6_address (str): The IP address of this container on the\n network, using the IPv6 protocol. Defaults to ``None``.\n link_local_ips (:py:class:`list`): A list of link-local\n (IPv4/IPv6) addresses.\n \"\"\"\n data = {\n \"Container\": container,\n \"EndpointConfig\": self.create_endpoint_config(\n aliases=aliases, links=links, ipv4_address=ipv4_address,\n ipv6_address=ipv6_address, link_local_ips=link_local_ips\n ),\n }\n\n url = self._url(\"/networks/{0}/connect\", net_id)\n res = self._post_json(url, data=data)\n self._raise_for_status(res)\n\n @check_resource('container')\n @minimum_version('1.21')\n def disconnect_container_from_network(self, container, net_id,\n force=False):\n \"\"\"\n Disconnect a container from a network.\n\n Args:\n container (str): container ID or name to be disconnected from the\n network\n net_id (str): network ID\n force (bool): Force the container to disconnect from a network.\n Default: ``False``\n \"\"\"\n data = {\"Container\": container}\n if force:\n if version_lt(self._version, '1.22'):\n raise InvalidVersion(\n 'Forced disconnect was introduced in API 1.22'\n )\n data['Force'] = force\n url = self._url(\"/networks/{0}/disconnect\", net_id)\n res = self._post_json(url, data=data)\n self._raise_for_status(res)\n", "path": "docker/api/network.py"}], "after_files": [{"content": "from ..errors import InvalidVersion\nfrom ..utils import check_resource, minimum_version\nfrom ..utils import version_lt\nfrom .. import utils\n\n\nclass NetworkApiMixin(object):\n @minimum_version('1.21')\n def networks(self, names=None, ids=None, filters=None):\n \"\"\"\n List networks. 
Similar to the ``docker networks ls`` command.\n\n Args:\n names (:py:class:`list`): List of names to filter by\n ids (:py:class:`list`): List of ids to filter by\n filters (dict): Filters to be processed on the network list.\n Available filters:\n - ``driver=[<driver-name>]`` Matches a network's driver.\n - ``label=[<key>]`` or ``label=[<key>=<value>]``.\n - ``type=[\"custom\"|\"builtin\"]`` Filters networks by type.\n\n Returns:\n (dict): List of network objects.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n\n if filters is None:\n filters = {}\n if names:\n filters['name'] = names\n if ids:\n filters['id'] = ids\n params = {'filters': utils.convert_filters(filters)}\n url = self._url(\"/networks\")\n res = self._get(url, params=params)\n return self._result(res, json=True)\n\n @minimum_version('1.21')\n def create_network(self, name, driver=None, options=None, ipam=None,\n check_duplicate=None, internal=False, labels=None,\n enable_ipv6=False, attachable=None, scope=None,\n ingress=None):\n \"\"\"\n Create a network. Similar to the ``docker network create``.\n\n Args:\n name (str): Name of the network\n driver (str): Name of the driver used to create the network\n options (dict): Driver options as a key-value dictionary\n ipam (IPAMConfig): Optional custom IP scheme for the network.\n check_duplicate (bool): Request daemon to check for networks with\n same name. Default: ``None``.\n internal (bool): Restrict external access to the network. Default\n ``False``.\n labels (dict): Map of labels to set on the network. Default\n ``None``.\n enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.\n attachable (bool): If enabled, and the network is in the global\n scope, non-service containers on worker nodes will be able to\n connect to the network.\n ingress (bool): If set, create an ingress network which provides\n the routing-mesh in swarm mode.\n\n Returns:\n (dict): The created network reference object\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Example:\n A network using the bridge driver:\n\n >>> client.create_network(\"network1\", driver=\"bridge\")\n\n You can also create more advanced networks with custom IPAM\n configurations. For example, setting the subnet to\n ``192.168.52.0/24`` and gateway address to ``192.168.52.254``.\n\n .. 
code-block:: python\n\n >>> ipam_pool = docker.types.IPAMPool(\n subnet='192.168.52.0/24',\n gateway='192.168.52.254'\n )\n >>> ipam_config = docker.types.IPAMConfig(\n pool_configs=[ipam_pool]\n )\n >>> docker_client.create_network(\"network1\", driver=\"bridge\",\n ipam=ipam_config)\n \"\"\"\n if options is not None and not isinstance(options, dict):\n raise TypeError('options must be a dictionary')\n\n data = {\n 'Name': name,\n 'Driver': driver,\n 'Options': options,\n 'IPAM': ipam,\n 'CheckDuplicate': check_duplicate,\n }\n\n if labels is not None:\n if version_lt(self._version, '1.23'):\n raise InvalidVersion(\n 'network labels were introduced in API 1.23'\n )\n if not isinstance(labels, dict):\n raise TypeError('labels must be a dictionary')\n data[\"Labels\"] = labels\n\n if enable_ipv6:\n if version_lt(self._version, '1.23'):\n raise InvalidVersion(\n 'enable_ipv6 was introduced in API 1.23'\n )\n data['EnableIPv6'] = True\n\n if internal:\n if version_lt(self._version, '1.22'):\n raise InvalidVersion('Internal networks are not '\n 'supported in API version < 1.22')\n data['Internal'] = True\n\n if attachable is not None:\n if version_lt(self._version, '1.24'):\n raise InvalidVersion(\n 'attachable is not supported in API version < 1.24'\n )\n data['Attachable'] = attachable\n\n if ingress is not None:\n if version_lt(self._version, '1.29'):\n raise InvalidVersion(\n 'ingress is not supported in API version < 1.29'\n )\n\n data['Ingress'] = ingress\n\n url = self._url(\"/networks/create\")\n res = self._post_json(url, data=data)\n return self._result(res, json=True)\n\n @minimum_version('1.25')\n def prune_networks(self, filters=None):\n \"\"\"\n Delete unused networks\n\n Args:\n filters (dict): Filters to process on the prune list.\n\n Returns:\n (dict): A dict containing a list of deleted network names and\n the amount of disk space reclaimed in bytes.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n params = {}\n if filters:\n params['filters'] = utils.convert_filters(filters)\n url = self._url('/networks/prune')\n return self._result(self._post(url, params=params), True)\n\n @minimum_version('1.21')\n @check_resource('net_id')\n def remove_network(self, net_id):\n \"\"\"\n Remove a network. 
Similar to the ``docker network rm`` command.\n\n Args:\n net_id (str): The network's id\n \"\"\"\n url = self._url(\"/networks/{0}\", net_id)\n res = self._delete(url)\n self._raise_for_status(res)\n\n @minimum_version('1.21')\n @check_resource('net_id')\n def inspect_network(self, net_id, verbose=None):\n \"\"\"\n Get detailed information about a network.\n\n Args:\n net_id (str): ID of network\n verbose (bool): Show the service details across the cluster in\n swarm mode.\n \"\"\"\n params = {}\n if verbose is not None:\n if version_lt(self._version, '1.28'):\n raise InvalidVersion('verbose was introduced in API 1.28')\n params['verbose'] = verbose\n\n url = self._url(\"/networks/{0}\", net_id)\n res = self._get(url, params=params)\n return self._result(res, json=True)\n\n @check_resource('container')\n @minimum_version('1.21')\n def connect_container_to_network(self, container, net_id,\n ipv4_address=None, ipv6_address=None,\n aliases=None, links=None,\n link_local_ips=None):\n \"\"\"\n Connect a container to a network.\n\n Args:\n container (str): container-id/name to be connected to the network\n net_id (str): network id\n aliases (:py:class:`list`): A list of aliases for this endpoint.\n Names in that list can be used within the network to reach the\n container. Defaults to ``None``.\n links (:py:class:`list`): A list of links for this endpoint.\n Containers declared in this list will be linked to this\n container. Defaults to ``None``.\n ipv4_address (str): The IP address of this container on the\n network, using the IPv4 protocol. Defaults to ``None``.\n ipv6_address (str): The IP address of this container on the\n network, using the IPv6 protocol. Defaults to ``None``.\n link_local_ips (:py:class:`list`): A list of link-local\n (IPv4/IPv6) addresses.\n \"\"\"\n data = {\n \"Container\": container,\n \"EndpointConfig\": self.create_endpoint_config(\n aliases=aliases, links=links, ipv4_address=ipv4_address,\n ipv6_address=ipv6_address, link_local_ips=link_local_ips\n ),\n }\n\n url = self._url(\"/networks/{0}/connect\", net_id)\n res = self._post_json(url, data=data)\n self._raise_for_status(res)\n\n @check_resource('container')\n @minimum_version('1.21')\n def disconnect_container_from_network(self, container, net_id,\n force=False):\n \"\"\"\n Disconnect a container from a network.\n\n Args:\n container (str): container ID or name to be disconnected from the\n network\n net_id (str): network ID\n force (bool): Force the container to disconnect from a network.\n Default: ``False``\n \"\"\"\n data = {\"Container\": container}\n if force:\n if version_lt(self._version, '1.22'):\n raise InvalidVersion(\n 'Forced disconnect was introduced in API 1.22'\n )\n data['Force'] = force\n url = self._url(\"/networks/{0}/disconnect\", net_id)\n res = self._post_json(url, data=data)\n self._raise_for_status(res)\n", "path": "docker/api/network.py"}]} | 3,106 | 141 |
gh_patches_debug_56716 | rasdani/github-patches | git_diff | mosaicml__composer-182 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add venv into docker image to enable editable `pip install`
When trying to install composer with `pip install -e .` from within the docker image, we are seeing this error:
```
Traceback (most recent call last):
File "/usr/bin/composer", line 33, in <module>
sys.exit(load_entry_point('mosaicml', 'console_scripts', 'composer')())
File "/usr/bin/composer", line 22, in importlib_load_entry_point
for entry_point in distribution(dist_name).entry_points
File "/usr/lib/python3.8/importlib/metadata.py", line 445, in distribution
return Distribution.from_name(distribution_name)
File "/usr/lib/python3.8/importlib/metadata.py", line 169, in from_name
raise PackageNotFoundError(name)
importlib.metadata.PackageNotFoundError: mosaicml
```
This seems to be remedied by running the `pip install` from within a virtualenv. Can we bake a virtualenv into the docker image as a workaround?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2021 MosaicML. All Rights Reserved.
2
3 import os
4 import sys
5 import textwrap
6
7 import setuptools
8 from setuptools import setup
9
10
11 def package_files(directory: str):
12 # from https://stackoverflow.com/a/36693250
13 paths = []
14 for (path, _, filenames) in os.walk(directory):
15 for filename in filenames:
16 paths.append(os.path.join('..', path, filename))
17 return paths
18
19
20 with open("README.md", "r", encoding="utf-8") as fh:
21 long_description = fh.read()
22
23 install_requires = [
24 "pyyaml>=5.4.1",
25 "tqdm>=4.62.3",
26 "torchmetrics>=0.6.0",
27 "torch_optimizer==0.1.0",
28 "torchvision>=0.9.0",
29 "torch>=1.9",
30 "yahp>=0.0.14",
31 "numpy==1.21.5",
32 ]
33 extra_deps = {}
34
35 extra_deps['base'] = []
36
37 extra_deps['dev'] = [
38 "custom_inherit==2.3.2",
39 'junitparser>=2.1.1',
40 'coverage[toml]>=6.1.1',
41 'fasteners>=0.16.3', # run_directory_uploader tests require fasteners
42 'pytest>=6.2.0',
43 'yapf>=0.13.0',
44 'isort>=5.9.3',
45 'ipython>=7.29.0',
46 'ipykernel>=6.5.0',
47 'jupyter>=1.0.0',
48 'yamllint>=1.26.2',
49 'pytest-timeout>=1.4.2',
50 'recommonmark>=0.7.1',
51 'sphinx>=4.2.0',
52 'sphinx_copybutton>=0.4.0',
53 'sphinx_markdown_tables>=0.0.15',
54 'sphinx-argparse>=0.3.1',
55 'sphinxcontrib.katex>=0.8.6',
56 'sphinxext.opengraph>=0.4.2',
57 'sphinxemoji>=0.2.0',
58 'sphinx_rtd_theme>=1.0.0',
59 'testbook>=0.4.2',
60 'myst-parser>=0.15.2',
61 ]
62 extra_deps['logging'] = ['wandb>=0.12.2', 'apache-libcloud>=3.4.1']
63
64 extra_deps['nlp'] = [
65 'transformers>=4.11.3',
66 'datasets>=1.14.0',
67 ]
68
69 extra_deps['unet'] = [
70 'monai>=0.7.0',
71 'scikit-learn>=1.0.1',
72 ]
73
74 extra_deps['deepspeed'] = [
75 'deepspeed>=0.5.5',
76 ]
77
78 extra_deps['all'] = set(dep for deps in extra_deps.values() for dep in deps)
79
80 setup(
81 name="mosaicml",
82 version="0.3.1",
83 author="MosaicML",
84 author_email="[email protected]",
85 description="composing methods for ML training efficiency",
86 long_description=long_description,
87 long_description_content_type="text/markdown",
88 url="https://github.com/mosaicml/composer",
89 include_package_data=True,
90 package_data={
91 "composer": ['py.typed'],
92 "": package_files('composer/yamls'),
93 },
94 packages=setuptools.find_packages(exclude=["tests*"]),
95 classifiers=[
96 "Programming Language :: Python :: 3",
97 ],
98 install_requires=install_requires,
99 entry_points={
100 'console_scripts': ['composer = composer.cli.launcher:main',],
101 },
102 extras_require=extra_deps,
103 dependency_links=['https://developer.download.nvidia.com/compute/redist'],
104 python_requires='>=3.7',
105 ext_package="composer",
106 )
107
108 # only visible if user installs with verbose -v flag
109 # Printing to stdout as not to interfere with setup.py CLI flags (e.g. --version)
110 print("*" * 20, file=sys.stderr)
111 print(textwrap.dedent("""NOTE: For best performance, we recommend installing Pillow-SIMD
112 for accelerated image processing operations. To install:
113 \t pip uninstall pillow && pip install pillow-simd"""),
114 file=sys.stderr)
115 print("*" * 20, file=sys.stderr)
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,12 +1,16 @@
# Copyright 2021 MosaicML. All Rights Reserved.
import os
+import site
import sys
import textwrap
import setuptools
from setuptools import setup
+# From https://github.com/pypa/pip/issues/7953#issuecomment-645133255
+site.ENABLE_USER_SITE = "--user" in sys.argv[1:]
+
def package_files(directory: str):
# from https://stackoverflow.com/a/36693250
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,12 +1,16 @@\n # Copyright 2021 MosaicML. All Rights Reserved.\n \n import os\n+import site\n import sys\n import textwrap\n \n import setuptools\n from setuptools import setup\n \n+# From https://github.com/pypa/pip/issues/7953#issuecomment-645133255\n+site.ENABLE_USER_SITE = \"--user\" in sys.argv[1:]\n+\n \n def package_files(directory: str):\n # from https://stackoverflow.com/a/36693250\n", "issue": "Add venv into docker image to enable editable `pip install`\nWhen trying to install composer with `pip install -e .` from within the docker image, we are seeing this error:\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/bin/composer\", line 33, in <module>\r\n sys.exit(load_entry_point('mosaicml', 'console_scripts', 'composer')())\r\n File \"/usr/bin/composer\", line 22, in importlib_load_entry_point\r\n for entry_point in distribution(dist_name).entry_points\r\n File \"/usr/lib/python3.8/importlib/metadata.py\", line 445, in distribution\r\n return Distribution.from_name(distribution_name)\r\n File \"/usr/lib/python3.8/importlib/metadata.py\", line 169, in from_name\r\n raise PackageNotFoundError(name)\r\nimportlib.metadata.PackageNotFoundError: mosaicml\r\n```\r\nThis seems to be remedied by running the `pip install` from within a virtualenv. Can we bake a virtualenv into the docker image as a workaround?\n", "before_files": [{"content": "# Copyright 2021 MosaicML. All Rights Reserved.\n\nimport os\nimport sys\nimport textwrap\n\nimport setuptools\nfrom setuptools import setup\n\n\ndef package_files(directory: str):\n # from https://stackoverflow.com/a/36693250\n paths = []\n for (path, _, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join('..', path, filename))\n return paths\n\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\ninstall_requires = [\n \"pyyaml>=5.4.1\",\n \"tqdm>=4.62.3\",\n \"torchmetrics>=0.6.0\",\n \"torch_optimizer==0.1.0\",\n \"torchvision>=0.9.0\",\n \"torch>=1.9\",\n \"yahp>=0.0.14\",\n \"numpy==1.21.5\",\n]\nextra_deps = {}\n\nextra_deps['base'] = []\n\nextra_deps['dev'] = [\n \"custom_inherit==2.3.2\",\n 'junitparser>=2.1.1',\n 'coverage[toml]>=6.1.1',\n 'fasteners>=0.16.3', # run_directory_uploader tests require fasteners\n 'pytest>=6.2.0',\n 'yapf>=0.13.0',\n 'isort>=5.9.3',\n 'ipython>=7.29.0',\n 'ipykernel>=6.5.0',\n 'jupyter>=1.0.0',\n 'yamllint>=1.26.2',\n 'pytest-timeout>=1.4.2',\n 'recommonmark>=0.7.1',\n 'sphinx>=4.2.0',\n 'sphinx_copybutton>=0.4.0',\n 'sphinx_markdown_tables>=0.0.15',\n 'sphinx-argparse>=0.3.1',\n 'sphinxcontrib.katex>=0.8.6',\n 'sphinxext.opengraph>=0.4.2',\n 'sphinxemoji>=0.2.0',\n 'sphinx_rtd_theme>=1.0.0',\n 'testbook>=0.4.2',\n 'myst-parser>=0.15.2',\n]\nextra_deps['logging'] = ['wandb>=0.12.2', 'apache-libcloud>=3.4.1']\n\nextra_deps['nlp'] = [\n 'transformers>=4.11.3',\n 'datasets>=1.14.0',\n]\n\nextra_deps['unet'] = [\n 'monai>=0.7.0',\n 'scikit-learn>=1.0.1',\n]\n\nextra_deps['deepspeed'] = [\n 'deepspeed>=0.5.5',\n]\n\nextra_deps['all'] = set(dep for deps in extra_deps.values() for dep in deps)\n\nsetup(\n name=\"mosaicml\",\n version=\"0.3.1\",\n author=\"MosaicML\",\n author_email=\"[email protected]\",\n description=\"composing methods for ML training efficiency\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/mosaicml/composer\",\n include_package_data=True,\n 
package_data={\n \"composer\": ['py.typed'],\n \"\": package_files('composer/yamls'),\n },\n packages=setuptools.find_packages(exclude=[\"tests*\"]),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n ],\n install_requires=install_requires,\n entry_points={\n 'console_scripts': ['composer = composer.cli.launcher:main',],\n },\n extras_require=extra_deps,\n dependency_links=['https://developer.download.nvidia.com/compute/redist'],\n python_requires='>=3.7',\n ext_package=\"composer\",\n)\n\n# only visible if user installs with verbose -v flag\n# Printing to stdout as not to interfere with setup.py CLI flags (e.g. --version)\nprint(\"*\" * 20, file=sys.stderr)\nprint(textwrap.dedent(\"\"\"NOTE: For best performance, we recommend installing Pillow-SIMD\n for accelerated image processing operations. To install:\n \\t pip uninstall pillow && pip install pillow-simd\"\"\"),\n file=sys.stderr)\nprint(\"*\" * 20, file=sys.stderr)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2021 MosaicML. All Rights Reserved.\n\nimport os\nimport site\nimport sys\nimport textwrap\n\nimport setuptools\nfrom setuptools import setup\n\n# From https://github.com/pypa/pip/issues/7953#issuecomment-645133255\nsite.ENABLE_USER_SITE = \"--user\" in sys.argv[1:]\n\n\ndef package_files(directory: str):\n # from https://stackoverflow.com/a/36693250\n paths = []\n for (path, _, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join('..', path, filename))\n return paths\n\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\ninstall_requires = [\n \"pyyaml>=5.4.1\",\n \"tqdm>=4.62.3\",\n \"torchmetrics>=0.6.0\",\n \"torch_optimizer==0.1.0\",\n \"torchvision>=0.9.0\",\n \"torch>=1.9\",\n \"yahp>=0.0.14\",\n \"numpy==1.21.5\",\n]\nextra_deps = {}\n\nextra_deps['base'] = []\n\nextra_deps['dev'] = [\n \"custom_inherit==2.3.2\",\n 'junitparser>=2.1.1',\n 'coverage[toml]>=6.1.1',\n 'fasteners>=0.16.3', # run_directory_uploader tests require fasteners\n 'pytest>=6.2.0',\n 'yapf>=0.13.0',\n 'isort>=5.9.3',\n 'ipython>=7.29.0',\n 'ipykernel>=6.5.0',\n 'jupyter>=1.0.0',\n 'yamllint>=1.26.2',\n 'pytest-timeout>=1.4.2',\n 'recommonmark>=0.7.1',\n 'sphinx>=4.2.0',\n 'sphinx_copybutton>=0.4.0',\n 'sphinx_markdown_tables>=0.0.15',\n 'sphinx-argparse>=0.3.1',\n 'sphinxcontrib.katex>=0.8.6',\n 'sphinxext.opengraph>=0.4.2',\n 'sphinxemoji>=0.2.0',\n 'sphinx_rtd_theme>=1.0.0',\n 'testbook>=0.4.2',\n 'myst-parser>=0.15.2',\n]\nextra_deps['logging'] = ['wandb>=0.12.2', 'apache-libcloud>=3.4.1']\n\nextra_deps['nlp'] = [\n 'transformers>=4.11.3',\n 'datasets>=1.14.0',\n]\n\nextra_deps['unet'] = [\n 'monai>=0.7.0',\n 'scikit-learn>=1.0.1',\n]\n\nextra_deps['deepspeed'] = [\n 'deepspeed>=0.5.5',\n]\n\nextra_deps['all'] = set(dep for deps in extra_deps.values() for dep in deps)\n\nsetup(\n name=\"mosaicml\",\n version=\"0.3.1\",\n author=\"MosaicML\",\n author_email=\"[email protected]\",\n description=\"composing methods for ML training efficiency\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/mosaicml/composer\",\n include_package_data=True,\n package_data={\n \"composer\": ['py.typed'],\n \"\": package_files('composer/yamls'),\n },\n packages=setuptools.find_packages(exclude=[\"tests*\"]),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n ],\n install_requires=install_requires,\n entry_points={\n 'console_scripts': ['composer = composer.cli.launcher:main',],\n 
},\n extras_require=extra_deps,\n dependency_links=['https://developer.download.nvidia.com/compute/redist'],\n python_requires='>=3.7',\n ext_package=\"composer\",\n)\n\n# only visible if user installs with verbose -v flag\n# Printing to stdout as not to interfere with setup.py CLI flags (e.g. --version)\nprint(\"*\" * 20, file=sys.stderr)\nprint(textwrap.dedent(\"\"\"NOTE: For best performance, we recommend installing Pillow-SIMD\n for accelerated image processing operations. To install:\n \\t pip uninstall pillow && pip install pillow-simd\"\"\"),\n file=sys.stderr)\nprint(\"*\" * 20, file=sys.stderr)\n", "path": "setup.py"}]} | 1,712 | 144 |
gh_patches_debug_15609 | rasdani/github-patches | git_diff | tensorflow__addons-2355 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Compile with AVX only
Seems that TF2.4.0 is accidentally compiled with AVX2 (or more, not sure what's the CPU spec on TF release CI), and we follow it in https://github.com/tensorflow/addons/pull/2299. We should fallback to subset of ISAs, probably AVX, once there is a new release.
https://github.com/tensorflow/tensorflow/pull/46229
/cc @seanpmorgan
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `configure.py`
Content:
```
1 # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 # Usage: python configure.py
16 #
17
18
19 import os
20 import pathlib
21 import platform
22 import logging
23
24 import tensorflow as tf
25
26 _TFA_BAZELRC = ".bazelrc"
27
28
29 # Writes variables to bazelrc file
30 def write(line):
31 with open(_TFA_BAZELRC, "a") as f:
32 f.write(line + "\n")
33
34
35 def write_action_env(var_name, var):
36 write('build --action_env {}="{}"'.format(var_name, var))
37
38
39 def is_macos():
40 return platform.system() == "Darwin"
41
42
43 def is_windows():
44 return platform.system() == "Windows"
45
46
47 def is_linux():
48 return platform.system() == "Linux"
49
50
51 def is_raspi_arm():
52 return os.uname()[4] == "armv7l"
53
54
55 def get_tf_header_dir():
56 import tensorflow as tf
57
58 tf_header_dir = tf.sysconfig.get_compile_flags()[0][2:]
59 if is_windows():
60 tf_header_dir = tf_header_dir.replace("\\", "/")
61 return tf_header_dir
62
63
64 def get_tf_shared_lib_dir():
65 import tensorflow as tf
66
67 # OS Specific parsing
68 if is_windows():
69 tf_shared_lib_dir = tf.sysconfig.get_compile_flags()[0][2:-7] + "python"
70 return tf_shared_lib_dir.replace("\\", "/")
71 elif is_raspi_arm():
72 return tf.sysconfig.get_compile_flags()[0][2:-7] + "python"
73 else:
74 return tf.sysconfig.get_link_flags()[0][2:]
75
76
77 # Converts the linkflag namespec to the full shared library name
78 def get_shared_lib_name():
79 import tensorflow as tf
80
81 namespec = tf.sysconfig.get_link_flags()
82 if is_macos():
83 # MacOS
84 return "lib" + namespec[1][2:] + ".dylib"
85 elif is_windows():
86 # Windows
87 return "_pywrap_tensorflow_internal.lib"
88 elif is_raspi_arm():
89 # The below command for linux would return an empty list
90 return "_pywrap_tensorflow_internal.so"
91 else:
92 # Linux
93 return namespec[1][3:]
94
95
96 def create_build_configuration():
97 print()
98 print("Configuring TensorFlow Addons to be built from source...")
99
100 if os.path.isfile(_TFA_BAZELRC):
101 os.remove(_TFA_BAZELRC)
102
103 logging.disable(logging.WARNING)
104
105 write_action_env("TF_HEADER_DIR", get_tf_header_dir())
106 write_action_env("TF_SHARED_LIBRARY_DIR", get_tf_shared_lib_dir())
107 write_action_env("TF_SHARED_LIBRARY_NAME", get_shared_lib_name())
108 write_action_env("TF_CXX11_ABI_FLAG", tf.sysconfig.CXX11_ABI_FLAG)
109
110 write("build --spawn_strategy=standalone")
111 write("build --strategy=Genrule=standalone")
112 write("build -c opt")
113
114 if is_windows():
115 write("build --config=windows")
116 write("build:windows --enable_runfiles")
117 write("build:windows --copt=/experimental:preprocessor")
118 write("build:windows --host_copt=/experimental:preprocessor")
119 write("build:windows --copt=/arch=AVX2")
120 write("build:windows --cxxopt=/std:c++14")
121 write("build:windows --host_cxxopt=/std:c++14")
122
123 if is_macos() or is_linux():
124 write("build --copt=-mavx2")
125 write("build --cxxopt=-std=c++14")
126 write("build --host_cxxopt=-std=c++14")
127
128 if os.getenv("TF_NEED_CUDA", "0") == "1":
129 print("> Building GPU & CPU ops")
130 configure_cuda()
131 else:
132 print("> Building only CPU ops")
133
134 print()
135 print("Build configurations successfully written to", _TFA_BAZELRC, ":\n")
136 print(pathlib.Path(_TFA_BAZELRC).read_text())
137
138
139 def configure_cuda():
140 write_action_env("TF_NEED_CUDA", "1")
141 write_action_env(
142 "CUDA_TOOLKIT_PATH", os.getenv("CUDA_TOOLKIT_PATH", "/usr/local/cuda")
143 )
144 write_action_env(
145 "CUDNN_INSTALL_PATH",
146 os.getenv("CUDNN_INSTALL_PATH", "/usr/lib/x86_64-linux-gnu"),
147 )
148 write_action_env("TF_CUDA_VERSION", os.getenv("TF_CUDA_VERSION", "11"))
149 write_action_env("TF_CUDNN_VERSION", os.getenv("TF_CUDNN_VERSION", "8"))
150
151 write("test --config=cuda")
152 write("build --config=cuda")
153 write("build:cuda --define=using_cuda=true --define=using_cuda_nvcc=true")
154 write("build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain")
155
156
157 if __name__ == "__main__":
158 create_build_configuration()
159
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/configure.py b/configure.py
--- a/configure.py
+++ b/configure.py
@@ -116,12 +116,12 @@
write("build:windows --enable_runfiles")
write("build:windows --copt=/experimental:preprocessor")
write("build:windows --host_copt=/experimental:preprocessor")
- write("build:windows --copt=/arch=AVX2")
+ write("build:windows --copt=/arch=AVX")
write("build:windows --cxxopt=/std:c++14")
write("build:windows --host_cxxopt=/std:c++14")
if is_macos() or is_linux():
- write("build --copt=-mavx2")
+ write("build --copt=-mavx")
write("build --cxxopt=-std=c++14")
write("build --host_cxxopt=-std=c++14")
| {"golden_diff": "diff --git a/configure.py b/configure.py\n--- a/configure.py\n+++ b/configure.py\n@@ -116,12 +116,12 @@\n write(\"build:windows --enable_runfiles\")\n write(\"build:windows --copt=/experimental:preprocessor\")\n write(\"build:windows --host_copt=/experimental:preprocessor\")\n- write(\"build:windows --copt=/arch=AVX2\")\n+ write(\"build:windows --copt=/arch=AVX\")\n write(\"build:windows --cxxopt=/std:c++14\")\n write(\"build:windows --host_cxxopt=/std:c++14\")\n \n if is_macos() or is_linux():\n- write(\"build --copt=-mavx2\")\n+ write(\"build --copt=-mavx\")\n write(\"build --cxxopt=-std=c++14\")\n write(\"build --host_cxxopt=-std=c++14\")\n", "issue": "Compile with AVX only\nSeems that TF2.4.0 is accidentally compiled with AVX2 (or more, not sure what's the CPU spec on TF release CI), and we follow it in https://github.com/tensorflow/addons/pull/2299. We should fallback to subset of ISAs, probably AVX, once there is a new release.\r\n\r\nhttps://github.com/tensorflow/tensorflow/pull/46229\r\n\r\n/cc @seanpmorgan \n", "before_files": [{"content": "# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# Usage: python configure.py\n#\n\n\nimport os\nimport pathlib\nimport platform\nimport logging\n\nimport tensorflow as tf\n\n_TFA_BAZELRC = \".bazelrc\"\n\n\n# Writes variables to bazelrc file\ndef write(line):\n with open(_TFA_BAZELRC, \"a\") as f:\n f.write(line + \"\\n\")\n\n\ndef write_action_env(var_name, var):\n write('build --action_env {}=\"{}\"'.format(var_name, var))\n\n\ndef is_macos():\n return platform.system() == \"Darwin\"\n\n\ndef is_windows():\n return platform.system() == \"Windows\"\n\n\ndef is_linux():\n return platform.system() == \"Linux\"\n\n\ndef is_raspi_arm():\n return os.uname()[4] == \"armv7l\"\n\n\ndef get_tf_header_dir():\n import tensorflow as tf\n\n tf_header_dir = tf.sysconfig.get_compile_flags()[0][2:]\n if is_windows():\n tf_header_dir = tf_header_dir.replace(\"\\\\\", \"/\")\n return tf_header_dir\n\n\ndef get_tf_shared_lib_dir():\n import tensorflow as tf\n\n # OS Specific parsing\n if is_windows():\n tf_shared_lib_dir = tf.sysconfig.get_compile_flags()[0][2:-7] + \"python\"\n return tf_shared_lib_dir.replace(\"\\\\\", \"/\")\n elif is_raspi_arm():\n return tf.sysconfig.get_compile_flags()[0][2:-7] + \"python\"\n else:\n return tf.sysconfig.get_link_flags()[0][2:]\n\n\n# Converts the linkflag namespec to the full shared library name\ndef get_shared_lib_name():\n import tensorflow as tf\n\n namespec = tf.sysconfig.get_link_flags()\n if is_macos():\n # MacOS\n return \"lib\" + namespec[1][2:] + \".dylib\"\n elif is_windows():\n # Windows\n return \"_pywrap_tensorflow_internal.lib\"\n elif is_raspi_arm():\n # The below command for linux would return an empty list\n return \"_pywrap_tensorflow_internal.so\"\n else:\n # Linux\n return namespec[1][3:]\n\n\ndef create_build_configuration():\n print()\n 
print(\"Configuring TensorFlow Addons to be built from source...\")\n\n if os.path.isfile(_TFA_BAZELRC):\n os.remove(_TFA_BAZELRC)\n\n logging.disable(logging.WARNING)\n\n write_action_env(\"TF_HEADER_DIR\", get_tf_header_dir())\n write_action_env(\"TF_SHARED_LIBRARY_DIR\", get_tf_shared_lib_dir())\n write_action_env(\"TF_SHARED_LIBRARY_NAME\", get_shared_lib_name())\n write_action_env(\"TF_CXX11_ABI_FLAG\", tf.sysconfig.CXX11_ABI_FLAG)\n\n write(\"build --spawn_strategy=standalone\")\n write(\"build --strategy=Genrule=standalone\")\n write(\"build -c opt\")\n\n if is_windows():\n write(\"build --config=windows\")\n write(\"build:windows --enable_runfiles\")\n write(\"build:windows --copt=/experimental:preprocessor\")\n write(\"build:windows --host_copt=/experimental:preprocessor\")\n write(\"build:windows --copt=/arch=AVX2\")\n write(\"build:windows --cxxopt=/std:c++14\")\n write(\"build:windows --host_cxxopt=/std:c++14\")\n\n if is_macos() or is_linux():\n write(\"build --copt=-mavx2\")\n write(\"build --cxxopt=-std=c++14\")\n write(\"build --host_cxxopt=-std=c++14\")\n\n if os.getenv(\"TF_NEED_CUDA\", \"0\") == \"1\":\n print(\"> Building GPU & CPU ops\")\n configure_cuda()\n else:\n print(\"> Building only CPU ops\")\n\n print()\n print(\"Build configurations successfully written to\", _TFA_BAZELRC, \":\\n\")\n print(pathlib.Path(_TFA_BAZELRC).read_text())\n\n\ndef configure_cuda():\n write_action_env(\"TF_NEED_CUDA\", \"1\")\n write_action_env(\n \"CUDA_TOOLKIT_PATH\", os.getenv(\"CUDA_TOOLKIT_PATH\", \"/usr/local/cuda\")\n )\n write_action_env(\n \"CUDNN_INSTALL_PATH\",\n os.getenv(\"CUDNN_INSTALL_PATH\", \"/usr/lib/x86_64-linux-gnu\"),\n )\n write_action_env(\"TF_CUDA_VERSION\", os.getenv(\"TF_CUDA_VERSION\", \"11\"))\n write_action_env(\"TF_CUDNN_VERSION\", os.getenv(\"TF_CUDNN_VERSION\", \"8\"))\n\n write(\"test --config=cuda\")\n write(\"build --config=cuda\")\n write(\"build:cuda --define=using_cuda=true --define=using_cuda_nvcc=true\")\n write(\"build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain\")\n\n\nif __name__ == \"__main__\":\n create_build_configuration()\n", "path": "configure.py"}], "after_files": [{"content": "# Copyright 2020 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# Usage: python configure.py\n#\n\n\nimport os\nimport pathlib\nimport platform\nimport logging\n\nimport tensorflow as tf\n\n_TFA_BAZELRC = \".bazelrc\"\n\n\n# Writes variables to bazelrc file\ndef write(line):\n with open(_TFA_BAZELRC, \"a\") as f:\n f.write(line + \"\\n\")\n\n\ndef write_action_env(var_name, var):\n write('build --action_env {}=\"{}\"'.format(var_name, var))\n\n\ndef is_macos():\n return platform.system() == \"Darwin\"\n\n\ndef is_windows():\n return platform.system() == \"Windows\"\n\n\ndef is_linux():\n return platform.system() == \"Linux\"\n\n\ndef is_raspi_arm():\n return os.uname()[4] == \"armv7l\"\n\n\ndef get_tf_header_dir():\n import tensorflow as tf\n\n tf_header_dir = tf.sysconfig.get_compile_flags()[0][2:]\n if is_windows():\n tf_header_dir = tf_header_dir.replace(\"\\\\\", \"/\")\n return tf_header_dir\n\n\ndef get_tf_shared_lib_dir():\n import tensorflow as tf\n\n # OS Specific parsing\n if is_windows():\n tf_shared_lib_dir = tf.sysconfig.get_compile_flags()[0][2:-7] + \"python\"\n return tf_shared_lib_dir.replace(\"\\\\\", \"/\")\n elif is_raspi_arm():\n return tf.sysconfig.get_compile_flags()[0][2:-7] + \"python\"\n else:\n return tf.sysconfig.get_link_flags()[0][2:]\n\n\n# Converts the linkflag namespec to the full shared library name\ndef get_shared_lib_name():\n import tensorflow as tf\n\n namespec = tf.sysconfig.get_link_flags()\n if is_macos():\n # MacOS\n return \"lib\" + namespec[1][2:] + \".dylib\"\n elif is_windows():\n # Windows\n return \"_pywrap_tensorflow_internal.lib\"\n elif is_raspi_arm():\n # The below command for linux would return an empty list\n return \"_pywrap_tensorflow_internal.so\"\n else:\n # Linux\n return namespec[1][3:]\n\n\ndef create_build_configuration():\n print()\n print(\"Configuring TensorFlow Addons to be built from source...\")\n\n if os.path.isfile(_TFA_BAZELRC):\n os.remove(_TFA_BAZELRC)\n\n logging.disable(logging.WARNING)\n\n write_action_env(\"TF_HEADER_DIR\", get_tf_header_dir())\n write_action_env(\"TF_SHARED_LIBRARY_DIR\", get_tf_shared_lib_dir())\n write_action_env(\"TF_SHARED_LIBRARY_NAME\", get_shared_lib_name())\n write_action_env(\"TF_CXX11_ABI_FLAG\", tf.sysconfig.CXX11_ABI_FLAG)\n\n write(\"build --spawn_strategy=standalone\")\n write(\"build --strategy=Genrule=standalone\")\n write(\"build -c opt\")\n\n if is_windows():\n write(\"build --config=windows\")\n write(\"build:windows --enable_runfiles\")\n write(\"build:windows --copt=/experimental:preprocessor\")\n write(\"build:windows --host_copt=/experimental:preprocessor\")\n write(\"build:windows --copt=/arch=AVX\")\n write(\"build:windows --cxxopt=/std:c++14\")\n write(\"build:windows --host_cxxopt=/std:c++14\")\n\n if is_macos() or is_linux():\n write(\"build --copt=-mavx\")\n write(\"build --cxxopt=-std=c++14\")\n write(\"build --host_cxxopt=-std=c++14\")\n\n if os.getenv(\"TF_NEED_CUDA\", \"0\") == 
\"1\":\n print(\"> Building GPU & CPU ops\")\n configure_cuda()\n else:\n print(\"> Building only CPU ops\")\n\n print()\n print(\"Build configurations successfully written to\", _TFA_BAZELRC, \":\\n\")\n print(pathlib.Path(_TFA_BAZELRC).read_text())\n\n\ndef configure_cuda():\n write_action_env(\"TF_NEED_CUDA\", \"1\")\n write_action_env(\n \"CUDA_TOOLKIT_PATH\", os.getenv(\"CUDA_TOOLKIT_PATH\", \"/usr/local/cuda\")\n )\n write_action_env(\n \"CUDNN_INSTALL_PATH\",\n os.getenv(\"CUDNN_INSTALL_PATH\", \"/usr/lib/x86_64-linux-gnu\"),\n )\n write_action_env(\"TF_CUDA_VERSION\", os.getenv(\"TF_CUDA_VERSION\", \"11\"))\n write_action_env(\"TF_CUDNN_VERSION\", os.getenv(\"TF_CUDNN_VERSION\", \"8\"))\n\n write(\"test --config=cuda\")\n write(\"build --config=cuda\")\n write(\"build:cuda --define=using_cuda=true --define=using_cuda_nvcc=true\")\n write(\"build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain\")\n\n\nif __name__ == \"__main__\":\n create_build_configuration()\n", "path": "configure.py"}]} | 1,951 | 217 |
gh_patches_debug_29851 | rasdani/github-patches | git_diff | huggingface__trl-660 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
sft_llama2 error
Hello!
I made no changes to the sample code or the dataset; I simply wanted to run the stacked llama2 example. However, I get this error:
UserWarning: The passed formatting_func has more than one argument. Usually that function should have a single argument `example` which corresponds to the dictionary returned by each element of the dataset. Make sure you know what you are doing.
in __init__
raise ValueError("the `--group_by_length` option is only available for `Dataset`, not `IterableDataset")
ValueError: the `--group_by_length` option is only available for `Dataset`, not `IterableDataset
(Sorry, I must omit certain parts of the error message since I am working on an institution's server).
Thank you!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/research_projects/stack_llama_2/scripts/sft_llama2.py`
Content:
```
1 # Fine-Tune Llama2-7b on SE paired dataset
2 import os
3 from dataclasses import dataclass, field
4 from typing import Optional
5
6 import torch
7 from datasets import load_dataset
8 from peft import AutoPeftModelForCausalLM, LoraConfig
9 from tqdm import tqdm
10 from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, HfArgumentParser, TrainingArguments
11
12 from trl import SFTTrainer
13 from trl.trainer import ConstantLengthDataset
14
15
16 @dataclass
17 class ScriptArguments:
18 model_name: Optional[str] = field(default="meta-llama/Llama-2-7b-hf", metadata={"help": "the model name"})
19 log_with: Optional[str] = field(default="wandb", metadata={"help": "use 'wandb' to log with wandb"})
20
21 dataset_name: Optional[str] = field(default="lvwerra/stack-exchange-paired", metadata={"help": "the dataset name"})
22 subset: Optional[str] = field(default="data/finetune", metadata={"help": "the subset to use"})
23 split: Optional[str] = field(default="train", metadata={"help": "the split to use"})
24 size_valid_set: Optional[int] = field(default=4000, metadata={"help": "the size of the validation set"})
25 streaming: Optional[bool] = field(default=True, metadata={"help": "whether to stream the dataset"})
26 shuffle_buffer: Optional[int] = field(default=5000, metadata={"help": "the shuffle buffer size"})
27 seq_length: Optional[int] = field(default=1024, metadata={"help": "the sequence length"})
28
29 max_steps: Optional[int] = field(default=500, metadata={"help": "the maximum number of sgd steps"})
30 logging_steps: Optional[int] = field(default=10, metadata={"help": "the logging frequency"})
31 save_steps: Optional[int] = field(default=10, metadata={"help": "the saving frequency"})
32 per_device_train_batch_size: Optional[int] = field(default=4, metadata={"help": "the per device train batch size"})
33 per_device_eval_batch_size: Optional[int] = field(default=1, metadata={"help": "the per device eval batch size"})
34 gradient_accumulation_steps: Optional[int] = field(default=2, metadata={"help": "the gradient accumulation steps"})
35 gradient_checkpointing: Optional[bool] = field(
36 default=True, metadata={"help": "whether to use gradient checkpointing"}
37 )
38 group_by_length: Optional[bool] = field(default=True, metadata={"help": "whether to group by length"})
39
40 lora_alpha: Optional[float] = field(default=16, metadata={"help": "the lora alpha parameter"})
41 lora_dropout: Optional[float] = field(default=0.05, metadata={"help": "the lora dropout parameter"})
42 lora_r: Optional[int] = field(default=8, metadata={"help": "the lora r parameter"})
43
44 learning_rate: Optional[float] = field(default=1e-4, metadata={"help": "the learning rate"})
45 lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "the lr scheduler type"})
46 num_warmup_steps: Optional[int] = field(default=100, metadata={"help": "the number of warmup steps"})
47 weight_decay: Optional[float] = field(default=0.05, metadata={"help": "the weight decay"})
48 optimizer_type: Optional[str] = field(default="paged_adamw_32bit", metadata={"help": "the optimizer type"})
49
50 output_dir: Optional[str] = field(default="./results", metadata={"help": "the output directory"})
51 log_freq: Optional[int] = field(default=1, metadata={"help": "the logging frequency"})
52
53
54 parser = HfArgumentParser(ScriptArguments)
55 script_args = parser.parse_args_into_dataclasses()[0]
56
57
58 def chars_token_ratio(dataset, tokenizer, nb_examples=400):
59 """
60 Estimate the average number of characters per token in the dataset.
61 """
62 total_characters, total_tokens = 0, 0
63 for _, example in tqdm(zip(range(nb_examples), iter(dataset)), total=nb_examples):
64 text = prepare_sample_text(example)
65 total_characters += len(text)
66 if tokenizer.is_fast:
67 total_tokens += len(tokenizer(text).tokens())
68 else:
69 total_tokens += len(tokenizer.tokenize(text))
70
71 return total_characters / total_tokens
72
73
74 def print_trainable_parameters(model):
75 """
76 Prints the number of trainable parameters in the model.
77 """
78 trainable_params = 0
79 all_param = 0
80 for _, param in model.named_parameters():
81 all_param += param.numel()
82 if param.requires_grad:
83 trainable_params += param.numel()
84 print(
85 f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}"
86 )
87
88
89 def prepare_sample_text(example):
90 """Prepare the text from a sample of the dataset."""
91 text = f"Question: {example['question']}\n\nAnswer: {example['response_j']}"
92 return text
93
94
95 def create_datasets(tokenizer, args):
96 dataset = load_dataset(
97 args.dataset_name,
98 data_dir=args.subset,
99 split=args.split,
100 use_auth_token=True,
101 num_proc=args.num_workers if not args.streaming else None,
102 streaming=args.streaming,
103 )
104 if args.streaming:
105 print("Loading the dataset in streaming mode")
106 valid_data = dataset.take(args.size_valid_set)
107 train_data = dataset.skip(args.size_valid_set)
108 train_data = train_data.shuffle(buffer_size=args.shuffle_buffer, seed=None)
109 else:
110 dataset = dataset.train_test_split(test_size=0.005, seed=None)
111 train_data = dataset["train"]
112 valid_data = dataset["test"]
113 print(f"Size of the train set: {len(train_data)}. Size of the validation set: {len(valid_data)}")
114
115 chars_per_token = chars_token_ratio(train_data, tokenizer)
116 print(f"The character to token ratio of the dataset is: {chars_per_token:.2f}")
117
118 train_dataset = ConstantLengthDataset(
119 tokenizer,
120 train_data,
121 formatting_func=prepare_sample_text,
122 infinite=True,
123 seq_length=args.seq_length,
124 chars_per_token=chars_per_token,
125 )
126 valid_dataset = ConstantLengthDataset(
127 tokenizer,
128 valid_data,
129 formatting_func=prepare_sample_text,
130 infinite=False,
131 seq_length=args.seq_length,
132 chars_per_token=chars_per_token,
133 )
134 return train_dataset, valid_dataset
135
136
137 bnb_config = BitsAndBytesConfig(
138 load_in_4bit=True,
139 bnb_4bit_quant_type="nf4",
140 bnb_4bit_compute_dtype=torch.bfloat16,
141 )
142
143 base_model = AutoModelForCausalLM.from_pretrained(
144 script_args.model_name,
145 quantization_config=bnb_config,
146 device_map={"": 0},
147 trust_remote_code=True,
148 use_auth_token=True,
149 )
150 base_model.config.use_cache = False
151
152 peft_config = LoraConfig(
153 r=script_args.lora_r,
154 lora_alpha=script_args.lora_alpha,
155 lora_dropout=script_args.lora_dropout,
156 target_modules=["q_proj", "v_proj"],
157 bias="none",
158 task_type="CAUSAL_LM",
159 )
160
161 tokenizer = AutoTokenizer.from_pretrained(script_args.model_name, trust_remote_code=True)
162 tokenizer.pad_token = tokenizer.eos_token
163 tokenizer.padding_side = "right" # Fix weird overflow issue with fp16 training
164
165
166 training_args = TrainingArguments(
167 output_dir=script_args.output_dir,
168 per_device_train_batch_size=script_args.per_device_train_batch_size,
169 gradient_accumulation_steps=script_args.gradient_accumulation_steps,
170 per_device_eval_batch_size=script_args.per_device_eval_batch_size,
171 learning_rate=script_args.learning_rate,
172 logging_steps=script_args.logging_steps,
173 max_steps=script_args.max_steps,
174 report_to=script_args.log_with,
175 save_steps=script_args.save_steps,
176 group_by_length=script_args.group_by_length,
177 lr_scheduler_type=script_args.lr_scheduler_type,
178 warmup_steps=script_args.num_warmup_steps,
179 optim=script_args.optimizer_type,
180 bf16=True,
181 remove_unused_columns=False,
182 run_name="sft_llama2",
183 )
184
185 train_dataset, eval_dataset = create_datasets(tokenizer, script_args)
186
187 trainer = SFTTrainer(
188 model=base_model,
189 train_dataset=train_dataset,
190 eval_dataset=eval_dataset,
191 peft_config=peft_config,
192 packing=True,
193 max_seq_length=None,
194 tokenizer=tokenizer,
195 args=training_args,
196 )
197 trainer.train()
198 trainer.save_model(script_args.output_dir)
199
200 output_dir = os.path.join(script_args.output_dir, "final_checkpoint")
201 trainer.model.save_pretrained(output_dir)
202
203 # Free memory for merging weights
204 del base_model
205 torch.cuda.empty_cache()
206
207 model = AutoPeftModelForCausalLM.from_pretrained(output_dir, device_map="auto", torch_dtype=torch.bfloat16)
208 model = model.merge_and_unload()
209
210 output_merged_dir = os.path.join(script_args.output_dir, "final_merged_checkpoint")
211 model.save_pretrained(output_merged_dir, safe_serialization=True)
212
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/research_projects/stack_llama_2/scripts/sft_llama2.py b/examples/research_projects/stack_llama_2/scripts/sft_llama2.py
--- a/examples/research_projects/stack_llama_2/scripts/sft_llama2.py
+++ b/examples/research_projects/stack_llama_2/scripts/sft_llama2.py
@@ -35,7 +35,8 @@
gradient_checkpointing: Optional[bool] = field(
default=True, metadata={"help": "whether to use gradient checkpointing"}
)
- group_by_length: Optional[bool] = field(default=True, metadata={"help": "whether to group by length"})
+ group_by_length: Optional[bool] = field(default=False, metadata={"help": "whether to group by length"})
+ packing: Optional[bool] = field(default=True, metadata={"help": "whether to use packing for SFTTrainer"})
lora_alpha: Optional[float] = field(default=16, metadata={"help": "the lora alpha parameter"})
lora_dropout: Optional[float] = field(default=0.05, metadata={"help": "the lora dropout parameter"})
@@ -54,6 +55,9 @@
parser = HfArgumentParser(ScriptArguments)
script_args = parser.parse_args_into_dataclasses()[0]
+if script_args.group_by_length and script_args.packing:
+ raise ValueError("Cannot use both packing and group by length")
+
def chars_token_ratio(dataset, tokenizer, nb_examples=400):
"""
@@ -189,7 +193,7 @@
train_dataset=train_dataset,
eval_dataset=eval_dataset,
peft_config=peft_config,
- packing=True,
+ packing=script_args.packing,
max_seq_length=None,
tokenizer=tokenizer,
args=training_args,
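
The check introduced above is needed because packing feeds the trainer a `ConstantLengthDataset`, an iterable-style dataset with no per-sample lengths, so the length-grouped sampler behind `group_by_length` has nothing to sort, which is exactly the `IterableDataset` error from the issue. A minimal, self-contained sketch of the same mutual-exclusion guard, using hypothetical argument names rather than the script's real parser, might look like this:

```python
from dataclasses import dataclass


@dataclass
class Args:
    # Hypothetical stand-ins for the script's CLI flags.
    packing: bool = True            # packed, iterable-style dataset
    group_by_length: bool = False   # needs per-sample lengths to sort


def validate(args: Args) -> None:
    # Fail fast: an iterable dataset has no lengths for the sampler to group.
    if args.packing and args.group_by_length:
        raise ValueError("Cannot use both packing and group_by_length")


validate(Args())  # the new defaults are compatible, so this passes silently
```

Raising at argument-validation time turns the confusing mid-setup `ValueError` into an immediate, actionable message before any model or dataset is loaded.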
| {"golden_diff": "diff --git a/examples/research_projects/stack_llama_2/scripts/sft_llama2.py b/examples/research_projects/stack_llama_2/scripts/sft_llama2.py\n--- a/examples/research_projects/stack_llama_2/scripts/sft_llama2.py\n+++ b/examples/research_projects/stack_llama_2/scripts/sft_llama2.py\n@@ -35,7 +35,8 @@\n gradient_checkpointing: Optional[bool] = field(\n default=True, metadata={\"help\": \"whether to use gradient checkpointing\"}\n )\n- group_by_length: Optional[bool] = field(default=True, metadata={\"help\": \"whether to group by length\"})\n+ group_by_length: Optional[bool] = field(default=False, metadata={\"help\": \"whether to group by length\"})\n+ packing: Optional[bool] = field(default=True, metadata={\"help\": \"whether to use packing for SFTTrainer\"})\n \n lora_alpha: Optional[float] = field(default=16, metadata={\"help\": \"the lora alpha parameter\"})\n lora_dropout: Optional[float] = field(default=0.05, metadata={\"help\": \"the lora dropout parameter\"})\n@@ -54,6 +55,9 @@\n parser = HfArgumentParser(ScriptArguments)\n script_args = parser.parse_args_into_dataclasses()[0]\n \n+if script_args.group_by_length and script_args.packing:\n+ raise ValueError(\"Cannot use both packing and group by length\")\n+\n \n def chars_token_ratio(dataset, tokenizer, nb_examples=400):\n \"\"\"\n@@ -189,7 +193,7 @@\n train_dataset=train_dataset,\n eval_dataset=eval_dataset,\n peft_config=peft_config,\n- packing=True,\n+ packing=script_args.packing,\n max_seq_length=None,\n tokenizer=tokenizer,\n args=training_args,\n", "issue": "sft_llama2 error\nHello! \r\n\r\nI made no changes to the sample code or the dataset. I just simply wanted to get it to run the stacked llama2 example. However, I get this error:\r\n\r\nUserWarning: The passed formatting_func has more than one argument. Usually that function should have a single argument `example` which corresponds to the dictionary returned by each element of the dataset. Make sure you know what you are doing.\r\n\r\n in __init__\r\n raise ValueError(\"the `--group_by_length` option is only available for `Dataset`, not `IterableDataset\")\r\nValueError: the `--group_by_length` option is only available for `Dataset`, not `IterableDataset\r\n\r\n(Sorry, I must omit certain parts of the error message since I am working on an institution's server). 
\r\n\r\nThank you!\n", "before_files": [{"content": "# Fine-Tune Llama2-7b on SE paired dataset\nimport os\nfrom dataclasses import dataclass, field\nfrom typing import Optional\n\nimport torch\nfrom datasets import load_dataset\nfrom peft import AutoPeftModelForCausalLM, LoraConfig\nfrom tqdm import tqdm\nfrom transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, HfArgumentParser, TrainingArguments\n\nfrom trl import SFTTrainer\nfrom trl.trainer import ConstantLengthDataset\n\n\n@dataclass\nclass ScriptArguments:\n model_name: Optional[str] = field(default=\"meta-llama/Llama-2-7b-hf\", metadata={\"help\": \"the model name\"})\n log_with: Optional[str] = field(default=\"wandb\", metadata={\"help\": \"use 'wandb' to log with wandb\"})\n\n dataset_name: Optional[str] = field(default=\"lvwerra/stack-exchange-paired\", metadata={\"help\": \"the dataset name\"})\n subset: Optional[str] = field(default=\"data/finetune\", metadata={\"help\": \"the subset to use\"})\n split: Optional[str] = field(default=\"train\", metadata={\"help\": \"the split to use\"})\n size_valid_set: Optional[int] = field(default=4000, metadata={\"help\": \"the size of the validation set\"})\n streaming: Optional[bool] = field(default=True, metadata={\"help\": \"whether to stream the dataset\"})\n shuffle_buffer: Optional[int] = field(default=5000, metadata={\"help\": \"the shuffle buffer size\"})\n seq_length: Optional[int] = field(default=1024, metadata={\"help\": \"the sequence length\"})\n\n max_steps: Optional[int] = field(default=500, metadata={\"help\": \"the maximum number of sgd steps\"})\n logging_steps: Optional[int] = field(default=10, metadata={\"help\": \"the logging frequency\"})\n save_steps: Optional[int] = field(default=10, metadata={\"help\": \"the saving frequency\"})\n per_device_train_batch_size: Optional[int] = field(default=4, metadata={\"help\": \"the per device train batch size\"})\n per_device_eval_batch_size: Optional[int] = field(default=1, metadata={\"help\": \"the per device eval batch size\"})\n gradient_accumulation_steps: Optional[int] = field(default=2, metadata={\"help\": \"the gradient accumulation steps\"})\n gradient_checkpointing: Optional[bool] = field(\n default=True, metadata={\"help\": \"whether to use gradient checkpointing\"}\n )\n group_by_length: Optional[bool] = field(default=True, metadata={\"help\": \"whether to group by length\"})\n\n lora_alpha: Optional[float] = field(default=16, metadata={\"help\": \"the lora alpha parameter\"})\n lora_dropout: Optional[float] = field(default=0.05, metadata={\"help\": \"the lora dropout parameter\"})\n lora_r: Optional[int] = field(default=8, metadata={\"help\": \"the lora r parameter\"})\n\n learning_rate: Optional[float] = field(default=1e-4, metadata={\"help\": \"the learning rate\"})\n lr_scheduler_type: Optional[str] = field(default=\"cosine\", metadata={\"help\": \"the lr scheduler type\"})\n num_warmup_steps: Optional[int] = field(default=100, metadata={\"help\": \"the number of warmup steps\"})\n weight_decay: Optional[float] = field(default=0.05, metadata={\"help\": \"the weight decay\"})\n optimizer_type: Optional[str] = field(default=\"paged_adamw_32bit\", metadata={\"help\": \"the optimizer type\"})\n\n output_dir: Optional[str] = field(default=\"./results\", metadata={\"help\": \"the output directory\"})\n log_freq: Optional[int] = field(default=1, metadata={\"help\": \"the logging frequency\"})\n\n\nparser = HfArgumentParser(ScriptArguments)\nscript_args = 
parser.parse_args_into_dataclasses()[0]\n\n\ndef chars_token_ratio(dataset, tokenizer, nb_examples=400):\n \"\"\"\n Estimate the average number of characters per token in the dataset.\n \"\"\"\n total_characters, total_tokens = 0, 0\n for _, example in tqdm(zip(range(nb_examples), iter(dataset)), total=nb_examples):\n text = prepare_sample_text(example)\n total_characters += len(text)\n if tokenizer.is_fast:\n total_tokens += len(tokenizer(text).tokens())\n else:\n total_tokens += len(tokenizer.tokenize(text))\n\n return total_characters / total_tokens\n\n\ndef print_trainable_parameters(model):\n \"\"\"\n Prints the number of trainable parameters in the model.\n \"\"\"\n trainable_params = 0\n all_param = 0\n for _, param in model.named_parameters():\n all_param += param.numel()\n if param.requires_grad:\n trainable_params += param.numel()\n print(\n f\"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}\"\n )\n\n\ndef prepare_sample_text(example):\n \"\"\"Prepare the text from a sample of the dataset.\"\"\"\n text = f\"Question: {example['question']}\\n\\nAnswer: {example['response_j']}\"\n return text\n\n\ndef create_datasets(tokenizer, args):\n dataset = load_dataset(\n args.dataset_name,\n data_dir=args.subset,\n split=args.split,\n use_auth_token=True,\n num_proc=args.num_workers if not args.streaming else None,\n streaming=args.streaming,\n )\n if args.streaming:\n print(\"Loading the dataset in streaming mode\")\n valid_data = dataset.take(args.size_valid_set)\n train_data = dataset.skip(args.size_valid_set)\n train_data = train_data.shuffle(buffer_size=args.shuffle_buffer, seed=None)\n else:\n dataset = dataset.train_test_split(test_size=0.005, seed=None)\n train_data = dataset[\"train\"]\n valid_data = dataset[\"test\"]\n print(f\"Size of the train set: {len(train_data)}. 
Size of the validation set: {len(valid_data)}\")\n\n chars_per_token = chars_token_ratio(train_data, tokenizer)\n print(f\"The character to token ratio of the dataset is: {chars_per_token:.2f}\")\n\n train_dataset = ConstantLengthDataset(\n tokenizer,\n train_data,\n formatting_func=prepare_sample_text,\n infinite=True,\n seq_length=args.seq_length,\n chars_per_token=chars_per_token,\n )\n valid_dataset = ConstantLengthDataset(\n tokenizer,\n valid_data,\n formatting_func=prepare_sample_text,\n infinite=False,\n seq_length=args.seq_length,\n chars_per_token=chars_per_token,\n )\n return train_dataset, valid_dataset\n\n\nbnb_config = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_quant_type=\"nf4\",\n bnb_4bit_compute_dtype=torch.bfloat16,\n)\n\nbase_model = AutoModelForCausalLM.from_pretrained(\n script_args.model_name,\n quantization_config=bnb_config,\n device_map={\"\": 0},\n trust_remote_code=True,\n use_auth_token=True,\n)\nbase_model.config.use_cache = False\n\npeft_config = LoraConfig(\n r=script_args.lora_r,\n lora_alpha=script_args.lora_alpha,\n lora_dropout=script_args.lora_dropout,\n target_modules=[\"q_proj\", \"v_proj\"],\n bias=\"none\",\n task_type=\"CAUSAL_LM\",\n)\n\ntokenizer = AutoTokenizer.from_pretrained(script_args.model_name, trust_remote_code=True)\ntokenizer.pad_token = tokenizer.eos_token\ntokenizer.padding_side = \"right\" # Fix weird overflow issue with fp16 training\n\n\ntraining_args = TrainingArguments(\n output_dir=script_args.output_dir,\n per_device_train_batch_size=script_args.per_device_train_batch_size,\n gradient_accumulation_steps=script_args.gradient_accumulation_steps,\n per_device_eval_batch_size=script_args.per_device_eval_batch_size,\n learning_rate=script_args.learning_rate,\n logging_steps=script_args.logging_steps,\n max_steps=script_args.max_steps,\n report_to=script_args.log_with,\n save_steps=script_args.save_steps,\n group_by_length=script_args.group_by_length,\n lr_scheduler_type=script_args.lr_scheduler_type,\n warmup_steps=script_args.num_warmup_steps,\n optim=script_args.optimizer_type,\n bf16=True,\n remove_unused_columns=False,\n run_name=\"sft_llama2\",\n)\n\ntrain_dataset, eval_dataset = create_datasets(tokenizer, script_args)\n\ntrainer = SFTTrainer(\n model=base_model,\n train_dataset=train_dataset,\n eval_dataset=eval_dataset,\n peft_config=peft_config,\n packing=True,\n max_seq_length=None,\n tokenizer=tokenizer,\n args=training_args,\n)\ntrainer.train()\ntrainer.save_model(script_args.output_dir)\n\noutput_dir = os.path.join(script_args.output_dir, \"final_checkpoint\")\ntrainer.model.save_pretrained(output_dir)\n\n# Free memory for merging weights\ndel base_model\ntorch.cuda.empty_cache()\n\nmodel = AutoPeftModelForCausalLM.from_pretrained(output_dir, device_map=\"auto\", torch_dtype=torch.bfloat16)\nmodel = model.merge_and_unload()\n\noutput_merged_dir = os.path.join(script_args.output_dir, \"final_merged_checkpoint\")\nmodel.save_pretrained(output_merged_dir, safe_serialization=True)\n", "path": "examples/research_projects/stack_llama_2/scripts/sft_llama2.py"}], "after_files": [{"content": "# Fine-Tune Llama2-7b on SE paired dataset\nimport os\nfrom dataclasses import dataclass, field\nfrom typing import Optional\n\nimport torch\nfrom datasets import load_dataset\nfrom peft import AutoPeftModelForCausalLM, LoraConfig\nfrom tqdm import tqdm\nfrom transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, HfArgumentParser, TrainingArguments\n\nfrom trl import SFTTrainer\nfrom trl.trainer import 
ConstantLengthDataset\n\n\n@dataclass\nclass ScriptArguments:\n model_name: Optional[str] = field(default=\"meta-llama/Llama-2-7b-hf\", metadata={\"help\": \"the model name\"})\n log_with: Optional[str] = field(default=\"wandb\", metadata={\"help\": \"use 'wandb' to log with wandb\"})\n\n dataset_name: Optional[str] = field(default=\"lvwerra/stack-exchange-paired\", metadata={\"help\": \"the dataset name\"})\n subset: Optional[str] = field(default=\"data/finetune\", metadata={\"help\": \"the subset to use\"})\n split: Optional[str] = field(default=\"train\", metadata={\"help\": \"the split to use\"})\n size_valid_set: Optional[int] = field(default=4000, metadata={\"help\": \"the size of the validation set\"})\n streaming: Optional[bool] = field(default=True, metadata={\"help\": \"whether to stream the dataset\"})\n shuffle_buffer: Optional[int] = field(default=5000, metadata={\"help\": \"the shuffle buffer size\"})\n seq_length: Optional[int] = field(default=1024, metadata={\"help\": \"the sequence length\"})\n\n max_steps: Optional[int] = field(default=500, metadata={\"help\": \"the maximum number of sgd steps\"})\n logging_steps: Optional[int] = field(default=10, metadata={\"help\": \"the logging frequency\"})\n save_steps: Optional[int] = field(default=10, metadata={\"help\": \"the saving frequency\"})\n per_device_train_batch_size: Optional[int] = field(default=4, metadata={\"help\": \"the per device train batch size\"})\n per_device_eval_batch_size: Optional[int] = field(default=1, metadata={\"help\": \"the per device eval batch size\"})\n gradient_accumulation_steps: Optional[int] = field(default=2, metadata={\"help\": \"the gradient accumulation steps\"})\n gradient_checkpointing: Optional[bool] = field(\n default=True, metadata={\"help\": \"whether to use gradient checkpointing\"}\n )\n group_by_length: Optional[bool] = field(default=False, metadata={\"help\": \"whether to group by length\"})\n packing: Optional[bool] = field(default=True, metadata={\"help\": \"whether to use packing for SFTTrainer\"})\n\n lora_alpha: Optional[float] = field(default=16, metadata={\"help\": \"the lora alpha parameter\"})\n lora_dropout: Optional[float] = field(default=0.05, metadata={\"help\": \"the lora dropout parameter\"})\n lora_r: Optional[int] = field(default=8, metadata={\"help\": \"the lora r parameter\"})\n\n learning_rate: Optional[float] = field(default=1e-4, metadata={\"help\": \"the learning rate\"})\n lr_scheduler_type: Optional[str] = field(default=\"cosine\", metadata={\"help\": \"the lr scheduler type\"})\n num_warmup_steps: Optional[int] = field(default=100, metadata={\"help\": \"the number of warmup steps\"})\n weight_decay: Optional[float] = field(default=0.05, metadata={\"help\": \"the weight decay\"})\n optimizer_type: Optional[str] = field(default=\"paged_adamw_32bit\", metadata={\"help\": \"the optimizer type\"})\n\n output_dir: Optional[str] = field(default=\"./results\", metadata={\"help\": \"the output directory\"})\n log_freq: Optional[int] = field(default=1, metadata={\"help\": \"the logging frequency\"})\n\n\nparser = HfArgumentParser(ScriptArguments)\nscript_args = parser.parse_args_into_dataclasses()[0]\n\nif script_args.group_by_length and script_args.packing:\n raise ValueError(\"Cannot use both packing and group by length\")\n\n\ndef chars_token_ratio(dataset, tokenizer, nb_examples=400):\n \"\"\"\n Estimate the average number of characters per token in the dataset.\n \"\"\"\n total_characters, total_tokens = 0, 0\n for _, example in tqdm(zip(range(nb_examples), 
iter(dataset)), total=nb_examples):\n text = prepare_sample_text(example)\n total_characters += len(text)\n if tokenizer.is_fast:\n total_tokens += len(tokenizer(text).tokens())\n else:\n total_tokens += len(tokenizer.tokenize(text))\n\n return total_characters / total_tokens\n\n\ndef print_trainable_parameters(model):\n \"\"\"\n Prints the number of trainable parameters in the model.\n \"\"\"\n trainable_params = 0\n all_param = 0\n for _, param in model.named_parameters():\n all_param += param.numel()\n if param.requires_grad:\n trainable_params += param.numel()\n print(\n f\"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}\"\n )\n\n\ndef prepare_sample_text(example):\n \"\"\"Prepare the text from a sample of the dataset.\"\"\"\n text = f\"Question: {example['question']}\\n\\nAnswer: {example['response_j']}\"\n return text\n\n\ndef create_datasets(tokenizer, args):\n dataset = load_dataset(\n args.dataset_name,\n data_dir=args.subset,\n split=args.split,\n use_auth_token=True,\n num_proc=args.num_workers if not args.streaming else None,\n streaming=args.streaming,\n )\n if args.streaming:\n print(\"Loading the dataset in streaming mode\")\n valid_data = dataset.take(args.size_valid_set)\n train_data = dataset.skip(args.size_valid_set)\n train_data = train_data.shuffle(buffer_size=args.shuffle_buffer, seed=None)\n else:\n dataset = dataset.train_test_split(test_size=0.005, seed=None)\n train_data = dataset[\"train\"]\n valid_data = dataset[\"test\"]\n print(f\"Size of the train set: {len(train_data)}. Size of the validation set: {len(valid_data)}\")\n\n chars_per_token = chars_token_ratio(train_data, tokenizer)\n print(f\"The character to token ratio of the dataset is: {chars_per_token:.2f}\")\n\n train_dataset = ConstantLengthDataset(\n tokenizer,\n train_data,\n formatting_func=prepare_sample_text,\n infinite=True,\n seq_length=args.seq_length,\n chars_per_token=chars_per_token,\n )\n valid_dataset = ConstantLengthDataset(\n tokenizer,\n valid_data,\n formatting_func=prepare_sample_text,\n infinite=False,\n seq_length=args.seq_length,\n chars_per_token=chars_per_token,\n )\n return train_dataset, valid_dataset\n\n\nbnb_config = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_quant_type=\"nf4\",\n bnb_4bit_compute_dtype=torch.bfloat16,\n)\n\nbase_model = AutoModelForCausalLM.from_pretrained(\n script_args.model_name,\n quantization_config=bnb_config,\n device_map={\"\": 0},\n trust_remote_code=True,\n use_auth_token=True,\n)\nbase_model.config.use_cache = False\n\npeft_config = LoraConfig(\n r=script_args.lora_r,\n lora_alpha=script_args.lora_alpha,\n lora_dropout=script_args.lora_dropout,\n target_modules=[\"q_proj\", \"v_proj\"],\n bias=\"none\",\n task_type=\"CAUSAL_LM\",\n)\n\ntokenizer = AutoTokenizer.from_pretrained(script_args.model_name, trust_remote_code=True)\ntokenizer.pad_token = tokenizer.eos_token\ntokenizer.padding_side = \"right\" # Fix weird overflow issue with fp16 training\n\n\ntraining_args = TrainingArguments(\n output_dir=script_args.output_dir,\n per_device_train_batch_size=script_args.per_device_train_batch_size,\n gradient_accumulation_steps=script_args.gradient_accumulation_steps,\n per_device_eval_batch_size=script_args.per_device_eval_batch_size,\n learning_rate=script_args.learning_rate,\n logging_steps=script_args.logging_steps,\n max_steps=script_args.max_steps,\n report_to=script_args.log_with,\n save_steps=script_args.save_steps,\n group_by_length=script_args.group_by_length,\n 
lr_scheduler_type=script_args.lr_scheduler_type,\n warmup_steps=script_args.num_warmup_steps,\n optim=script_args.optimizer_type,\n bf16=True,\n remove_unused_columns=False,\n run_name=\"sft_llama2\",\n)\n\ntrain_dataset, eval_dataset = create_datasets(tokenizer, script_args)\n\ntrainer = SFTTrainer(\n model=base_model,\n train_dataset=train_dataset,\n eval_dataset=eval_dataset,\n peft_config=peft_config,\n packing=script_args.packing,\n max_seq_length=None,\n tokenizer=tokenizer,\n args=training_args,\n)\ntrainer.train()\ntrainer.save_model(script_args.output_dir)\n\noutput_dir = os.path.join(script_args.output_dir, \"final_checkpoint\")\ntrainer.model.save_pretrained(output_dir)\n\n# Free memory for merging weights\ndel base_model\ntorch.cuda.empty_cache()\n\nmodel = AutoPeftModelForCausalLM.from_pretrained(output_dir, device_map=\"auto\", torch_dtype=torch.bfloat16)\nmodel = model.merge_and_unload()\n\noutput_merged_dir = os.path.join(script_args.output_dir, \"final_merged_checkpoint\")\nmodel.save_pretrained(output_merged_dir, safe_serialization=True)\n", "path": "examples/research_projects/stack_llama_2/scripts/sft_llama2.py"}]} | 2,996 | 405 |
gh_patches_debug_56595 | rasdani/github-patches | git_diff | RedHatInsights__insights-core-3045 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The virsh_list_all parser is raising ValueError exceptions in production
The VirshListAll parser is throwing a large number of ValueError("Line containing 'Id,Name,State' was not found in table",) exceptions in production.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `insights/parsers/virsh_list_all.py`
Content:
```
1 """VirshListAll - command ``virsh --readonly list --all``
2 =========================================================
3
4 This module provides VM status using output of command ``virsh --readonly list --all``.
5 """
6 from collections import namedtuple
7
8 from insights.specs import Specs
9 from .. import CommandParser, parser
10 from . import parse_fixed_table, keyword_search
11
12
13 @parser(Specs.virsh_list_all)
14 class VirshListAll(CommandParser):
15 """Parsing output of ``virsh --readonly list --all``.
16
17 Typical output of ``virsh --readonly list --all`` command is::
18
19 Id Name State
20 ----------------------------------------------------
21 2 rhel7.4 running
22 4 rhel7.0 paused
23 - centos6.8-router shut off
24 - cfme-5.7.13 shut off
25 - cfme-rhos-5.9.0.15 shut off
26 - fedora-24-kernel shut off
27 - fedora-saio_fedoraSaio shut off
28 - fedora24-misc shut off
29 - freebsd11.0 shut off
30 - guixSD shut off
31 - miq-gap-1 shut off
32 - rhel7.2 shut off
33 - RHOSP10 shut off
34
35
36 Examples:
37
38 >>> len(output.search(state='shut off')) == 11
39 True
40 >>> len(output.search(id=None)) == 11
41 True
42 >>> len(output.search(id=2)) == 1
43 True
44 >>> output.search(name='rhel7.4') == [{'state': 'running', 'id': 2, 'name': 'rhel7.4'}]
45 True
46 >>> output.get_vm_state('rhel7.0') == 'paused'
47 True
48 >>> output.get_vm_state('rhel9.0') is None
49 True
50 >>> 'cfme' in output
51 False
52 >>> 'cfme-5.7.13' in output
53 True
54
55 Attributes:
56 fields (list): List of ``KeyValue`` namedtupules for each line
57 in the command.
58
59 cols (list): List id key value pair derived from the command.
60
61 keywords (list): keywords present in the command, each
62 keyword is converted to lowercase.
63
64 """
65 keyvalue = namedtuple('KeyValue',
66 ['name', 'state', 'id', 'name_lower'])
67 """namedtuple: Represent name value pair as a namedtuple with case."""
68 def _cleanup(self):
69 for col in self.cols:
70 if col['id'] == '-':
71 col['id'] = None
72 else:
73 col['id'] = (lambda x: int(x) if x.isdigit() else x)(col['id'])
74
75 def parse_content(self, content):
76 self.fields = []
77 self.cols = []
78 self.keywords = []
79 if not content:
80 return
81
82 self.cols = parse_fixed_table(content,
83 heading_ignore=['Id', 'Name', 'State'],
84 header_substitute=[('Id', 'id'), ('Name', 'name'), ('State', 'state')])[1:] # noqa
85 self._cleanup()
86
87 for item in self.cols:
88 self.fields.append(self.keyvalue(item['name'], item['state'], item['id'], item['name'].lower())) # noqa
89 self.keywords = [name.name_lower for name in self.fields]
90
91 def __contains__(self, keyword):
92 return keyword.lower() in self.keywords
93
94 def __iter__(self):
95 return iter(self.fields)
96
97 def search(self, **kw):
98 '''Search item based on key value pair.
99
100 Example:
101
102 >>> len(output.search(state='shut off')) == 11
103 True
104 >>> len(output.search(id=None)) == 11
105 True
106 >>> len(output.search(id=2)) == 1
107 True
108 '''
109 return keyword_search(self.cols, **kw)
110
111 def get_vm_state(self, vmname):
112 '''Get VM state associated with vmname
113
114 Typical output is ``virsh --readonly list --all`` command::
115
116 Id Name State
117 ----------------------------------------------------
118 2 rhel7.4 running
119 4 rhel7.0 paused
120
121
122 Example:
123
124 >>> output.get_vm_state('rhel7.0')
125 'paused'
126
127 Args:
128
129 vmname (str): A key. For ex. ``rhel7.0``.
130
131 Returns:
132
133 str: State of VM. Returns None if, ``vmname`` does not exist.
134 '''
135 if vmname.lower() in self.keywords:
136 return self.search(name=vmname)[0]['state']
137 return None
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/insights/parsers/virsh_list_all.py b/insights/parsers/virsh_list_all.py
--- a/insights/parsers/virsh_list_all.py
+++ b/insights/parsers/virsh_list_all.py
@@ -76,6 +76,10 @@
self.fields = []
self.cols = []
self.keywords = []
+ # Check and remove any error message, or empty lines. This to
+ # prevent any ValueError exceptions when parse_fixed_table is
+ # called below.
+ content = [l for l in content if not l.startswith("error: ") and l != ""]
if not content:
return
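
The one-line filter is the whole fix: once `error: ...` messages and blank lines are stripped, output that never contained the `Id Name State` header reduces to an empty list and the parser returns early instead of letting `parse_fixed_table` raise. A standalone sketch of that defensive cleanup, using hypothetical sample lines rather than real command output:

```python
def clean_virsh_output(content):
    # Drop "error: ..." messages and blank lines so the fixed-width table
    # parser only ever sees a real header row, or nothing at all.
    return [line for line in content if not line.startswith("error: ") and line != ""]


normal = [" Id    Name       State", "----", " 2     rhel7.4    running"]
broken = ["error: failed to connect to the hypervisor", ""]

assert clean_virsh_output(normal) == normal  # real tables pass through untouched
assert clean_virsh_output(broken) == []      # the parser can now bail out early
```

Filtering before parsing, rather than wrapping the call in try/except, keeps the parser's empty-content behaviour identical for both truly empty input and error-only input.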
| {"golden_diff": "diff --git a/insights/parsers/virsh_list_all.py b/insights/parsers/virsh_list_all.py\n--- a/insights/parsers/virsh_list_all.py\n+++ b/insights/parsers/virsh_list_all.py\n@@ -76,6 +76,10 @@\n self.fields = []\n self.cols = []\n self.keywords = []\n+ # Check and remove any error message, or empty lines. This to\n+ # prevent any ValueError exceptions when parse_fixed_table is\n+ # called below.\n+ content = [l for l in content if not l.startswith(\"error: \") and l != \"\"]\n if not content:\n return\n", "issue": "The virsh_list_all parser is raising ValueError exceptions in production\nThe VirshListAll parser is throwing a large number of the exception ValueError(\"Line containing 'Id,Name,State' was not found in table\",) in production.\n", "before_files": [{"content": "\"\"\"VirshListAll - command ``virsh --readonly list --all``\n=========================================================\n\nThis module provides VM status using output of command ``virsh --readonly list --all``.\n\"\"\"\nfrom collections import namedtuple\n\nfrom insights.specs import Specs\nfrom .. import CommandParser, parser\nfrom . import parse_fixed_table, keyword_search\n\n\n@parser(Specs.virsh_list_all)\nclass VirshListAll(CommandParser):\n \"\"\"Parsing output of ``virsh --readonly list --all``.\n\n Typical output of ``virsh --readonly list --all`` command is::\n\n Id Name State\n ----------------------------------------------------\n 2 rhel7.4 running\n 4 rhel7.0 paused\n - centos6.8-router shut off\n - cfme-5.7.13 shut off\n - cfme-rhos-5.9.0.15 shut off\n - fedora-24-kernel shut off\n - fedora-saio_fedoraSaio shut off\n - fedora24-misc shut off\n - freebsd11.0 shut off\n - guixSD shut off\n - miq-gap-1 shut off\n - rhel7.2 shut off\n - RHOSP10 shut off\n\n\n Examples:\n\n >>> len(output.search(state='shut off')) == 11\n True\n >>> len(output.search(id=None)) == 11\n True\n >>> len(output.search(id=2)) == 1\n True\n >>> output.search(name='rhel7.4') == [{'state': 'running', 'id': 2, 'name': 'rhel7.4'}]\n True\n >>> output.get_vm_state('rhel7.0') == 'paused'\n True\n >>> output.get_vm_state('rhel9.0') is None\n True\n >>> 'cfme' in output\n False\n >>> 'cfme-5.7.13' in output\n True\n\n Attributes:\n fields (list): List of ``KeyValue`` namedtupules for each line\n in the command.\n\n cols (list): List id key value pair derived from the command.\n\n keywords (list): keywords present in the command, each\n keyword is converted to lowercase.\n\n \"\"\"\n keyvalue = namedtuple('KeyValue',\n ['name', 'state', 'id', 'name_lower'])\n \"\"\"namedtuple: Represent name value pair as a namedtuple with case.\"\"\"\n def _cleanup(self):\n for col in self.cols:\n if col['id'] == '-':\n col['id'] = None\n else:\n col['id'] = (lambda x: int(x) if x.isdigit() else x)(col['id'])\n\n def parse_content(self, content):\n self.fields = []\n self.cols = []\n self.keywords = []\n if not content:\n return\n\n self.cols = parse_fixed_table(content,\n heading_ignore=['Id', 'Name', 'State'],\n header_substitute=[('Id', 'id'), ('Name', 'name'), ('State', 'state')])[1:] # noqa\n self._cleanup()\n\n for item in self.cols:\n self.fields.append(self.keyvalue(item['name'], item['state'], item['id'], item['name'].lower())) # noqa\n self.keywords = [name.name_lower for name in self.fields]\n\n def __contains__(self, keyword):\n return keyword.lower() in self.keywords\n\n def __iter__(self):\n return iter(self.fields)\n\n def search(self, **kw):\n '''Search item based on key value pair.\n\n Example:\n\n >>> 
len(output.search(state='shut off')) == 11\n True\n >>> len(output.search(id=None)) == 11\n True\n >>> len(output.search(id=2)) == 1\n True\n '''\n return keyword_search(self.cols, **kw)\n\n def get_vm_state(self, vmname):\n '''Get VM state associated with vmname\n\n Typical output is ``virsh --readonly list --all`` command::\n\n Id Name State\n ----------------------------------------------------\n 2 rhel7.4 running\n 4 rhel7.0 paused\n\n\n Example:\n\n >>> output.get_vm_state('rhel7.0')\n 'paused'\n\n Args:\n\n vmname (str): A key. For ex. ``rhel7.0``.\n\n Returns:\n\n str: State of VM. Returns None if, ``vmname`` does not exist.\n '''\n if vmname.lower() in self.keywords:\n return self.search(name=vmname)[0]['state']\n return None\n", "path": "insights/parsers/virsh_list_all.py"}], "after_files": [{"content": "\"\"\"VirshListAll - command ``virsh --readonly list --all``\n=========================================================\n\nThis module provides VM status using output of command ``virsh --readonly list --all``.\n\"\"\"\nfrom collections import namedtuple\n\nfrom insights.specs import Specs\nfrom .. import CommandParser, parser\nfrom . import parse_fixed_table, keyword_search\n\n\n@parser(Specs.virsh_list_all)\nclass VirshListAll(CommandParser):\n \"\"\"Parsing output of ``virsh --readonly list --all``.\n\n Typical output of ``virsh --readonly list --all`` command is::\n\n Id Name State\n ----------------------------------------------------\n 2 rhel7.4 running\n 4 rhel7.0 paused\n - centos6.8-router shut off\n - cfme-5.7.13 shut off\n - cfme-rhos-5.9.0.15 shut off\n - fedora-24-kernel shut off\n - fedora-saio_fedoraSaio shut off\n - fedora24-misc shut off\n - freebsd11.0 shut off\n - guixSD shut off\n - miq-gap-1 shut off\n - rhel7.2 shut off\n - RHOSP10 shut off\n\n\n Examples:\n\n >>> len(output.search(state='shut off')) == 11\n True\n >>> len(output.search(id=None)) == 11\n True\n >>> len(output.search(id=2)) == 1\n True\n >>> output.search(name='rhel7.4') == [{'state': 'running', 'id': 2, 'name': 'rhel7.4'}]\n True\n >>> output.get_vm_state('rhel7.0') == 'paused'\n True\n >>> output.get_vm_state('rhel9.0') is None\n True\n >>> 'cfme' in output\n False\n >>> 'cfme-5.7.13' in output\n True\n\n Attributes:\n fields (list): List of ``KeyValue`` namedtupules for each line\n in the command.\n\n cols (list): List id key value pair derived from the command.\n\n keywords (list): keywords present in the command, each\n keyword is converted to lowercase.\n\n \"\"\"\n keyvalue = namedtuple('KeyValue',\n ['name', 'state', 'id', 'name_lower'])\n \"\"\"namedtuple: Represent name value pair as a namedtuple with case.\"\"\"\n def _cleanup(self):\n for col in self.cols:\n if col['id'] == '-':\n col['id'] = None\n else:\n col['id'] = (lambda x: int(x) if x.isdigit() else x)(col['id'])\n\n def parse_content(self, content):\n self.fields = []\n self.cols = []\n self.keywords = []\n # Check and remove any error message, or empty lines. 
This to\n # prevent any ValueError exceptions when parse_fixed_table is\n # called below.\n content = [l for l in content if not l.startswith(\"error: \") and l != \"\"]\n if not content:\n return\n\n self.cols = parse_fixed_table(content,\n heading_ignore=['Id', 'Name', 'State'],\n header_substitute=[('Id', 'id'), ('Name', 'name'), ('State', 'state')])[1:] # noqa\n self._cleanup()\n\n for item in self.cols:\n self.fields.append(self.keyvalue(item['name'], item['state'], item['id'], item['name'].lower())) # noqa\n self.keywords = [name.name_lower for name in self.fields]\n\n def __contains__(self, keyword):\n return keyword.lower() in self.keywords\n\n def __iter__(self):\n return iter(self.fields)\n\n def search(self, **kw):\n '''Search item based on key value pair.\n\n Example:\n\n >>> len(output.search(state='shut off')) == 11\n True\n >>> len(output.search(id=None)) == 11\n True\n >>> len(output.search(id=2)) == 1\n True\n '''\n return keyword_search(self.cols, **kw)\n\n def get_vm_state(self, vmname):\n '''Get VM state associated with vmname\n\n Typical output is ``virsh --readonly list --all`` command::\n\n Id Name State\n ----------------------------------------------------\n 2 rhel7.4 running\n 4 rhel7.0 paused\n\n\n Example:\n\n >>> output.get_vm_state('rhel7.0')\n 'paused'\n\n Args:\n\n vmname (str): A key. For ex. ``rhel7.0``.\n\n Returns:\n\n str: State of VM. Returns None if, ``vmname`` does not exist.\n '''\n if vmname.lower() in self.keywords:\n return self.search(name=vmname)[0]['state']\n return None\n", "path": "insights/parsers/virsh_list_all.py"}]} | 1,687 | 151 |
gh_patches_debug_15068 | rasdani/github-patches | git_diff | GeotrekCE__Geotrek-admin-1708 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Installation issue
Hello,
I'm trying to install Geotrek following the documentation, and I have some problems.
At the very beginning, when I run install.sh, the script can't find the `etc/settings.ini` file. I checked, and I have no `etc` folder at all, so the install aborted.
I tried to create this folder and the `settings.ini` file myself, with the expected variables (dbhost, dbname, etc.). That works (the database is installed), but the install crashes a few steps later when it tries to set up the Python environment: `Could not setup python environment !`
Did I miss something in the installation documentation?
How can I fix this problem?
Thanks for your help
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `geotrek/zoning/factories.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import factory
3
4 from django.conf import settings
5 from django.contrib.gis.geos import Polygon, MultiPolygon
6
7 from mapentity.helpers import bbox_split_srid_2154
8
9 from geotrek.core.factories import TopologyFactory
10
11 from . import models
12
13
14 # Create 16 cities and 4 districts distinct same-area zone covering the spatial_extent and cycle on it
15 geom_city_iter = bbox_split_srid_2154(settings.SPATIAL_EXTENT, by_x=4, by_y=4, cycle=True)
16 geom_district_iter = bbox_split_srid_2154(settings.SPATIAL_EXTENT, by_x=2, by_y=2, cycle=True)
17 geom_area_iter = bbox_split_srid_2154(settings.SPATIAL_EXTENT, by_x=2, by_y=2, cycle=True)
18
19
20 class CityFactory(factory.DjangoModelFactory):
21 class Meta:
22 model = models.City
23
24 code = factory.Sequence(lambda n: u"#%s" % n) # id (!) with max_length=6
25 name = factory.Sequence(lambda n: u"City name %s" % n)
26 geom = factory.Sequence(lambda _: MultiPolygon(Polygon.from_bbox(geom_city_iter.next()), srid=settings.SRID))
27
28
29 class DistrictFactory(factory.DjangoModelFactory):
30 class Meta:
31 model = models.District
32
33 name = factory.Sequence(lambda n: u"District name %s" % n)
34 geom = factory.Sequence(lambda _: MultiPolygon(Polygon.from_bbox(geom_district_iter.next()), srid=settings.SRID))
35
36
37 class RestrictedAreaTypeFactory(factory.DjangoModelFactory):
38
39 class Meta:
40 model = models.RestrictedAreaType
41
42 name = factory.Sequence(lambda n: u"Restricted name %s" % n)
43
44
45 class RestrictedAreaFactory(factory.DjangoModelFactory):
46 class Meta:
47 model = models.RestrictedArea
48
49 name = factory.Sequence(lambda n: u"Restricted area name %s" % n)
50 geom = factory.Sequence(lambda _: MultiPolygon(Polygon.from_bbox(geom_area_iter.next()), srid=settings.SRID))
51 area_type = factory.SubFactory(RestrictedAreaTypeFactory)
52
53
54 class RestrictedAreaEdgeFactory(TopologyFactory):
55
56 class Meta:
57 model = models.RestrictedAreaEdge
58
59 restricted_area = factory.SubFactory(RestrictedAreaFactory)
60
61
62 class CityEdgeFactory(TopologyFactory):
63
64 class Meta:
65 model = models.CityEdge
66
67 city = factory.SubFactory(CityFactory)
68
69
70 class DistrictEdgeFactory(TopologyFactory):
71
72 class Meta:
73 model = models.DistrictEdge
74
75 district = factory.SubFactory(DistrictFactory)
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/geotrek/zoning/factories.py b/geotrek/zoning/factories.py
--- a/geotrek/zoning/factories.py
+++ b/geotrek/zoning/factories.py
@@ -11,10 +11,13 @@
from . import models
+# Don't intersect with geom from PathFactory
+SPATIAL_EXTENT = (200000, 300000, 1100000, 1200000)
+
# Create 16 cities and 4 districts distinct same-area zone covering the spatial_extent and cycle on it
-geom_city_iter = bbox_split_srid_2154(settings.SPATIAL_EXTENT, by_x=4, by_y=4, cycle=True)
-geom_district_iter = bbox_split_srid_2154(settings.SPATIAL_EXTENT, by_x=2, by_y=2, cycle=True)
-geom_area_iter = bbox_split_srid_2154(settings.SPATIAL_EXTENT, by_x=2, by_y=2, cycle=True)
+geom_city_iter = bbox_split_srid_2154(SPATIAL_EXTENT, by_x=4, by_y=4, cycle=True)
+geom_district_iter = bbox_split_srid_2154(SPATIAL_EXTENT, by_x=2, by_y=2, cycle=True)
+geom_area_iter = bbox_split_srid_2154(SPATIAL_EXTENT, by_x=2, by_y=2, cycle=True)
class CityFactory(factory.DjangoModelFactory):
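
Pinning the factories to a hard-coded extent rather than `settings.SPATIAL_EXTENT` both keeps the zoning test geometries away from the ones produced by `PathFactory` (as the new comment says) and stops the fixtures from depending on whatever extent a particular install configures. For context, a rough approximation of what a bbox-splitting helper such as `bbox_split_srid_2154` is assumed to do; this is only an illustration, not mapentity's actual implementation:

```python
import itertools


def split_bbox(bbox, by_x, by_y, cycle=False):
    # Cut (xmin, ymin, xmax, ymax) into a by_x * by_y grid of equal sub-boxes.
    xmin, ymin, xmax, ymax = bbox
    dx, dy = (xmax - xmin) / by_x, (ymax - ymin) / by_y
    cells = [
        (xmin + i * dx, ymin + j * dy, xmin + (i + 1) * dx, ymin + (j + 1) * dy)
        for j in range(by_y)
        for i in range(by_x)
    ]
    return itertools.cycle(cells) if cycle else iter(cells)


grid = split_bbox((200000, 300000, 1100000, 1200000), by_x=4, by_y=4, cycle=True)
print(next(grid))  # first of 16 equal sub-extents, reused cyclically by the factories
```

With `cycle=True` the sixteen city factories simply reuse the grid cells in order, so the factories never run out of geometries no matter how many objects a test creates.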
| {"golden_diff": "diff --git a/geotrek/zoning/factories.py b/geotrek/zoning/factories.py\n--- a/geotrek/zoning/factories.py\n+++ b/geotrek/zoning/factories.py\n@@ -11,10 +11,13 @@\n from . import models\n \n \n+# Don't intersect with geom from PathFactory\n+SPATIAL_EXTENT = (200000, 300000, 1100000, 1200000)\n+\n # Create 16 cities and 4 districts distinct same-area zone covering the spatial_extent and cycle on it\n-geom_city_iter = bbox_split_srid_2154(settings.SPATIAL_EXTENT, by_x=4, by_y=4, cycle=True)\n-geom_district_iter = bbox_split_srid_2154(settings.SPATIAL_EXTENT, by_x=2, by_y=2, cycle=True)\n-geom_area_iter = bbox_split_srid_2154(settings.SPATIAL_EXTENT, by_x=2, by_y=2, cycle=True)\n+geom_city_iter = bbox_split_srid_2154(SPATIAL_EXTENT, by_x=4, by_y=4, cycle=True)\n+geom_district_iter = bbox_split_srid_2154(SPATIAL_EXTENT, by_x=2, by_y=2, cycle=True)\n+geom_area_iter = bbox_split_srid_2154(SPATIAL_EXTENT, by_x=2, by_y=2, cycle=True)\n \n \n class CityFactory(factory.DjangoModelFactory):\n", "issue": "Installation issue\nHello,\r\nI'm trying to install Geotrek following the documentation, and I have some problems.\r\n\r\nAt the very beginnig, when I run the install.sh, the script can't find the `etc/setting.ini` file. I checked, and I have no `'etc'` folder at all... So the install aborted. \r\nI tried to create myself this folder and the `settings.ini` file with the variable expected (dbhost, dbname etc...). It works (the database is installed), but the install crash few step later when it try to install the python environnement. `Could not setup python environment !`\r\n\r\nDid I miss something in the installation documentation ?\r\nHow can I fix this problem ?\r\n\r\nThanks for your help\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport factory\n\nfrom django.conf import settings\nfrom django.contrib.gis.geos import Polygon, MultiPolygon\n\nfrom mapentity.helpers import bbox_split_srid_2154\n\nfrom geotrek.core.factories import TopologyFactory\n\nfrom . import models\n\n\n# Create 16 cities and 4 districts distinct same-area zone covering the spatial_extent and cycle on it\ngeom_city_iter = bbox_split_srid_2154(settings.SPATIAL_EXTENT, by_x=4, by_y=4, cycle=True)\ngeom_district_iter = bbox_split_srid_2154(settings.SPATIAL_EXTENT, by_x=2, by_y=2, cycle=True)\ngeom_area_iter = bbox_split_srid_2154(settings.SPATIAL_EXTENT, by_x=2, by_y=2, cycle=True)\n\n\nclass CityFactory(factory.DjangoModelFactory):\n class Meta:\n model = models.City\n\n code = factory.Sequence(lambda n: u\"#%s\" % n) # id (!) 
with max_length=6\n name = factory.Sequence(lambda n: u\"City name %s\" % n)\n geom = factory.Sequence(lambda _: MultiPolygon(Polygon.from_bbox(geom_city_iter.next()), srid=settings.SRID))\n\n\nclass DistrictFactory(factory.DjangoModelFactory):\n class Meta:\n model = models.District\n\n name = factory.Sequence(lambda n: u\"District name %s\" % n)\n geom = factory.Sequence(lambda _: MultiPolygon(Polygon.from_bbox(geom_district_iter.next()), srid=settings.SRID))\n\n\nclass RestrictedAreaTypeFactory(factory.DjangoModelFactory):\n\n class Meta:\n model = models.RestrictedAreaType\n\n name = factory.Sequence(lambda n: u\"Restricted name %s\" % n)\n\n\nclass RestrictedAreaFactory(factory.DjangoModelFactory):\n class Meta:\n model = models.RestrictedArea\n\n name = factory.Sequence(lambda n: u\"Restricted area name %s\" % n)\n geom = factory.Sequence(lambda _: MultiPolygon(Polygon.from_bbox(geom_area_iter.next()), srid=settings.SRID))\n area_type = factory.SubFactory(RestrictedAreaTypeFactory)\n\n\nclass RestrictedAreaEdgeFactory(TopologyFactory):\n\n class Meta:\n model = models.RestrictedAreaEdge\n\n restricted_area = factory.SubFactory(RestrictedAreaFactory)\n\n\nclass CityEdgeFactory(TopologyFactory):\n\n class Meta:\n model = models.CityEdge\n\n city = factory.SubFactory(CityFactory)\n\n\nclass DistrictEdgeFactory(TopologyFactory):\n\n class Meta:\n model = models.DistrictEdge\n\n district = factory.SubFactory(DistrictFactory)\n", "path": "geotrek/zoning/factories.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport factory\n\nfrom django.conf import settings\nfrom django.contrib.gis.geos import Polygon, MultiPolygon\n\nfrom mapentity.helpers import bbox_split_srid_2154\n\nfrom geotrek.core.factories import TopologyFactory\n\nfrom . import models\n\n\n# Don't intersect with geom from PathFactory\nSPATIAL_EXTENT = (200000, 300000, 1100000, 1200000)\n\n# Create 16 cities and 4 districts distinct same-area zone covering the spatial_extent and cycle on it\ngeom_city_iter = bbox_split_srid_2154(SPATIAL_EXTENT, by_x=4, by_y=4, cycle=True)\ngeom_district_iter = bbox_split_srid_2154(SPATIAL_EXTENT, by_x=2, by_y=2, cycle=True)\ngeom_area_iter = bbox_split_srid_2154(SPATIAL_EXTENT, by_x=2, by_y=2, cycle=True)\n\n\nclass CityFactory(factory.DjangoModelFactory):\n class Meta:\n model = models.City\n\n code = factory.Sequence(lambda n: u\"#%s\" % n) # id (!) 
with max_length=6\n name = factory.Sequence(lambda n: u\"City name %s\" % n)\n geom = factory.Sequence(lambda _: MultiPolygon(Polygon.from_bbox(geom_city_iter.next()), srid=settings.SRID))\n\n\nclass DistrictFactory(factory.DjangoModelFactory):\n class Meta:\n model = models.District\n\n name = factory.Sequence(lambda n: u\"District name %s\" % n)\n geom = factory.Sequence(lambda _: MultiPolygon(Polygon.from_bbox(geom_district_iter.next()), srid=settings.SRID))\n\n\nclass RestrictedAreaTypeFactory(factory.DjangoModelFactory):\n\n class Meta:\n model = models.RestrictedAreaType\n\n name = factory.Sequence(lambda n: u\"Restricted name %s\" % n)\n\n\nclass RestrictedAreaFactory(factory.DjangoModelFactory):\n class Meta:\n model = models.RestrictedArea\n\n name = factory.Sequence(lambda n: u\"Restricted area name %s\" % n)\n geom = factory.Sequence(lambda _: MultiPolygon(Polygon.from_bbox(geom_area_iter.next()), srid=settings.SRID))\n area_type = factory.SubFactory(RestrictedAreaTypeFactory)\n\n\nclass RestrictedAreaEdgeFactory(TopologyFactory):\n\n class Meta:\n model = models.RestrictedAreaEdge\n\n restricted_area = factory.SubFactory(RestrictedAreaFactory)\n\n\nclass CityEdgeFactory(TopologyFactory):\n\n class Meta:\n model = models.CityEdge\n\n city = factory.SubFactory(CityFactory)\n\n\nclass DistrictEdgeFactory(TopologyFactory):\n\n class Meta:\n model = models.DistrictEdge\n\n district = factory.SubFactory(DistrictFactory)\n", "path": "geotrek/zoning/factories.py"}]} | 1,146 | 350 |
gh_patches_debug_41150 | rasdani/github-patches | git_diff | svthalia__concrexit-3555 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
What to do with active members who don't have memberships?
I noticed that we have some/at-least-1 users who don't have an active membership any more, but who are part of a society (or probably also committee). That's not really allowed of course.
I would like to think about and discuss how this should ideally be handled. I'm not sure if we would want to change any behaviour (and even if we decide something _should_ be done, it likely has no priority at all).
The case of committee/society members without a membership occurs naturally, for example for a short time when people forget to renew before september. Hence, I don't think it would be sensible to e.g. automatically end committee memberships at that point. (I also haven't read the HR to check whether people with no membership can be part of a committee or society officially.)
In principle, it would be the intern's responsibility to check for this kind of thing. However, that's probably not feasible, especially for societies. Secondarily, it makes sense that society chairs would be responsible, but those don't quite have an overview of Thalia memberships.
While it's probably not really desirable to prevent committee memberships existing without thalia memberships, we could think about the corresponding permissions on the website, appearance on the committee/society pages, and possibly some kind of warning system.
- Maybe it would be good to not allow people to use committee permissions when they don't have an active membership? On the other hand, there _might_ be cases where we do want non-members in a group? It would be good to check if the HR allows that.
- Non-member group members do currently show up on the website. This is clearly nice for boards, but is also the case for societies and committees. I don't like the idea of hiding non-members as that would be a difference between boards and other groups.
- Would it be worth it to warn people about this. For example, to periodically send an email to committee chairs or the board about active members who don't have a membership any more? This would be pretty easy to implement, but we should only do it if it's valuable.
@WidadMajdoubi24 @JeeVee11 Perhaps you guys should give this some thought (but there's no rush).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/activemembers/backends.py`
Content:
```
1 """Authentication backend to check permissions."""
2 from django.contrib.auth.models import Permission
3 from django.db.models import Q
4 from django.utils import timezone
5
6 from members.models import Member
7
8
9 class MemberGroupBackend:
10 """Check permissions against MemberGroups."""
11
12 def authenticate(self, *args, **kwargs):
13 """Not implemented in this backend."""
14 return
15
16 def get_user(self, *args, **kwargs):
17 """Not implemented in this backend."""
18 return
19
20 @staticmethod
21 def _get_permissions(user, obj):
22 if not user.is_active or user.is_anonymous or obj is not None:
23 return set()
24
25 perm_cache_name = "_membergroup_perm_cache"
26 if not hasattr(user, perm_cache_name):
27 if isinstance(user, Member):
28 member = user
29 else:
30 try:
31 member = Member.objects.get(pk=user.pk)
32 except Member.DoesNotExist:
33 return set()
34 now = timezone.now()
35 groups = member.membergroup_set.filter(
36 Q(membergroupmembership__until=None)
37 | Q(
38 membergroupmembership__since__lte=now,
39 membergroupmembership__until__gte=now,
40 )
41 )
42
43 chair_permission_groups = member.membergroup_set.filter(
44 Q(
45 membergroupmembership__until=None,
46 membergroupmembership__has_chair_permissions=True,
47 )
48 | Q(
49 membergroupmembership__since__lte=now,
50 membergroupmembership__until__gte=now,
51 membergroupmembership__has_chair_permissions=True,
52 )
53 )
54 perms = (
55 Permission.objects.filter(
56 Q(permissions_groups__in=groups)
57 | Q(chair_permissions_groups__in=chair_permission_groups)
58 )
59 .values_list("content_type__app_label", "codename")
60 .order_by()
61 )
62 setattr(
63 user,
64 perm_cache_name,
65 set(f"{ct}.{name}" for ct, name in perms),
66 )
67 return getattr(user, perm_cache_name)
68
69 def get_all_permissions(self, user, obj=None):
70 return self._get_permissions(user, obj)
71
72 def get_group_permissions(self, user, obj=None):
73 return self._get_permissions(user, obj)
74
75 def has_perm(self, user, perm, obj=None):
76 if not user.is_active:
77 return False
78 return perm in self.get_all_permissions(user, obj)
79
80 def has_module_perms(self, user, app_label):
81 """Return True if user has any permissions in the given app_label."""
82 if not user.is_active:
83 return False
84 for perm in self.get_all_permissions(user):
85 if perm[: perm.index(".")] == app_label:
86 return True
87 return False
88
```
Path: `website/members/models/membership.py`
Content:
```
1 import datetime
2
3 from django.conf import settings
4 from django.core.exceptions import ValidationError
5 from django.db import models
6 from django.utils import timezone
7 from django.utils.translation import gettext_lazy as _
8 from django.utils.translation import pgettext_lazy
9
10 from utils.snippets import overlaps
11
12
13 class Membership(models.Model):
14 MEMBER = "member"
15 BENEFACTOR = "benefactor"
16 HONORARY = "honorary"
17
18 MEMBERSHIP_TYPES = (
19 (MEMBER, _("Member")),
20 (BENEFACTOR, _("Benefactor")),
21 (HONORARY, _("Honorary Member")),
22 )
23
24 type = models.CharField(
25 max_length=40,
26 choices=MEMBERSHIP_TYPES,
27 verbose_name=_("Membership type"),
28 )
29
30 user = models.ForeignKey(
31 settings.AUTH_USER_MODEL,
32 on_delete=models.CASCADE,
33 verbose_name=_("User"),
34 )
35
36 since = models.DateField(
37 verbose_name=_("Membership since"),
38 help_text=_("The date the member started holding this membership."),
39 default=datetime.date.today,
40 )
41
42 until = models.DateField(
43 verbose_name=_("Membership until"),
44 help_text=_("The date the member stops holding this membership."),
45 blank=True,
46 null=True,
47 )
48
49 def __str__(self):
50 s = _("Membership of type {} for {} ({}) starting {}").format(
51 self.get_type_display(),
52 self.user.get_full_name(),
53 self.user.username,
54 self.since,
55 )
56 if self.until is not None:
57 s += pgettext_lazy("Membership until x", " until {}").format(self.until)
58 return s
59
60 def clean(self):
61 super().clean()
62
63 errors = {}
64 if self.until and (not self.since or self.until < self.since):
65 raise ValidationError({"until": _("End date can't be before start date")})
66
67 if self.since is not None:
68 memberships = self.user.membership_set.all()
69 if overlaps(self, memberships):
70 errors.update(
71 {
72 "since": _("A membership already exists for that period"),
73 "until": _("A membership already exists for that period"),
74 }
75 )
76
77 if errors:
78 raise ValidationError(errors)
79
80 def is_active(self):
81 return not self.until or self.until > timezone.now().date()
82
```
Path: `website/members/models/member.py`
Content:
```
1 import logging
2 import operator
3 from datetime import timedelta
4 from functools import reduce
5
6 from django.contrib.auth.models import User, UserManager
7 from django.db.models import Q
8 from django.urls import reverse
9 from django.utils import timezone
10 from django.utils.functional import cached_property
11 from django.utils.translation import gettext_lazy as _
12
13 from activemembers.models import MemberGroup, MemberGroupMembership
14
15 logger = logging.getLogger(__name__)
16
17
18 class MemberManager(UserManager):
19 """Get all members, i.e. all users with a profile."""
20
21 def get_queryset(self):
22 return super().get_queryset().exclude(profile=None)
23
24
25 class ActiveMemberManager(MemberManager):
26 """Get all active members, i.e. who have a committee membership."""
27
28 def get_queryset(self):
29 """Select all committee members."""
30 active_memberships = MemberGroupMembership.active_objects.filter(
31 group__board=None
32 ).filter(group__society=None)
33
34 return (
35 super()
36 .get_queryset()
37 .filter(membergroupmembership__in=active_memberships)
38 .distinct()
39 )
40
41
42 class CurrentMemberManager(MemberManager):
43 """Get all members with an active membership."""
44
45 def get_queryset(self):
46 """Select all members who have a current membership."""
47 return (
48 super()
49 .get_queryset()
50 .exclude(membership=None)
51 .filter(
52 Q(membership__until__isnull=True)
53 | Q(membership__until__gt=timezone.now().date())
54 )
55 .distinct()
56 )
57
58 def with_birthdays_in_range(self, from_date, to_date):
59 """Select all who are currently a Thalia member and have a birthday within the specified range.
60
61 :param from_date: the start of the range (inclusive)
62 :param to_date: the end of the range (inclusive)
63 :paramtype from_date: datetime
64 :paramtype to_date: datetime
65
66 :return: the filtered queryset
67 :rtype: Queryset
68 """
69 queryset = self.get_queryset().filter(profile__birthday__lte=to_date)
70
71 if (to_date - from_date).days >= 366:
72 # 366 is important to also account for leap years
73 # Everyone that's born before to_date has a birthday
74 return queryset
75
76 delta = to_date - from_date
77 dates = [from_date + timedelta(days=i) for i in range(delta.days + 1)]
78 monthdays = [
79 {"profile__birthday__month": d.month, "profile__birthday__day": d.day}
80 for d in dates
81 ]
82 # Don't get me started (basically, we are making a giant OR query with
83 # all days and months that are in the range)
84 query = reduce(operator.or_, [Q(**d) for d in monthdays])
85 return queryset.filter(query)
86
87
88 class Member(User):
89 class Meta:
90 proxy = True
91 ordering = ("first_name", "last_name")
92
93 objects = MemberManager()
94 current_members = CurrentMemberManager()
95 active_members = ActiveMemberManager()
96
97 def __str__(self):
98 return f"{self.get_full_name()} ({self.username})"
99
100 def refresh_from_db(self, **kwargs):
101 # Clear the cached latest_membership
102 if hasattr(self, "_latest_membership"):
103 del self._latest_membership
104 if hasattr(self, "latest_membership"):
105 del self.latest_membership
106
107 return super().refresh_from_db(**kwargs)
108
109 @property
110 def current_membership(self):
111 """Return the currently active membership of the user, None if not active.
112
113 Warning: this property uses the *cached* `latest_membership`.
114 You can use `refresh_from_db` to clear it.
115 """
116 membership = self.latest_membership
117 if membership and not membership.is_active():
118 return None
119 return membership
120
121 @cached_property
122 def latest_membership(self):
123 """Get the most recent membership of this user.
124
125 Warning: this property is cached.
126 You can use `refresh_from_db` to clear it.
127 """
128 # Use membership from a Prefetch object if available.
129 if hasattr(self, "_latest_membership"):
130 return self._latest_membership[0]
131
132 if not self.membership_set.exists():
133 return None
134 return self.membership_set.latest("since")
135
136 @property
137 def earliest_membership(self):
138 """Get the earliest membership of this user."""
139 if not self.membership_set.exists():
140 return None
141 return self.membership_set.earliest("since")
142
143 def has_been_member(self):
144 """Has this user ever been a member?."""
145 return self.membership_set.filter(type="member").exists()
146
147 def has_been_honorary_member(self):
148 """Has this user ever been an honorary member?."""
149 return self.membership_set.filter(type="honorary").exists()
150
151 def has_active_membership(self):
152 """Is this member currently active.
153
154 Tested by checking if the expiration date has passed.
155 """
156 return self.current_membership is not None
157
158 # Special properties for admin site
159 has_active_membership.boolean = True
160 has_active_membership.short_description = _("Is this user currently active")
161
162 @classmethod
163 def all_with_membership(cls, membership_type):
164 """Get all users who have a specific membership.
165
166 :param membership_type: The membership to select by
167 :return: List of users
168 :rtype: [Member]
169 """
170 return [
171 x
172 for x in cls.objects.all()
173 if x.current_membership and x.current_membership.type == membership_type
174 ]
175
176 @property
177 def can_attend_events(self):
178 """May this user attend events."""
179 if not self.profile:
180 return False
181
182 return (
183 self.profile.event_permissions in ("all", "no_drinks")
184 and self.current_membership is not None
185 )
186
187 @property
188 def can_attend_events_without_membership(self):
189 if not self.profile:
190 return False
191
192 return self.profile.event_permissions in ("all", "no_drinks")
193
194 def get_member_groups(self):
195 """Get the groups this user is a member of."""
196 now = timezone.now()
197 return MemberGroup.objects.filter(
198 Q(membergroupmembership__member=self),
199 Q(membergroupmembership__until=None)
200 | Q(
201 membergroupmembership__since__lte=now,
202 membergroupmembership__until__gte=now,
203 ),
204 active=True,
205 )
206
207 def get_absolute_url(self):
208 return reverse("members:profile", args=[str(self.pk)])
209
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/activemembers/backends.py b/website/activemembers/backends.py
--- a/website/activemembers/backends.py
+++ b/website/activemembers/backends.py
@@ -31,7 +31,11 @@
member = Member.objects.get(pk=user.pk)
except Member.DoesNotExist:
return set()
+
now = timezone.now()
+ if not member.has_active_membership():
+ return set()
+
groups = member.membergroup_set.filter(
Q(membergroupmembership__until=None)
| Q(
diff --git a/website/members/models/member.py b/website/members/models/member.py
--- a/website/members/models/member.py
+++ b/website/members/models/member.py
@@ -11,6 +11,7 @@
from django.utils.translation import gettext_lazy as _
from activemembers.models import MemberGroup, MemberGroupMembership
+from members.models.membership import Membership
logger = logging.getLogger(__name__)
@@ -107,7 +108,7 @@
return super().refresh_from_db(**kwargs)
@property
- def current_membership(self):
+ def current_membership(self) -> Membership | None:
"""Return the currently active membership of the user, None if not active.
Warning: this property uses the *cached* `latest_membership`.
@@ -119,7 +120,7 @@
return membership
@cached_property
- def latest_membership(self):
+ def latest_membership(self) -> Membership | None:
"""Get the most recent membership of this user.
Warning: this property is cached.
@@ -134,21 +135,21 @@
return self.membership_set.latest("since")
@property
- def earliest_membership(self):
+ def earliest_membership(self) -> Membership | None:
"""Get the earliest membership of this user."""
if not self.membership_set.exists():
return None
return self.membership_set.earliest("since")
- def has_been_member(self):
+ def has_been_member(self) -> bool:
"""Has this user ever been a member?."""
return self.membership_set.filter(type="member").exists()
- def has_been_honorary_member(self):
+ def has_been_honorary_member(self) -> bool:
"""Has this user ever been an honorary member?."""
return self.membership_set.filter(type="honorary").exists()
- def has_active_membership(self):
+ def has_active_membership(self) -> bool:
"""Is this member currently active.
Tested by checking if the expiration date has passed.
diff --git a/website/members/models/membership.py b/website/members/models/membership.py
--- a/website/members/models/membership.py
+++ b/website/members/models/membership.py
@@ -78,4 +78,5 @@
raise ValidationError(errors)
def is_active(self):
- return not self.until or self.until > timezone.now().date()
+ today = timezone.now().date()
+ return self.since <= today and (not self.until or self.until > today)
| {"golden_diff": "diff --git a/website/activemembers/backends.py b/website/activemembers/backends.py\n--- a/website/activemembers/backends.py\n+++ b/website/activemembers/backends.py\n@@ -31,7 +31,11 @@\n member = Member.objects.get(pk=user.pk)\n except Member.DoesNotExist:\n return set()\n+\n now = timezone.now()\n+ if not member.has_active_membership():\n+ return set()\n+\n groups = member.membergroup_set.filter(\n Q(membergroupmembership__until=None)\n | Q(\ndiff --git a/website/members/models/member.py b/website/members/models/member.py\n--- a/website/members/models/member.py\n+++ b/website/members/models/member.py\n@@ -11,6 +11,7 @@\n from django.utils.translation import gettext_lazy as _\n \n from activemembers.models import MemberGroup, MemberGroupMembership\n+from members.models.membership import Membership\n \n logger = logging.getLogger(__name__)\n \n@@ -107,7 +108,7 @@\n return super().refresh_from_db(**kwargs)\n \n @property\n- def current_membership(self):\n+ def current_membership(self) -> Membership | None:\n \"\"\"Return the currently active membership of the user, None if not active.\n \n Warning: this property uses the *cached* `latest_membership`.\n@@ -119,7 +120,7 @@\n return membership\n \n @cached_property\n- def latest_membership(self):\n+ def latest_membership(self) -> Membership | None:\n \"\"\"Get the most recent membership of this user.\n \n Warning: this property is cached.\n@@ -134,21 +135,21 @@\n return self.membership_set.latest(\"since\")\n \n @property\n- def earliest_membership(self):\n+ def earliest_membership(self) -> Membership | None:\n \"\"\"Get the earliest membership of this user.\"\"\"\n if not self.membership_set.exists():\n return None\n return self.membership_set.earliest(\"since\")\n \n- def has_been_member(self):\n+ def has_been_member(self) -> bool:\n \"\"\"Has this user ever been a member?.\"\"\"\n return self.membership_set.filter(type=\"member\").exists()\n \n- def has_been_honorary_member(self):\n+ def has_been_honorary_member(self) -> bool:\n \"\"\"Has this user ever been an honorary member?.\"\"\"\n return self.membership_set.filter(type=\"honorary\").exists()\n \n- def has_active_membership(self):\n+ def has_active_membership(self) -> bool:\n \"\"\"Is this member currently active.\n \n Tested by checking if the expiration date has passed.\ndiff --git a/website/members/models/membership.py b/website/members/models/membership.py\n--- a/website/members/models/membership.py\n+++ b/website/members/models/membership.py\n@@ -78,4 +78,5 @@\n raise ValidationError(errors)\n \n def is_active(self):\n- return not self.until or self.until > timezone.now().date()\n+ today = timezone.now().date()\n+ return self.since <= today and (not self.until or self.until > today)\n", "issue": "What to do with active members who don't have memberships?\nI noticed that we have some/at-least-1 users who don't have an active membership any more, but who are part of a society (or probably also committee). That's not really allowed of course.\r\n\r\nI would like to think about and discuss how this should ideally be handled. I'm not sure if we would want to change any behaviour (and even if we decide something _should_ be done, it likely has no priority at all).\r\n\r\nThe case of committee/society members without a membership occurs naturally, for example for a short time when people forget to renew before september. Hence, I don't think it would be sensible to e.g. automatically end committee memberships at that point. 
(I also haven't read the HR to check whether people with no membership can be part of a committee or society officially.)\r\n\r\nIn principle, it would be the intern's responsibility to check for this kind of thing. However, that's probably not feasible, especially for societies. Secondarily, it makes sense that society chairs would be responsible, but those don't quite have an overview of Thalia memberships.\r\n\r\nWhile it's probably not really desirable to prevent committee memberships existing without thalia memberships, we could think about the corresponding permissions on the website, appearance on the committee/society pages, and possibly some kind of warning system.\r\n\r\n- Maybe it would be good to not allow people to use committee permissions when they don't have an active membership? On the other hand, there _might_ be cases where we do want non-members in a group? It would be good to check if the HR allows that.\r\n\r\n- Non-member group members do currently show up on the website. This is clearly nice for boards, but is also the case for societies and committees. I don't like the idea of hiding non-members as that would be a difference between boards and other groups.\r\n\r\n- Would it be worth it to warn people about this. For example, to periodically send an email to committee chairs or the board about active members who don't have a membership any more? This would be pretty easy to implement, but we should only do it if it's valuable.\r\n\r\n@WidadMajdoubi24 @JeeVee11 Perhaps you guys should give this some thought (but there's no rush).\r\n\r\n\n", "before_files": [{"content": "\"\"\"Authentication backend to check permissions.\"\"\"\nfrom django.contrib.auth.models import Permission\nfrom django.db.models import Q\nfrom django.utils import timezone\n\nfrom members.models import Member\n\n\nclass MemberGroupBackend:\n \"\"\"Check permissions against MemberGroups.\"\"\"\n\n def authenticate(self, *args, **kwargs):\n \"\"\"Not implemented in this backend.\"\"\"\n return\n\n def get_user(self, *args, **kwargs):\n \"\"\"Not implemented in this backend.\"\"\"\n return\n\n @staticmethod\n def _get_permissions(user, obj):\n if not user.is_active or user.is_anonymous or obj is not None:\n return set()\n\n perm_cache_name = \"_membergroup_perm_cache\"\n if not hasattr(user, perm_cache_name):\n if isinstance(user, Member):\n member = user\n else:\n try:\n member = Member.objects.get(pk=user.pk)\n except Member.DoesNotExist:\n return set()\n now = timezone.now()\n groups = member.membergroup_set.filter(\n Q(membergroupmembership__until=None)\n | Q(\n membergroupmembership__since__lte=now,\n membergroupmembership__until__gte=now,\n )\n )\n\n chair_permission_groups = member.membergroup_set.filter(\n Q(\n membergroupmembership__until=None,\n membergroupmembership__has_chair_permissions=True,\n )\n | Q(\n membergroupmembership__since__lte=now,\n membergroupmembership__until__gte=now,\n membergroupmembership__has_chair_permissions=True,\n )\n )\n perms = (\n Permission.objects.filter(\n Q(permissions_groups__in=groups)\n | Q(chair_permissions_groups__in=chair_permission_groups)\n )\n .values_list(\"content_type__app_label\", \"codename\")\n .order_by()\n )\n setattr(\n user,\n perm_cache_name,\n set(f\"{ct}.{name}\" for ct, name in perms),\n )\n return getattr(user, perm_cache_name)\n\n def get_all_permissions(self, user, obj=None):\n return self._get_permissions(user, obj)\n\n def get_group_permissions(self, user, obj=None):\n return self._get_permissions(user, obj)\n\n def 
has_perm(self, user, perm, obj=None):\n if not user.is_active:\n return False\n return perm in self.get_all_permissions(user, obj)\n\n def has_module_perms(self, user, app_label):\n \"\"\"Return True if user has any permissions in the given app_label.\"\"\"\n if not user.is_active:\n return False\n for perm in self.get_all_permissions(user):\n if perm[: perm.index(\".\")] == app_label:\n return True\n return False\n", "path": "website/activemembers/backends.py"}, {"content": "import datetime\n\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\nfrom django.utils.translation import pgettext_lazy\n\nfrom utils.snippets import overlaps\n\n\nclass Membership(models.Model):\n MEMBER = \"member\"\n BENEFACTOR = \"benefactor\"\n HONORARY = \"honorary\"\n\n MEMBERSHIP_TYPES = (\n (MEMBER, _(\"Member\")),\n (BENEFACTOR, _(\"Benefactor\")),\n (HONORARY, _(\"Honorary Member\")),\n )\n\n type = models.CharField(\n max_length=40,\n choices=MEMBERSHIP_TYPES,\n verbose_name=_(\"Membership type\"),\n )\n\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE,\n verbose_name=_(\"User\"),\n )\n\n since = models.DateField(\n verbose_name=_(\"Membership since\"),\n help_text=_(\"The date the member started holding this membership.\"),\n default=datetime.date.today,\n )\n\n until = models.DateField(\n verbose_name=_(\"Membership until\"),\n help_text=_(\"The date the member stops holding this membership.\"),\n blank=True,\n null=True,\n )\n\n def __str__(self):\n s = _(\"Membership of type {} for {} ({}) starting {}\").format(\n self.get_type_display(),\n self.user.get_full_name(),\n self.user.username,\n self.since,\n )\n if self.until is not None:\n s += pgettext_lazy(\"Membership until x\", \" until {}\").format(self.until)\n return s\n\n def clean(self):\n super().clean()\n\n errors = {}\n if self.until and (not self.since or self.until < self.since):\n raise ValidationError({\"until\": _(\"End date can't be before start date\")})\n\n if self.since is not None:\n memberships = self.user.membership_set.all()\n if overlaps(self, memberships):\n errors.update(\n {\n \"since\": _(\"A membership already exists for that period\"),\n \"until\": _(\"A membership already exists for that period\"),\n }\n )\n\n if errors:\n raise ValidationError(errors)\n\n def is_active(self):\n return not self.until or self.until > timezone.now().date()\n", "path": "website/members/models/membership.py"}, {"content": "import logging\nimport operator\nfrom datetime import timedelta\nfrom functools import reduce\n\nfrom django.contrib.auth.models import User, UserManager\nfrom django.db.models import Q\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import gettext_lazy as _\n\nfrom activemembers.models import MemberGroup, MemberGroupMembership\n\nlogger = logging.getLogger(__name__)\n\n\nclass MemberManager(UserManager):\n \"\"\"Get all members, i.e. all users with a profile.\"\"\"\n\n def get_queryset(self):\n return super().get_queryset().exclude(profile=None)\n\n\nclass ActiveMemberManager(MemberManager):\n \"\"\"Get all active members, i.e. 
who have a committee membership.\"\"\"\n\n def get_queryset(self):\n \"\"\"Select all committee members.\"\"\"\n active_memberships = MemberGroupMembership.active_objects.filter(\n group__board=None\n ).filter(group__society=None)\n\n return (\n super()\n .get_queryset()\n .filter(membergroupmembership__in=active_memberships)\n .distinct()\n )\n\n\nclass CurrentMemberManager(MemberManager):\n \"\"\"Get all members with an active membership.\"\"\"\n\n def get_queryset(self):\n \"\"\"Select all members who have a current membership.\"\"\"\n return (\n super()\n .get_queryset()\n .exclude(membership=None)\n .filter(\n Q(membership__until__isnull=True)\n | Q(membership__until__gt=timezone.now().date())\n )\n .distinct()\n )\n\n def with_birthdays_in_range(self, from_date, to_date):\n \"\"\"Select all who are currently a Thalia member and have a birthday within the specified range.\n\n :param from_date: the start of the range (inclusive)\n :param to_date: the end of the range (inclusive)\n :paramtype from_date: datetime\n :paramtype to_date: datetime\n\n :return: the filtered queryset\n :rtype: Queryset\n \"\"\"\n queryset = self.get_queryset().filter(profile__birthday__lte=to_date)\n\n if (to_date - from_date).days >= 366:\n # 366 is important to also account for leap years\n # Everyone that's born before to_date has a birthday\n return queryset\n\n delta = to_date - from_date\n dates = [from_date + timedelta(days=i) for i in range(delta.days + 1)]\n monthdays = [\n {\"profile__birthday__month\": d.month, \"profile__birthday__day\": d.day}\n for d in dates\n ]\n # Don't get me started (basically, we are making a giant OR query with\n # all days and months that are in the range)\n query = reduce(operator.or_, [Q(**d) for d in monthdays])\n return queryset.filter(query)\n\n\nclass Member(User):\n class Meta:\n proxy = True\n ordering = (\"first_name\", \"last_name\")\n\n objects = MemberManager()\n current_members = CurrentMemberManager()\n active_members = ActiveMemberManager()\n\n def __str__(self):\n return f\"{self.get_full_name()} ({self.username})\"\n\n def refresh_from_db(self, **kwargs):\n # Clear the cached latest_membership\n if hasattr(self, \"_latest_membership\"):\n del self._latest_membership\n if hasattr(self, \"latest_membership\"):\n del self.latest_membership\n\n return super().refresh_from_db(**kwargs)\n\n @property\n def current_membership(self):\n \"\"\"Return the currently active membership of the user, None if not active.\n\n Warning: this property uses the *cached* `latest_membership`.\n You can use `refresh_from_db` to clear it.\n \"\"\"\n membership = self.latest_membership\n if membership and not membership.is_active():\n return None\n return membership\n\n @cached_property\n def latest_membership(self):\n \"\"\"Get the most recent membership of this user.\n\n Warning: this property is cached.\n You can use `refresh_from_db` to clear it.\n \"\"\"\n # Use membership from a Prefetch object if available.\n if hasattr(self, \"_latest_membership\"):\n return self._latest_membership[0]\n\n if not self.membership_set.exists():\n return None\n return self.membership_set.latest(\"since\")\n\n @property\n def earliest_membership(self):\n \"\"\"Get the earliest membership of this user.\"\"\"\n if not self.membership_set.exists():\n return None\n return self.membership_set.earliest(\"since\")\n\n def has_been_member(self):\n \"\"\"Has this user ever been a member?.\"\"\"\n return self.membership_set.filter(type=\"member\").exists()\n\n def has_been_honorary_member(self):\n 
\"\"\"Has this user ever been an honorary member?.\"\"\"\n return self.membership_set.filter(type=\"honorary\").exists()\n\n def has_active_membership(self):\n \"\"\"Is this member currently active.\n\n Tested by checking if the expiration date has passed.\n \"\"\"\n return self.current_membership is not None\n\n # Special properties for admin site\n has_active_membership.boolean = True\n has_active_membership.short_description = _(\"Is this user currently active\")\n\n @classmethod\n def all_with_membership(cls, membership_type):\n \"\"\"Get all users who have a specific membership.\n\n :param membership_type: The membership to select by\n :return: List of users\n :rtype: [Member]\n \"\"\"\n return [\n x\n for x in cls.objects.all()\n if x.current_membership and x.current_membership.type == membership_type\n ]\n\n @property\n def can_attend_events(self):\n \"\"\"May this user attend events.\"\"\"\n if not self.profile:\n return False\n\n return (\n self.profile.event_permissions in (\"all\", \"no_drinks\")\n and self.current_membership is not None\n )\n\n @property\n def can_attend_events_without_membership(self):\n if not self.profile:\n return False\n\n return self.profile.event_permissions in (\"all\", \"no_drinks\")\n\n def get_member_groups(self):\n \"\"\"Get the groups this user is a member of.\"\"\"\n now = timezone.now()\n return MemberGroup.objects.filter(\n Q(membergroupmembership__member=self),\n Q(membergroupmembership__until=None)\n | Q(\n membergroupmembership__since__lte=now,\n membergroupmembership__until__gte=now,\n ),\n active=True,\n )\n\n def get_absolute_url(self):\n return reverse(\"members:profile\", args=[str(self.pk)])\n", "path": "website/members/models/member.py"}], "after_files": [{"content": "\"\"\"Authentication backend to check permissions.\"\"\"\nfrom django.contrib.auth.models import Permission\nfrom django.db.models import Q\nfrom django.utils import timezone\n\nfrom members.models import Member\n\n\nclass MemberGroupBackend:\n \"\"\"Check permissions against MemberGroups.\"\"\"\n\n def authenticate(self, *args, **kwargs):\n \"\"\"Not implemented in this backend.\"\"\"\n return\n\n def get_user(self, *args, **kwargs):\n \"\"\"Not implemented in this backend.\"\"\"\n return\n\n @staticmethod\n def _get_permissions(user, obj):\n if not user.is_active or user.is_anonymous or obj is not None:\n return set()\n\n perm_cache_name = \"_membergroup_perm_cache\"\n if not hasattr(user, perm_cache_name):\n if isinstance(user, Member):\n member = user\n else:\n try:\n member = Member.objects.get(pk=user.pk)\n except Member.DoesNotExist:\n return set()\n\n now = timezone.now()\n if not member.has_active_membership():\n return set()\n\n groups = member.membergroup_set.filter(\n Q(membergroupmembership__until=None)\n | Q(\n membergroupmembership__since__lte=now,\n membergroupmembership__until__gte=now,\n )\n )\n\n chair_permission_groups = member.membergroup_set.filter(\n Q(\n membergroupmembership__until=None,\n membergroupmembership__has_chair_permissions=True,\n )\n | Q(\n membergroupmembership__since__lte=now,\n membergroupmembership__until__gte=now,\n membergroupmembership__has_chair_permissions=True,\n )\n )\n perms = (\n Permission.objects.filter(\n Q(permissions_groups__in=groups)\n | Q(chair_permissions_groups__in=chair_permission_groups)\n )\n .values_list(\"content_type__app_label\", \"codename\")\n .order_by()\n )\n setattr(\n user,\n perm_cache_name,\n set(f\"{ct}.{name}\" for ct, name in perms),\n )\n return getattr(user, perm_cache_name)\n\n def 
get_all_permissions(self, user, obj=None):\n return self._get_permissions(user, obj)\n\n def get_group_permissions(self, user, obj=None):\n return self._get_permissions(user, obj)\n\n def has_perm(self, user, perm, obj=None):\n if not user.is_active:\n return False\n return perm in self.get_all_permissions(user, obj)\n\n def has_module_perms(self, user, app_label):\n \"\"\"Return True if user has any permissions in the given app_label.\"\"\"\n if not user.is_active:\n return False\n for perm in self.get_all_permissions(user):\n if perm[: perm.index(\".\")] == app_label:\n return True\n return False\n", "path": "website/activemembers/backends.py"}, {"content": "import datetime\n\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\nfrom django.utils.translation import pgettext_lazy\n\nfrom utils.snippets import overlaps\n\n\nclass Membership(models.Model):\n MEMBER = \"member\"\n BENEFACTOR = \"benefactor\"\n HONORARY = \"honorary\"\n\n MEMBERSHIP_TYPES = (\n (MEMBER, _(\"Member\")),\n (BENEFACTOR, _(\"Benefactor\")),\n (HONORARY, _(\"Honorary Member\")),\n )\n\n type = models.CharField(\n max_length=40,\n choices=MEMBERSHIP_TYPES,\n verbose_name=_(\"Membership type\"),\n )\n\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE,\n verbose_name=_(\"User\"),\n )\n\n since = models.DateField(\n verbose_name=_(\"Membership since\"),\n help_text=_(\"The date the member started holding this membership.\"),\n default=datetime.date.today,\n )\n\n until = models.DateField(\n verbose_name=_(\"Membership until\"),\n help_text=_(\"The date the member stops holding this membership.\"),\n blank=True,\n null=True,\n )\n\n def __str__(self):\n s = _(\"Membership of type {} for {} ({}) starting {}\").format(\n self.get_type_display(),\n self.user.get_full_name(),\n self.user.username,\n self.since,\n )\n if self.until is not None:\n s += pgettext_lazy(\"Membership until x\", \" until {}\").format(self.until)\n return s\n\n def clean(self):\n super().clean()\n\n errors = {}\n if self.until and (not self.since or self.until < self.since):\n raise ValidationError({\"until\": _(\"End date can't be before start date\")})\n\n if self.since is not None:\n memberships = self.user.membership_set.all()\n if overlaps(self, memberships):\n errors.update(\n {\n \"since\": _(\"A membership already exists for that period\"),\n \"until\": _(\"A membership already exists for that period\"),\n }\n )\n\n if errors:\n raise ValidationError(errors)\n\n def is_active(self):\n today = timezone.now().date()\n return self.since <= today and (not self.until or self.until > today)\n", "path": "website/members/models/membership.py"}, {"content": "import logging\nimport operator\nfrom datetime import timedelta\nfrom functools import reduce\n\nfrom django.contrib.auth.models import User, UserManager\nfrom django.db.models import Q\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import gettext_lazy as _\n\nfrom activemembers.models import MemberGroup, MemberGroupMembership\nfrom members.models.membership import Membership\n\nlogger = logging.getLogger(__name__)\n\n\nclass MemberManager(UserManager):\n \"\"\"Get all members, i.e. 
all users with a profile.\"\"\"\n\n def get_queryset(self):\n return super().get_queryset().exclude(profile=None)\n\n\nclass ActiveMemberManager(MemberManager):\n \"\"\"Get all active members, i.e. who have a committee membership.\"\"\"\n\n def get_queryset(self):\n \"\"\"Select all committee members.\"\"\"\n active_memberships = MemberGroupMembership.active_objects.filter(\n group__board=None\n ).filter(group__society=None)\n\n return (\n super()\n .get_queryset()\n .filter(membergroupmembership__in=active_memberships)\n .distinct()\n )\n\n\nclass CurrentMemberManager(MemberManager):\n \"\"\"Get all members with an active membership.\"\"\"\n\n def get_queryset(self):\n \"\"\"Select all members who have a current membership.\"\"\"\n return (\n super()\n .get_queryset()\n .exclude(membership=None)\n .filter(\n Q(membership__until__isnull=True)\n | Q(membership__until__gt=timezone.now().date())\n )\n .distinct()\n )\n\n def with_birthdays_in_range(self, from_date, to_date):\n \"\"\"Select all who are currently a Thalia member and have a birthday within the specified range.\n\n :param from_date: the start of the range (inclusive)\n :param to_date: the end of the range (inclusive)\n :paramtype from_date: datetime\n :paramtype to_date: datetime\n\n :return: the filtered queryset\n :rtype: Queryset\n \"\"\"\n queryset = self.get_queryset().filter(profile__birthday__lte=to_date)\n\n if (to_date - from_date).days >= 366:\n # 366 is important to also account for leap years\n # Everyone that's born before to_date has a birthday\n return queryset\n\n delta = to_date - from_date\n dates = [from_date + timedelta(days=i) for i in range(delta.days + 1)]\n monthdays = [\n {\"profile__birthday__month\": d.month, \"profile__birthday__day\": d.day}\n for d in dates\n ]\n # Don't get me started (basically, we are making a giant OR query with\n # all days and months that are in the range)\n query = reduce(operator.or_, [Q(**d) for d in monthdays])\n return queryset.filter(query)\n\n\nclass Member(User):\n class Meta:\n proxy = True\n ordering = (\"first_name\", \"last_name\")\n\n objects = MemberManager()\n current_members = CurrentMemberManager()\n active_members = ActiveMemberManager()\n\n def __str__(self):\n return f\"{self.get_full_name()} ({self.username})\"\n\n def refresh_from_db(self, **kwargs):\n # Clear the cached latest_membership\n if hasattr(self, \"_latest_membership\"):\n del self._latest_membership\n if hasattr(self, \"latest_membership\"):\n del self.latest_membership\n\n return super().refresh_from_db(**kwargs)\n\n @property\n def current_membership(self) -> Membership | None:\n \"\"\"Return the currently active membership of the user, None if not active.\n\n Warning: this property uses the *cached* `latest_membership`.\n You can use `refresh_from_db` to clear it.\n \"\"\"\n membership = self.latest_membership\n if membership and not membership.is_active():\n return None\n return membership\n\n @cached_property\n def latest_membership(self) -> Membership | None:\n \"\"\"Get the most recent membership of this user.\n\n Warning: this property is cached.\n You can use `refresh_from_db` to clear it.\n \"\"\"\n # Use membership from a Prefetch object if available.\n if hasattr(self, \"_latest_membership\"):\n return self._latest_membership[0]\n\n if not self.membership_set.exists():\n return None\n return self.membership_set.latest(\"since\")\n\n @property\n def earliest_membership(self) -> Membership | None:\n \"\"\"Get the earliest membership of this user.\"\"\"\n if not 
self.membership_set.exists():\n return None\n return self.membership_set.earliest(\"since\")\n\n def has_been_member(self) -> bool:\n \"\"\"Has this user ever been a member?.\"\"\"\n return self.membership_set.filter(type=\"member\").exists()\n\n def has_been_honorary_member(self) -> bool:\n \"\"\"Has this user ever been an honorary member?.\"\"\"\n return self.membership_set.filter(type=\"honorary\").exists()\n\n def has_active_membership(self) -> bool:\n \"\"\"Is this member currently active.\n\n Tested by checking if the expiration date has passed.\n \"\"\"\n return self.current_membership is not None\n\n # Special properties for admin site\n has_active_membership.boolean = True\n has_active_membership.short_description = _(\"Is this user currently active\")\n\n @classmethod\n def all_with_membership(cls, membership_type):\n \"\"\"Get all users who have a specific membership.\n\n :param membership_type: The membership to select by\n :return: List of users\n :rtype: [Member]\n \"\"\"\n return [\n x\n for x in cls.objects.all()\n if x.current_membership and x.current_membership.type == membership_type\n ]\n\n @property\n def can_attend_events(self):\n \"\"\"May this user attend events.\"\"\"\n if not self.profile:\n return False\n\n return (\n self.profile.event_permissions in (\"all\", \"no_drinks\")\n and self.current_membership is not None\n )\n\n @property\n def can_attend_events_without_membership(self):\n if not self.profile:\n return False\n\n return self.profile.event_permissions in (\"all\", \"no_drinks\")\n\n def get_member_groups(self):\n \"\"\"Get the groups this user is a member of.\"\"\"\n now = timezone.now()\n return MemberGroup.objects.filter(\n Q(membergroupmembership__member=self),\n Q(membergroupmembership__until=None)\n | Q(\n membergroupmembership__since__lte=now,\n membergroupmembership__until__gte=now,\n ),\n active=True,\n )\n\n def get_absolute_url(self):\n return reverse(\"members:profile\", args=[str(self.pk)])\n", "path": "website/members/models/member.py"}]} | 4,071 | 697 |
gh_patches_debug_41107 | rasdani/github-patches | git_diff | dj-stripe__dj-stripe-418 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Redirect other than 'home'
Would love if you'd add a settings variable to define what the "cancel plan" redirect would be. I don't have a URL with the name 'home', so this causes an error. Maybe I'm missing a way to change this.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `djstripe/views.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 .. module:: djstripe.webhooks.
4
5 :synopsis: dj-stripe - Views related to the djstripe app.
6
7 .. moduleauthor:: @kavdev, @pydanny, @lskillen, @wahuneke, @dollydagr, @chrissmejia
8 """
9 from __future__ import unicode_literals
10
11 import json
12 import logging
13
14 from braces.views import CsrfExemptMixin, FormValidMessageMixin, LoginRequiredMixin, SelectRelatedMixin
15 from django.contrib import messages
16 from django.contrib.auth import logout as auth_logout
17 from django.core.urlresolvers import reverse_lazy, reverse
18 from django.http import HttpResponse
19 from django.http.response import HttpResponseNotFound
20 from django.shortcuts import render, redirect
21 from django.utils.encoding import smart_str
22 from django.views.generic import DetailView, FormView, TemplateView, View
23 from stripe.error import StripeError
24
25 from . import settings as djstripe_settings
26 from .forms import PlanForm, CancelSubscriptionForm
27 from .mixins import PaymentsContextMixin, SubscriptionMixin
28 from .models import Customer, Event, EventProcessingException, Plan
29 from .sync import sync_subscriber
30 from .webhooks import TEST_EVENT_ID
31
32 logger = logging.getLogger(__name__)
33
34 # ============================================================================ #
35 # Account Views #
36 # ============================================================================ #
37
38
39 class AccountView(LoginRequiredMixin, SelectRelatedMixin, SubscriptionMixin, PaymentsContextMixin, TemplateView):
40 """Shows account details including customer and subscription details."""
41
42 template_name = "djstripe/account.html"
43
44
45 # ============================================================================ #
46 # Billing Views #
47 # ============================================================================ #
48
49 class ChangeCardView(LoginRequiredMixin, PaymentsContextMixin, DetailView):
50 """TODO: Needs to be refactored to leverage forms and context data."""
51
52 template_name = "djstripe/change_card.html"
53
54 def get_object(self):
55 """
56 Return a Customer object.
57
58 Ether returns the Customer object from the current class instance or
59 uses get_or_create.
60 """
61 if hasattr(self, "customer"):
62 return self.customer
63 self.customer, _created = Customer.get_or_create(
64 subscriber=djstripe_settings.subscriber_request_callback(self.request)
65 )
66 return self.customer
67
68 def post(self, request, *args, **kwargs):
69 """TODO: Raise a validation error when a stripe token isn't passed. Should be resolved when a form is used."""
70 customer = self.get_object()
71 try:
72 send_invoice = not customer.default_source
73 customer.add_card(
74 request.POST.get("stripe_token")
75 )
76 if send_invoice:
77 customer.send_invoice()
78 customer.retry_unpaid_invoices()
79 except StripeError as exc:
80 messages.info(request, "Stripe Error")
81 return render(
82 request,
83 self.template_name,
84 {
85 "customer": self.get_object(),
86 "stripe_error": str(exc)
87 }
88 )
89 messages.info(request, "Your card is now updated.")
90 return redirect(self.get_post_success_url())
91
92 def get_post_success_url(self):
93 """Make it easier to do custom dj-stripe integrations."""
94 return reverse("djstripe:account")
95
96
97 class HistoryView(LoginRequiredMixin, SelectRelatedMixin, DetailView):
98 """A view used to return customer history of invoices."""
99
100 template_name = "djstripe/history.html"
101 model = Customer
102 select_related = ["invoice"]
103
104 def get_object(self):
105 """Return a Customer object."""
106 customer, _created = Customer.get_or_create(
107 subscriber=djstripe_settings.subscriber_request_callback(self.request)
108 )
109 return customer
110
111
112 class SyncHistoryView(CsrfExemptMixin, LoginRequiredMixin, View):
113 """TODO: Needs to be refactored to leverage context data."""
114
115 template_name = "djstripe/includes/_history_table.html"
116
117 def post(self, request, *args, **kwargs):
118 """Render the template while injecting extra context."""
119 return render(
120 request,
121 self.template_name,
122 {"customer": sync_subscriber(djstripe_settings.subscriber_request_callback(request))}
123 )
124
125
126 # ============================================================================ #
127 # Subscription Views #
128 # ============================================================================ #
129
130 class ConfirmFormView(LoginRequiredMixin, FormValidMessageMixin, SubscriptionMixin, FormView):
131 """A view used to confirm customers into a subscription plan."""
132
133 form_class = PlanForm
134 template_name = "djstripe/confirm_form.html"
135 success_url = reverse_lazy("djstripe:history")
136 form_valid_message = "You are now subscribed!"
137
138 def get(self, request, *args, **kwargs):
139 """Override ConfirmFormView GET to perform extra validation.
140
141 - Returns 404 when no plan exists.
142 - Redirects to djstripe:subscribe when customer is already subscribed to this plan.
143 """
144 plan_id = self.kwargs['plan_id']
145
146 if not Plan.objects.filter(id=plan_id).exists():
147 return HttpResponseNotFound()
148
149 customer, _created = Customer.get_or_create(
150 subscriber=djstripe_settings.subscriber_request_callback(self.request)
151 )
152
153 if (customer.subscription and str(customer.subscription.plan.id) == plan_id and
154 customer.subscription.is_valid()):
155 message = "You already subscribed to this plan"
156 messages.info(request, message, fail_silently=True)
157 return redirect("djstripe:subscribe")
158
159 return super(ConfirmFormView, self).get(request, *args, **kwargs)
160
161 def get_context_data(self, *args, **kwargs):
162 """Return ConfirmFormView's context with plan_id."""
163 context = super(ConfirmFormView, self).get_context_data(**kwargs)
164 context['plan'] = Plan.objects.get(id=self.kwargs['plan_id'])
165 return context
166
167 def post(self, request, *args, **kwargs):
168 """
169 Handle POST requests.
170
171 Instantiates a form instance with the passed POST variables and
172 then checks for validity.
173 """
174 form_class = self.get_form_class()
175 form = self.get_form(form_class)
176 if form.is_valid():
177 try:
178 customer, _created = Customer.get_or_create(
179 subscriber=djstripe_settings.subscriber_request_callback(self.request)
180 )
181 customer.add_card(self.request.POST.get("stripe_token"))
182 customer.subscribe(form.cleaned_data["plan"])
183 except StripeError as exc:
184 form.add_error(None, str(exc))
185 return self.form_invalid(form)
186 return self.form_valid(form)
187 else:
188 return self.form_invalid(form)
189
190
191 class SubscribeView(LoginRequiredMixin, SubscriptionMixin, TemplateView):
192 """A view to render the subscribe template."""
193
194 template_name = "djstripe/subscribe.html"
195
196
197 class ChangePlanView(LoginRequiredMixin, FormValidMessageMixin, SubscriptionMixin, FormView):
198 """
199 A view used to change a Customers plan.
200
201 TODO: Work in a trial_days kwarg.
202
203 Also, this should be combined with ConfirmFormView.
204 """
205
206 form_class = PlanForm
207 template_name = "djstripe/confirm_form.html"
208 success_url = reverse_lazy("djstripe:history")
209 form_valid_message = "You've just changed your plan!"
210
211 def post(self, request, *args, **kwargs):
212 """Handle a Customer changing a plan.
213
214 Handles upgrading a plan as well. Throws an error when Customer is not subscribed to any plan.
215 """
216 form = PlanForm(request.POST)
217
218 customer, _created = Customer.get_or_create(
219 subscriber=djstripe_settings.subscriber_request_callback(self.request)
220 )
221
222 if not customer.subscription:
223 form.add_error(None, "You must already be subscribed to a plan before you can change it.")
224 return self.form_invalid(form)
225
226 if form.is_valid():
227 try:
228 selected_plan = form.cleaned_data["plan"]
229
230 # When a customer upgrades their plan, and DJSTRIPE_PRORATION_POLICY_FOR_UPGRADES is set to True,
231 # we force the proration of the current plan and use it towards the upgraded plan,
232 # no matter what DJSTRIPE_PRORATION_POLICY is set to.
233 if djstripe_settings.PRORATION_POLICY_FOR_UPGRADES:
234 # Is it an upgrade?
235 if selected_plan.amount > customer.subscription.plan.amount:
236 customer.subscription.update(plan=selected_plan, prorate=True)
237 else:
238 customer.subscription.update(plan=selected_plan)
239 else:
240 customer.subscription.update(plan=selected_plan)
241 except StripeError as exc:
242 form.add_error(None, str(exc))
243 return self.form_invalid(form)
244 return self.form_valid(form)
245 else:
246 return self.form_invalid(form)
247
248
249 class CancelSubscriptionView(LoginRequiredMixin, SubscriptionMixin, FormView):
250 """A view used to cancel a Customer's subscription."""
251
252 template_name = "djstripe/cancel_subscription.html"
253 form_class = CancelSubscriptionForm
254 success_url = reverse_lazy("djstripe:account")
255
256 def form_valid(self, form):
257 """Handle canceling the Customer's subscription."""
258 customer, _created = Customer.get_or_create(
259 subscriber=djstripe_settings.subscriber_request_callback(self.request)
260 )
261 subscription = customer.subscription.cancel()
262
263 if subscription.status == subscription.STATUS_CANCELED:
264 # If no pro-rate, they get kicked right out.
265 messages.info(self.request, "Your subscription is now cancelled.")
266 # logout the user
267 auth_logout(self.request)
268 return redirect("home")
269 else:
270 # If pro-rate, they get some time to stay.
271 messages.info(self.request, "Your subscription status is now '{status}' until '{period_end}'".format(
272 status=subscription.status, period_end=subscription.current_period_end)
273 )
274
275 return super(CancelSubscriptionView, self).form_valid(form)
276
277
278 # ============================================================================ #
279 # Web Services #
280 # ============================================================================ #
281
282
283 class WebHook(CsrfExemptMixin, View):
284 """A view used to handle webhooks."""
285
286 def post(self, request, *args, **kwargs):
287 """
288 Create an Event object based on request data.
289
290 Creates an EventProcessingException if the webhook Event is a duplicate.
291 """
292 body = smart_str(request.body)
293 data = json.loads(body)
294
295 if data['id'] == TEST_EVENT_ID:
296 logger.info("Test webhook received: {}".format(data['type']))
297 return HttpResponse()
298
299 if Event.stripe_objects.exists_by_json(data):
300 EventProcessingException.objects.create(
301 data=data,
302 message="Duplicate event record",
303 traceback=""
304 )
305 else:
306 event = Event._create_from_stripe_object(data)
307 event.validate()
308
309 if djstripe_settings.WEBHOOK_EVENT_CALLBACK:
310 djstripe_settings.WEBHOOK_EVENT_CALLBACK(event)
311 else:
312 event.process()
313
314 return HttpResponse()
315
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/djstripe/views.py b/djstripe/views.py
--- a/djstripe/views.py
+++ b/djstripe/views.py
@@ -13,12 +13,13 @@
from braces.views import CsrfExemptMixin, FormValidMessageMixin, LoginRequiredMixin, SelectRelatedMixin
from django.contrib import messages
-from django.contrib.auth import logout as auth_logout
+from django.contrib.auth import logout as auth_logout, REDIRECT_FIELD_NAME
from django.core.urlresolvers import reverse_lazy, reverse
from django.http import HttpResponse
from django.http.response import HttpResponseNotFound
from django.shortcuts import render, redirect
from django.utils.encoding import smart_str
+from django.utils.http import is_safe_url
from django.views.generic import DetailView, FormView, TemplateView, View
from stripe.error import StripeError
@@ -252,6 +253,24 @@
template_name = "djstripe/cancel_subscription.html"
form_class = CancelSubscriptionForm
success_url = reverse_lazy("djstripe:account")
+ redirect_url = reverse_lazy("home")
+
+ # messages
+ subscription_cancel_message = "Your subscription is now cancelled."
+ subscription_status_message = "Your subscription status is now '{status}' until '{period_end}'"
+
+ def get_redirect_url(self):
+ """
+ Return the URL to redirect to when canceling is successful.
+ Looks in query string for ?next, ensuring it is on the same domain.
+ """
+ next = self.request.GET.get(REDIRECT_FIELD_NAME)
+
+ # is_safe_url() will ensure we don't redirect to another domain
+ if next and is_safe_url(next):
+ return next
+ else:
+ return self.redirect_url
def form_valid(self, form):
"""Handle canceling the Customer's subscription."""
@@ -261,19 +280,24 @@
subscription = customer.subscription.cancel()
if subscription.status == subscription.STATUS_CANCELED:
- # If no pro-rate, they get kicked right out.
- messages.info(self.request, "Your subscription is now cancelled.")
- # logout the user
- auth_logout(self.request)
- return redirect("home")
+ return self.status_cancel()
else:
# If pro-rate, they get some time to stay.
- messages.info(self.request, "Your subscription status is now '{status}' until '{period_end}'".format(
+ messages.info(self.request, self.subscription_status_message.format(
status=subscription.status, period_end=subscription.current_period_end)
)
return super(CancelSubscriptionView, self).form_valid(form)
+ def status_cancel(self):
+ """Triggered when the subscription is immediately canceled (not pro-rated)"""
+ # If no pro-rate, they get kicked right out.
+ messages.info(self.request, self.subscription_cancel_message)
+ # logout the user
+ auth_logout(self.request)
+ # Redirect to next url
+ return redirect(self.get_redirect_url())
+
# ============================================================================ #
# Web Services #
| {"golden_diff": "diff --git a/djstripe/views.py b/djstripe/views.py\n--- a/djstripe/views.py\n+++ b/djstripe/views.py\n@@ -13,12 +13,13 @@\n \n from braces.views import CsrfExemptMixin, FormValidMessageMixin, LoginRequiredMixin, SelectRelatedMixin\n from django.contrib import messages\n-from django.contrib.auth import logout as auth_logout\n+from django.contrib.auth import logout as auth_logout, REDIRECT_FIELD_NAME\n from django.core.urlresolvers import reverse_lazy, reverse\n from django.http import HttpResponse\n from django.http.response import HttpResponseNotFound\n from django.shortcuts import render, redirect\n from django.utils.encoding import smart_str\n+from django.utils.http import is_safe_url\n from django.views.generic import DetailView, FormView, TemplateView, View\n from stripe.error import StripeError\n \n@@ -252,6 +253,24 @@\n template_name = \"djstripe/cancel_subscription.html\"\n form_class = CancelSubscriptionForm\n success_url = reverse_lazy(\"djstripe:account\")\n+ redirect_url = reverse_lazy(\"home\")\n+\n+ # messages\n+ subscription_cancel_message = \"Your subscription is now cancelled.\"\n+ subscription_status_message = \"Your subscription status is now '{status}' until '{period_end}'\"\n+\n+ def get_redirect_url(self):\n+ \"\"\"\n+ Return the URL to redirect to when canceling is successful.\n+ Looks in query string for ?next, ensuring it is on the same domain.\n+ \"\"\"\n+ next = self.request.GET.get(REDIRECT_FIELD_NAME)\n+\n+ # is_safe_url() will ensure we don't redirect to another domain\n+ if next and is_safe_url(next):\n+ return next\n+ else:\n+ return self.redirect_url\n \n def form_valid(self, form):\n \"\"\"Handle canceling the Customer's subscription.\"\"\"\n@@ -261,19 +280,24 @@\n subscription = customer.subscription.cancel()\n \n if subscription.status == subscription.STATUS_CANCELED:\n- # If no pro-rate, they get kicked right out.\n- messages.info(self.request, \"Your subscription is now cancelled.\")\n- # logout the user\n- auth_logout(self.request)\n- return redirect(\"home\")\n+ return self.status_cancel()\n else:\n # If pro-rate, they get some time to stay.\n- messages.info(self.request, \"Your subscription status is now '{status}' until '{period_end}'\".format(\n+ messages.info(self.request, self.subscription_status_message.format(\n status=subscription.status, period_end=subscription.current_period_end)\n )\n \n return super(CancelSubscriptionView, self).form_valid(form)\n \n+ def status_cancel(self):\n+ \"\"\"Triggered when the subscription is immediately canceled (not pro-rated)\"\"\"\n+ # If no pro-rate, they get kicked right out.\n+ messages.info(self.request, self.subscription_cancel_message)\n+ # logout the user\n+ auth_logout(self.request)\n+ # Redirect to next url\n+ return redirect(self.get_redirect_url())\n+\n \n # ============================================================================ #\n # Web Services #\n", "issue": "Redirect other than 'home'\nWould love if you'd add a settings variable to define what the \"cancel plan\" redirect would be. I dont have a URL with the name 'home', so this causes an error. Maybe I'm missing a way to change this.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n.. module:: djstripe.webhooks.\n\n :synopsis: dj-stripe - Views related to the djstripe app.\n\n.. 
moduleauthor:: @kavdev, @pydanny, @lskillen, @wahuneke, @dollydagr, @chrissmejia\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport json\nimport logging\n\nfrom braces.views import CsrfExemptMixin, FormValidMessageMixin, LoginRequiredMixin, SelectRelatedMixin\nfrom django.contrib import messages\nfrom django.contrib.auth import logout as auth_logout\nfrom django.core.urlresolvers import reverse_lazy, reverse\nfrom django.http import HttpResponse\nfrom django.http.response import HttpResponseNotFound\nfrom django.shortcuts import render, redirect\nfrom django.utils.encoding import smart_str\nfrom django.views.generic import DetailView, FormView, TemplateView, View\nfrom stripe.error import StripeError\n\nfrom . import settings as djstripe_settings\nfrom .forms import PlanForm, CancelSubscriptionForm\nfrom .mixins import PaymentsContextMixin, SubscriptionMixin\nfrom .models import Customer, Event, EventProcessingException, Plan\nfrom .sync import sync_subscriber\nfrom .webhooks import TEST_EVENT_ID\n\nlogger = logging.getLogger(__name__)\n\n# ============================================================================ #\n# Account Views #\n# ============================================================================ #\n\n\nclass AccountView(LoginRequiredMixin, SelectRelatedMixin, SubscriptionMixin, PaymentsContextMixin, TemplateView):\n \"\"\"Shows account details including customer and subscription details.\"\"\"\n\n template_name = \"djstripe/account.html\"\n\n\n# ============================================================================ #\n# Billing Views #\n# ============================================================================ #\n\nclass ChangeCardView(LoginRequiredMixin, PaymentsContextMixin, DetailView):\n \"\"\"TODO: Needs to be refactored to leverage forms and context data.\"\"\"\n\n template_name = \"djstripe/change_card.html\"\n\n def get_object(self):\n \"\"\"\n Return a Customer object.\n\n Ether returns the Customer object from the current class instance or\n uses get_or_create.\n \"\"\"\n if hasattr(self, \"customer\"):\n return self.customer\n self.customer, _created = Customer.get_or_create(\n subscriber=djstripe_settings.subscriber_request_callback(self.request)\n )\n return self.customer\n\n def post(self, request, *args, **kwargs):\n \"\"\"TODO: Raise a validation error when a stripe token isn't passed. 
Should be resolved when a form is used.\"\"\"\n customer = self.get_object()\n try:\n send_invoice = not customer.default_source\n customer.add_card(\n request.POST.get(\"stripe_token\")\n )\n if send_invoice:\n customer.send_invoice()\n customer.retry_unpaid_invoices()\n except StripeError as exc:\n messages.info(request, \"Stripe Error\")\n return render(\n request,\n self.template_name,\n {\n \"customer\": self.get_object(),\n \"stripe_error\": str(exc)\n }\n )\n messages.info(request, \"Your card is now updated.\")\n return redirect(self.get_post_success_url())\n\n def get_post_success_url(self):\n \"\"\"Make it easier to do custom dj-stripe integrations.\"\"\"\n return reverse(\"djstripe:account\")\n\n\nclass HistoryView(LoginRequiredMixin, SelectRelatedMixin, DetailView):\n \"\"\"A view used to return customer history of invoices.\"\"\"\n\n template_name = \"djstripe/history.html\"\n model = Customer\n select_related = [\"invoice\"]\n\n def get_object(self):\n \"\"\"Return a Customer object.\"\"\"\n customer, _created = Customer.get_or_create(\n subscriber=djstripe_settings.subscriber_request_callback(self.request)\n )\n return customer\n\n\nclass SyncHistoryView(CsrfExemptMixin, LoginRequiredMixin, View):\n \"\"\"TODO: Needs to be refactored to leverage context data.\"\"\"\n\n template_name = \"djstripe/includes/_history_table.html\"\n\n def post(self, request, *args, **kwargs):\n \"\"\"Render the template while injecting extra context.\"\"\"\n return render(\n request,\n self.template_name,\n {\"customer\": sync_subscriber(djstripe_settings.subscriber_request_callback(request))}\n )\n\n\n# ============================================================================ #\n# Subscription Views #\n# ============================================================================ #\n\nclass ConfirmFormView(LoginRequiredMixin, FormValidMessageMixin, SubscriptionMixin, FormView):\n \"\"\"A view used to confirm customers into a subscription plan.\"\"\"\n\n form_class = PlanForm\n template_name = \"djstripe/confirm_form.html\"\n success_url = reverse_lazy(\"djstripe:history\")\n form_valid_message = \"You are now subscribed!\"\n\n def get(self, request, *args, **kwargs):\n \"\"\"Override ConfirmFormView GET to perform extra validation.\n\n - Returns 404 when no plan exists.\n - Redirects to djstripe:subscribe when customer is already subscribed to this plan.\n \"\"\"\n plan_id = self.kwargs['plan_id']\n\n if not Plan.objects.filter(id=plan_id).exists():\n return HttpResponseNotFound()\n\n customer, _created = Customer.get_or_create(\n subscriber=djstripe_settings.subscriber_request_callback(self.request)\n )\n\n if (customer.subscription and str(customer.subscription.plan.id) == plan_id and\n customer.subscription.is_valid()):\n message = \"You already subscribed to this plan\"\n messages.info(request, message, fail_silently=True)\n return redirect(\"djstripe:subscribe\")\n\n return super(ConfirmFormView, self).get(request, *args, **kwargs)\n\n def get_context_data(self, *args, **kwargs):\n \"\"\"Return ConfirmFormView's context with plan_id.\"\"\"\n context = super(ConfirmFormView, self).get_context_data(**kwargs)\n context['plan'] = Plan.objects.get(id=self.kwargs['plan_id'])\n return context\n\n def post(self, request, *args, **kwargs):\n \"\"\"\n Handle POST requests.\n\n Instantiates a form instance with the passed POST variables and\n then checks for validity.\n \"\"\"\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n if form.is_valid():\n try:\n customer, _created = 
Customer.get_or_create(\n subscriber=djstripe_settings.subscriber_request_callback(self.request)\n )\n customer.add_card(self.request.POST.get(\"stripe_token\"))\n customer.subscribe(form.cleaned_data[\"plan\"])\n except StripeError as exc:\n form.add_error(None, str(exc))\n return self.form_invalid(form)\n return self.form_valid(form)\n else:\n return self.form_invalid(form)\n\n\nclass SubscribeView(LoginRequiredMixin, SubscriptionMixin, TemplateView):\n \"\"\"A view to render the subscribe template.\"\"\"\n\n template_name = \"djstripe/subscribe.html\"\n\n\nclass ChangePlanView(LoginRequiredMixin, FormValidMessageMixin, SubscriptionMixin, FormView):\n \"\"\"\n A view used to change a Customers plan.\n\n TODO: Work in a trial_days kwarg.\n\n Also, this should be combined with ConfirmFormView.\n \"\"\"\n\n form_class = PlanForm\n template_name = \"djstripe/confirm_form.html\"\n success_url = reverse_lazy(\"djstripe:history\")\n form_valid_message = \"You've just changed your plan!\"\n\n def post(self, request, *args, **kwargs):\n \"\"\"Handle a Customer changing a plan.\n\n Handles upgrading a plan as well. Throws an error when Customer is not subscribed to any plan.\n \"\"\"\n form = PlanForm(request.POST)\n\n customer, _created = Customer.get_or_create(\n subscriber=djstripe_settings.subscriber_request_callback(self.request)\n )\n\n if not customer.subscription:\n form.add_error(None, \"You must already be subscribed to a plan before you can change it.\")\n return self.form_invalid(form)\n\n if form.is_valid():\n try:\n selected_plan = form.cleaned_data[\"plan\"]\n\n # When a customer upgrades their plan, and DJSTRIPE_PRORATION_POLICY_FOR_UPGRADES is set to True,\n # we force the proration of the current plan and use it towards the upgraded plan,\n # no matter what DJSTRIPE_PRORATION_POLICY is set to.\n if djstripe_settings.PRORATION_POLICY_FOR_UPGRADES:\n # Is it an upgrade?\n if selected_plan.amount > customer.subscription.plan.amount:\n customer.subscription.update(plan=selected_plan, prorate=True)\n else:\n customer.subscription.update(plan=selected_plan)\n else:\n customer.subscription.update(plan=selected_plan)\n except StripeError as exc:\n form.add_error(None, str(exc))\n return self.form_invalid(form)\n return self.form_valid(form)\n else:\n return self.form_invalid(form)\n\n\nclass CancelSubscriptionView(LoginRequiredMixin, SubscriptionMixin, FormView):\n \"\"\"A view used to cancel a Customer's subscription.\"\"\"\n\n template_name = \"djstripe/cancel_subscription.html\"\n form_class = CancelSubscriptionForm\n success_url = reverse_lazy(\"djstripe:account\")\n\n def form_valid(self, form):\n \"\"\"Handle canceling the Customer's subscription.\"\"\"\n customer, _created = Customer.get_or_create(\n subscriber=djstripe_settings.subscriber_request_callback(self.request)\n )\n subscription = customer.subscription.cancel()\n\n if subscription.status == subscription.STATUS_CANCELED:\n # If no pro-rate, they get kicked right out.\n messages.info(self.request, \"Your subscription is now cancelled.\")\n # logout the user\n auth_logout(self.request)\n return redirect(\"home\")\n else:\n # If pro-rate, they get some time to stay.\n messages.info(self.request, \"Your subscription status is now '{status}' until '{period_end}'\".format(\n status=subscription.status, period_end=subscription.current_period_end)\n )\n\n return super(CancelSubscriptionView, self).form_valid(form)\n\n\n# ============================================================================ #\n# Web Services #\n# 
============================================================================ #\n\n\nclass WebHook(CsrfExemptMixin, View):\n \"\"\"A view used to handle webhooks.\"\"\"\n\n def post(self, request, *args, **kwargs):\n \"\"\"\n Create an Event object based on request data.\n\n Creates an EventProcessingException if the webhook Event is a duplicate.\n \"\"\"\n body = smart_str(request.body)\n data = json.loads(body)\n\n if data['id'] == TEST_EVENT_ID:\n logger.info(\"Test webhook received: {}\".format(data['type']))\n return HttpResponse()\n\n if Event.stripe_objects.exists_by_json(data):\n EventProcessingException.objects.create(\n data=data,\n message=\"Duplicate event record\",\n traceback=\"\"\n )\n else:\n event = Event._create_from_stripe_object(data)\n event.validate()\n\n if djstripe_settings.WEBHOOK_EVENT_CALLBACK:\n djstripe_settings.WEBHOOK_EVENT_CALLBACK(event)\n else:\n event.process()\n\n return HttpResponse()\n", "path": "djstripe/views.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n.. module:: djstripe.webhooks.\n\n :synopsis: dj-stripe - Views related to the djstripe app.\n\n.. moduleauthor:: @kavdev, @pydanny, @lskillen, @wahuneke, @dollydagr, @chrissmejia\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport json\nimport logging\n\nfrom braces.views import CsrfExemptMixin, FormValidMessageMixin, LoginRequiredMixin, SelectRelatedMixin\nfrom django.contrib import messages\nfrom django.contrib.auth import logout as auth_logout, REDIRECT_FIELD_NAME\nfrom django.core.urlresolvers import reverse_lazy, reverse\nfrom django.http import HttpResponse\nfrom django.http.response import HttpResponseNotFound\nfrom django.shortcuts import render, redirect\nfrom django.utils.encoding import smart_str\nfrom django.utils.http import is_safe_url\nfrom django.views.generic import DetailView, FormView, TemplateView, View\nfrom stripe.error import StripeError\n\nfrom . 
import settings as djstripe_settings\nfrom .forms import PlanForm, CancelSubscriptionForm\nfrom .mixins import PaymentsContextMixin, SubscriptionMixin\nfrom .models import Customer, Event, EventProcessingException, Plan\nfrom .sync import sync_subscriber\nfrom .webhooks import TEST_EVENT_ID\n\nlogger = logging.getLogger(__name__)\n\n# ============================================================================ #\n# Account Views #\n# ============================================================================ #\n\n\nclass AccountView(LoginRequiredMixin, SelectRelatedMixin, SubscriptionMixin, PaymentsContextMixin, TemplateView):\n \"\"\"Shows account details including customer and subscription details.\"\"\"\n\n template_name = \"djstripe/account.html\"\n\n\n# ============================================================================ #\n# Billing Views #\n# ============================================================================ #\n\nclass ChangeCardView(LoginRequiredMixin, PaymentsContextMixin, DetailView):\n \"\"\"TODO: Needs to be refactored to leverage forms and context data.\"\"\"\n\n template_name = \"djstripe/change_card.html\"\n\n def get_object(self):\n \"\"\"\n Return a Customer object.\n\n Ether returns the Customer object from the current class instance or\n uses get_or_create.\n \"\"\"\n if hasattr(self, \"customer\"):\n return self.customer\n self.customer, _created = Customer.get_or_create(\n subscriber=djstripe_settings.subscriber_request_callback(self.request)\n )\n return self.customer\n\n def post(self, request, *args, **kwargs):\n \"\"\"TODO: Raise a validation error when a stripe token isn't passed. Should be resolved when a form is used.\"\"\"\n customer = self.get_object()\n try:\n send_invoice = not customer.default_source\n customer.add_card(\n request.POST.get(\"stripe_token\")\n )\n if send_invoice:\n customer.send_invoice()\n customer.retry_unpaid_invoices()\n except StripeError as exc:\n messages.info(request, \"Stripe Error\")\n return render(\n request,\n self.template_name,\n {\n \"customer\": self.get_object(),\n \"stripe_error\": str(exc)\n }\n )\n messages.info(request, \"Your card is now updated.\")\n return redirect(self.get_post_success_url())\n\n def get_post_success_url(self):\n \"\"\"Make it easier to do custom dj-stripe integrations.\"\"\"\n return reverse(\"djstripe:account\")\n\n\nclass HistoryView(LoginRequiredMixin, SelectRelatedMixin, DetailView):\n \"\"\"A view used to return customer history of invoices.\"\"\"\n\n template_name = \"djstripe/history.html\"\n model = Customer\n select_related = [\"invoice\"]\n\n def get_object(self):\n \"\"\"Return a Customer object.\"\"\"\n customer, _created = Customer.get_or_create(\n subscriber=djstripe_settings.subscriber_request_callback(self.request)\n )\n return customer\n\n\nclass SyncHistoryView(CsrfExemptMixin, LoginRequiredMixin, View):\n \"\"\"TODO: Needs to be refactored to leverage context data.\"\"\"\n\n template_name = \"djstripe/includes/_history_table.html\"\n\n def post(self, request, *args, **kwargs):\n \"\"\"Render the template while injecting extra context.\"\"\"\n return render(\n request,\n self.template_name,\n {\"customer\": sync_subscriber(djstripe_settings.subscriber_request_callback(request))}\n )\n\n\n# ============================================================================ #\n# Subscription Views #\n# ============================================================================ #\n\nclass ConfirmFormView(LoginRequiredMixin, FormValidMessageMixin, SubscriptionMixin, 
FormView):\n \"\"\"A view used to confirm customers into a subscription plan.\"\"\"\n\n form_class = PlanForm\n template_name = \"djstripe/confirm_form.html\"\n success_url = reverse_lazy(\"djstripe:history\")\n form_valid_message = \"You are now subscribed!\"\n\n def get(self, request, *args, **kwargs):\n \"\"\"Override ConfirmFormView GET to perform extra validation.\n\n - Returns 404 when no plan exists.\n - Redirects to djstripe:subscribe when customer is already subscribed to this plan.\n \"\"\"\n plan_id = self.kwargs['plan_id']\n\n if not Plan.objects.filter(id=plan_id).exists():\n return HttpResponseNotFound()\n\n customer, _created = Customer.get_or_create(\n subscriber=djstripe_settings.subscriber_request_callback(self.request)\n )\n\n if (customer.subscription and str(customer.subscription.plan.id) == plan_id and\n customer.subscription.is_valid()):\n message = \"You already subscribed to this plan\"\n messages.info(request, message, fail_silently=True)\n return redirect(\"djstripe:subscribe\")\n\n return super(ConfirmFormView, self).get(request, *args, **kwargs)\n\n def get_context_data(self, *args, **kwargs):\n \"\"\"Return ConfirmFormView's context with plan_id.\"\"\"\n context = super(ConfirmFormView, self).get_context_data(**kwargs)\n context['plan'] = Plan.objects.get(id=self.kwargs['plan_id'])\n return context\n\n def post(self, request, *args, **kwargs):\n \"\"\"\n Handle POST requests.\n\n Instantiates a form instance with the passed POST variables and\n then checks for validity.\n \"\"\"\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n if form.is_valid():\n try:\n customer, _created = Customer.get_or_create(\n subscriber=djstripe_settings.subscriber_request_callback(self.request)\n )\n customer.add_card(self.request.POST.get(\"stripe_token\"))\n customer.subscribe(form.cleaned_data[\"plan\"])\n except StripeError as exc:\n form.add_error(None, str(exc))\n return self.form_invalid(form)\n return self.form_valid(form)\n else:\n return self.form_invalid(form)\n\n\nclass SubscribeView(LoginRequiredMixin, SubscriptionMixin, TemplateView):\n \"\"\"A view to render the subscribe template.\"\"\"\n\n template_name = \"djstripe/subscribe.html\"\n\n\nclass ChangePlanView(LoginRequiredMixin, FormValidMessageMixin, SubscriptionMixin, FormView):\n \"\"\"\n A view used to change a Customers plan.\n\n TODO: Work in a trial_days kwarg.\n\n Also, this should be combined with ConfirmFormView.\n \"\"\"\n\n form_class = PlanForm\n template_name = \"djstripe/confirm_form.html\"\n success_url = reverse_lazy(\"djstripe:history\")\n form_valid_message = \"You've just changed your plan!\"\n\n def post(self, request, *args, **kwargs):\n \"\"\"Handle a Customer changing a plan.\n\n Handles upgrading a plan as well. 
Throws an error when Customer is not subscribed to any plan.\n \"\"\"\n form = PlanForm(request.POST)\n\n customer, _created = Customer.get_or_create(\n subscriber=djstripe_settings.subscriber_request_callback(self.request)\n )\n\n if not customer.subscription:\n form.add_error(None, \"You must already be subscribed to a plan before you can change it.\")\n return self.form_invalid(form)\n\n if form.is_valid():\n try:\n selected_plan = form.cleaned_data[\"plan\"]\n\n # When a customer upgrades their plan, and DJSTRIPE_PRORATION_POLICY_FOR_UPGRADES is set to True,\n # we force the proration of the current plan and use it towards the upgraded plan,\n # no matter what DJSTRIPE_PRORATION_POLICY is set to.\n if djstripe_settings.PRORATION_POLICY_FOR_UPGRADES:\n # Is it an upgrade?\n if selected_plan.amount > customer.subscription.plan.amount:\n customer.subscription.update(plan=selected_plan, prorate=True)\n else:\n customer.subscription.update(plan=selected_plan)\n else:\n customer.subscription.update(plan=selected_plan)\n except StripeError as exc:\n form.add_error(None, str(exc))\n return self.form_invalid(form)\n return self.form_valid(form)\n else:\n return self.form_invalid(form)\n\n\nclass CancelSubscriptionView(LoginRequiredMixin, SubscriptionMixin, FormView):\n \"\"\"A view used to cancel a Customer's subscription.\"\"\"\n\n template_name = \"djstripe/cancel_subscription.html\"\n form_class = CancelSubscriptionForm\n success_url = reverse_lazy(\"djstripe:account\")\n redirect_url = reverse_lazy(\"home\")\n\n # messages\n subscription_cancel_message = \"Your subscription is now cancelled.\"\n subscription_status_message = \"Your subscription status is now '{status}' until '{period_end}'\"\n\n def get_redirect_url(self):\n \"\"\"\n Return the URL to redirect to when canceling is successful.\n Looks in query string for ?next, ensuring it is on the same domain.\n \"\"\"\n next = self.request.GET.get(REDIRECT_FIELD_NAME)\n\n # is_safe_url() will ensure we don't redirect to another domain\n if next and is_safe_url(next):\n return next\n else:\n return self.redirect_url\n\n def form_valid(self, form):\n \"\"\"Handle canceling the Customer's subscription.\"\"\"\n customer, _created = Customer.get_or_create(\n subscriber=djstripe_settings.subscriber_request_callback(self.request)\n )\n subscription = customer.subscription.cancel()\n\n if subscription.status == subscription.STATUS_CANCELED:\n return self.status_cancel()\n else:\n # If pro-rate, they get some time to stay.\n messages.info(self.request, self.subscription_status_message.format(\n status=subscription.status, period_end=subscription.current_period_end)\n )\n\n return super(CancelSubscriptionView, self).form_valid(form)\n\n def status_cancel(self):\n \"\"\"Triggered when the subscription is immediately canceled (not pro-rated)\"\"\"\n # If no pro-rate, they get kicked right out.\n messages.info(self.request, self.subscription_cancel_message)\n # logout the user\n auth_logout(self.request)\n # Redirect to next url\n return redirect(self.get_redirect_url())\n\n\n# ============================================================================ #\n# Web Services #\n# ============================================================================ #\n\n\nclass WebHook(CsrfExemptMixin, View):\n \"\"\"A view used to handle webhooks.\"\"\"\n\n def post(self, request, *args, **kwargs):\n \"\"\"\n Create an Event object based on request data.\n\n Creates an EventProcessingException if the webhook Event is a duplicate.\n \"\"\"\n body = 
smart_str(request.body)\n data = json.loads(body)\n\n if data['id'] == TEST_EVENT_ID:\n logger.info(\"Test webhook received: {}\".format(data['type']))\n return HttpResponse()\n\n if Event.stripe_objects.exists_by_json(data):\n EventProcessingException.objects.create(\n data=data,\n message=\"Duplicate event record\",\n traceback=\"\"\n )\n else:\n event = Event._create_from_stripe_object(data)\n event.validate()\n\n if djstripe_settings.WEBHOOK_EVENT_CALLBACK:\n djstripe_settings.WEBHOOK_EVENT_CALLBACK(event)\n else:\n event.process()\n\n return HttpResponse()\n", "path": "djstripe/views.py"}]} | 3,410 | 664 |
gh_patches_debug_32194 | rasdani/github-patches | git_diff | conan-io__conan-center-index-19174 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[package] onetbb/2021.3.0: Invalid character escape '\l'
### Description
For details on this, see: https://github.com/conan-io/conan/issues/10539
`conan_toolchain.cmake` is broken due to unescaped backslashes on Windows. See the log below.
### Package and Environment Details
* Package Name/Version: **onetbb/2021.3.0#e447f12564bd0a5df3f2bd5781cf5cc7**
* Operating System+version: **Windows 11**
* Compiler+version: **MSVC 2022**
* Docker image: **?**
* Conan version: **conan 2.0.9**
* Python version: **Python 3.7.9**
### Conan profile
======== Input profiles ========
Profile host:
[settings]
arch=x86
build_type=Debug
compiler=msvc
compiler.cppstd=14
compiler.runtime=dynamic
compiler.runtime_type=Debug
compiler.version=193
os=Windows
Profile build:
[settings]
arch=x86_64
build_type=Release
compiler=msvc
compiler.cppstd=14
compiler.runtime=dynamic
compiler.runtime_type=Release
compiler.version=193
os=Windows
### Steps to reproduce
Install (build) onetbb/2021.3.0#e447f12564bd0a5df3f2bd5781cf5cc7 with the CMakeDeps / CMakeToolchain generators.
### Logs
<details><summary>Click to expand log</summary>
```
CMake Error at C:/Users/pgroarke/.conan2/p/b/onetb3e83bbcb22a9a/b/build/generators/conan_toolchain.cmake:117 (set):
Syntax error in cmake code at
C:/Users/pgroarke/.conan2/p/b/onetb3e83bbcb22a9a/b/build/generators/conan_toolchain.cmake:117
when parsing string
C:/Users/pgroarke/.conan2/p/b/hwloc93107a9ec9dfe/p\lib\hwloc.lib
Invalid character escape '\l'.
```
</details>
--- END ISSUE ---
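The `Invalid character escape '\l'` error in the issue is a CMake parsing problem: inside quoted strings in `conan_toolchain.cmake`, backslashes from Windows paths (e.g. `p\lib\hwloc.lib`) are read as escape sequences, so any path injected into a toolchain variable has to use forward slashes. A minimal sketch of that normalization, assuming a small helper applied when filling the toolchain variables (the helper name and the hard-coded example folder are illustrative, not part of the recipe), could look like this:

```python
import os

def cmake_path(*parts: str) -> str:
    # CMake would interpret sequences such as "\l" or "\U" in a quoted string
    # as escapes, so join the parts and convert every backslash to "/".
    return os.path.join(*parts).replace("\\", "/")

# Illustrative use, mirroring how a recipe's generate() step would build the
# CMAKE_HWLOC_<version>_LIBRARY_PATH value from a package folder.
hwloc_package_folder = "C:\\Users\\dev\\.conan2\\p\\b\\hwloc0123456789abc\\p"  # hypothetical folder
print(cmake_path(hwloc_package_folder, "lib", "hwloc.lib"))
# -> C:/Users/dev/.conan2/p/b/hwloc0123456789abc/p/lib/hwloc.lib
```

This is the same idea the golden diff for this record applies inline, calling `.replace("\\", "/")` on each joined path before assigning it to `toolchain.variables`.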
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/onetbb/all/conanfile.py`
Content:
```
1 from conan import ConanFile
2 from conan.errors import ConanInvalidConfiguration
3 from conan.tools.build import cross_building
4 from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
5 from conan.tools.files import apply_conandata_patches, export_conandata_patches, copy, get, load, rmdir
6 from conan.tools.gnu import PkgConfigDeps
7 from conan.tools.scm import Version
8 import os
9 import re
10
11 required_conan_version = ">=1.53.0"
12
13
14 class OneTBBConan(ConanFile):
15 name = "onetbb"
16 license = "Apache-2.0"
17 url = "https://github.com/conan-io/conan-center-index"
18 homepage = "https://github.com/oneapi-src/oneTBB"
19 description = (
20 "oneAPI Threading Building Blocks (oneTBB) lets you easily write parallel C++"
21 " programs that take full advantage of multicore performance, that are portable, composable"
22 " and have future-proof scalability.")
23 topics = ("tbb", "threading", "parallelism", "tbbmalloc")
24
25 settings = "os", "arch", "compiler", "build_type"
26 options = {
27 "shared": [True, False],
28 "fPIC": [True, False],
29 "tbbmalloc": [True, False],
30 "tbbproxy": [True, False],
31 "tbbbind": [True, False],
32 "interprocedural_optimization": [True, False],
33 }
34 default_options = {
35 "shared": True,
36 "fPIC": True,
37 "tbbmalloc": True,
38 "tbbproxy": True,
39 "tbbbind": True,
40 "interprocedural_optimization": True,
41 }
42
43 @property
44 def _tbbbind_hwloc_version(self):
45 # TBB expects different variables depending on the version
46 return "2_5" if Version(self.version) >= "2021.4.0" else "2_4"
47
48 @property
49 def _tbbbind_supported(self):
50 return Version(self.version) >= "2021.1.1" and not self.settings.os == "Macos"
51
52 @property
53 def _tbbbind_build(self):
54 return self.options.get_safe("tbbbind", False) and self._tbbbind_supported
55
56 @property
57 def _tbbbind_explicit_hwloc(self):
58 # during cross-compilation, oneTBB does not search for HWLOC and we need to specify it explicitly
59 # but then oneTBB creates an imported SHARED target from provided paths, so we have to set shared=True
60 return self._tbbbind_build and cross_building(self)
61
62 def export_sources(self):
63 export_conandata_patches(self)
64
65 def config_options(self):
66 if self.settings.os == "Windows":
67 del self.options.fPIC
68 if not self._tbbbind_supported:
69 del self.options.tbbbind
70 if Version(self.version) < "2021.6.0" or self.settings.os == "Android":
71 del self.options.interprocedural_optimization
72 if Version(self.version) < "2021.2.0":
73 del self.options.shared
74 self.options.rm_safe("fPIC")
75
76 def configure(self):
77 if self.options.get_safe("shared", True):
78 self.options.rm_safe("fPIC")
79 else:
80 del self.options.tbbproxy
81 self.options.rm_safe("tbbbind")
82 if not self.options.tbbmalloc:
83 self.options.rm_safe("tbbproxy")
84 if self._tbbbind_explicit_hwloc:
85 self.options["hwloc"].shared = True
86
87 def requirements(self):
88 if self._tbbbind_build:
89 self.requires("hwloc/2.9.1")
90
91 def layout(self):
92 cmake_layout(self, src_folder="src")
93
94 def package_id(self):
95 if Version(self.version) < "2021.5.0":
96 self.info.options.tbbmalloc = True
97 if Version(self.version) < "2021.6.0" and self.info.options.get_safe("tbbproxy"):
98 self.info.options.tbbproxy = True
99
100 def validate_build(self):
101 if self.settings.compiler == "apple-clang" and Version(self.settings.compiler.version) < "11.0":
102 raise ConanInvalidConfiguration(f"{self.ref} couldn't be built by apple-clang < 11.0")
103 if not self.options.get_safe("shared", True):
104 if Version(self.version) >= "2021.6.0":
105 raise ConanInvalidConfiguration(
106 "Building oneTBB as a static library is highly discouraged and not supported "
107 "to avoid unforeseen issues like https://github.com/oneapi-src/oneTBB/issues/920. "
108 "Please consider fixing at least the aforementioned issue in upstream."
109 )
110 self.output.warning("oneTBB strongly discourages usage of static linkage")
111 if self._tbbbind_explicit_hwloc and not self.dependencies["hwloc"].options.shared:
112 raise ConanInvalidConfiguration(f"{self.ref} requires hwloc:shared=True to be built.")
113
114 def source(self):
115 get(self, **self.conan_data["sources"][self.version], strip_root=True)
116
117 def generate(self):
118 toolchain = CMakeToolchain(self)
119 toolchain.variables["TBB_TEST"] = False
120 toolchain.variables["TBB_STRICT"] = False
121 if Version(self.version) >= "2021.5.0":
122 toolchain.variables["TBBMALLOC_BUILD"] = self.options.tbbmalloc
123 if self.options.get_safe("interprocedural_optimization"):
124 toolchain.variables["TBB_ENABLE_IPO"] = self.options.interprocedural_optimization
125 if Version(self.version) >= "2021.6.0" and self.options.get_safe("tbbproxy"):
126 toolchain.variables["TBBMALLOC_PROXY_BUILD"] = self.options.tbbproxy
127 toolchain.variables["TBB_DISABLE_HWLOC_AUTOMATIC_SEARCH"] = not self._tbbbind_build
128 if self._tbbbind_build:
129 deps = PkgConfigDeps(self)
130 deps.generate()
131 if self._tbbbind_explicit_hwloc:
132 hwloc_package_folder = self.dependencies["hwloc"].package_folder.replace("\\", "/")
133 hwloc_lib_name = "hwloc.lib" if self.settings.os == "Windows" else "libhwloc.so"
134 toolchain.variables[f"CMAKE_HWLOC_{self._tbbbind_hwloc_version}_LIBRARY_PATH"] = os.path.join(hwloc_package_folder, "lib", hwloc_lib_name)
135 toolchain.variables[f"CMAKE_HWLOC_{self._tbbbind_hwloc_version}_INCLUDE_PATH"] = os.path.join(hwloc_package_folder, "include")
136 if self.settings.os == "Windows":
137 toolchain.variables[f"CMAKE_HWLOC_{self._tbbbind_hwloc_version}_DLL_PATH"] = os.path.join(hwloc_package_folder, "bin", "hwloc.dll")
138 toolchain.generate()
139
140 def build(self):
141 apply_conandata_patches(self)
142 cmake = CMake(self)
143 cmake.configure()
144 cmake.build()
145
146 def package(self):
147 cmake = CMake(self)
148 cmake.install()
149 copy(self, "LICENSE.txt", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
150 rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
151 rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
152 rmdir(self, os.path.join(self.package_folder, "share"))
153
154 def package_info(self):
155 self.cpp_info.set_property("cmake_file_name", "TBB")
156 self.cpp_info.set_property("pkg_config_name", "tbb")
157 self.cpp_info.set_property("cmake_config_version_compat", "AnyNewerVersion")
158
159 def lib_name(name):
160 if self.settings.build_type == "Debug":
161 return name + "_debug"
162 return name
163
164 # tbb
165 tbb = self.cpp_info.components["libtbb"]
166
167 tbb.set_property("cmake_target_name", "TBB::tbb")
168 tbb.libs = [lib_name("tbb")]
169 if self.settings.os == "Windows":
170 version_info = load(self,
171 os.path.join(self.package_folder, "include", "oneapi", "tbb",
172 "version.h"))
173 binary_version = re.sub(
174 r".*" + re.escape("#define __TBB_BINARY_VERSION ") +
175 r"(\d+).*",
176 r"\1",
177 version_info,
178 flags=re.MULTILINE | re.DOTALL,
179 )
180 tbb.libs.append(lib_name(f"tbb{binary_version}"))
181 if self.settings.os in ["Linux", "FreeBSD"]:
182 tbb.system_libs = ["m", "dl", "rt", "pthread"]
183
184 # tbbmalloc
185 if self.options.tbbmalloc:
186 tbbmalloc = self.cpp_info.components["tbbmalloc"]
187
188 tbbmalloc.set_property("cmake_target_name", "TBB::tbbmalloc")
189 tbbmalloc.libs = [lib_name("tbbmalloc")]
190 if self.settings.os in ["Linux", "FreeBSD"]:
191 tbbmalloc.system_libs = ["dl", "pthread"]
192
193 # tbbmalloc_proxy
194 if self.options.get_safe("tbbproxy", False):
195 tbbproxy = self.cpp_info.components["tbbmalloc_proxy"]
196
197 tbbproxy.set_property("cmake_target_name", "TBB::tbbmalloc_proxy")
198 tbbproxy.libs = [lib_name("tbbmalloc_proxy")]
199 tbbproxy.requires = ["tbbmalloc"]
200 if self.settings.os in ["Linux", "FreeBSD"]:
201 tbbproxy.system_libs = ["m", "dl", "pthread"]
202
203 # TODO: to remove in conan v2 once cmake_find_package* & pkg_config generators removed
204 self.cpp_info.names["cmake_find_package"] = "TBB"
205 self.cpp_info.names["cmake_find_package_multi"] = "TBB"
206 self.cpp_info.names["pkg_config"] = "tbb"
207 tbb.names["cmake_find_package"] = "tbb"
208 tbb.names["cmake_find_package_multi"] = "tbb"
209
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/recipes/onetbb/all/conanfile.py b/recipes/onetbb/all/conanfile.py
--- a/recipes/onetbb/all/conanfile.py
+++ b/recipes/onetbb/all/conanfile.py
@@ -125,18 +125,22 @@
if Version(self.version) >= "2021.6.0" and self.options.get_safe("tbbproxy"):
toolchain.variables["TBBMALLOC_PROXY_BUILD"] = self.options.tbbproxy
toolchain.variables["TBB_DISABLE_HWLOC_AUTOMATIC_SEARCH"] = not self._tbbbind_build
- if self._tbbbind_build:
- deps = PkgConfigDeps(self)
- deps.generate()
if self._tbbbind_explicit_hwloc:
- hwloc_package_folder = self.dependencies["hwloc"].package_folder.replace("\\", "/")
+ hwloc_package_folder = self.dependencies["hwloc"].package_folder
hwloc_lib_name = "hwloc.lib" if self.settings.os == "Windows" else "libhwloc.so"
- toolchain.variables[f"CMAKE_HWLOC_{self._tbbbind_hwloc_version}_LIBRARY_PATH"] = os.path.join(hwloc_package_folder, "lib", hwloc_lib_name)
- toolchain.variables[f"CMAKE_HWLOC_{self._tbbbind_hwloc_version}_INCLUDE_PATH"] = os.path.join(hwloc_package_folder, "include")
+ toolchain.variables[f"CMAKE_HWLOC_{self._tbbbind_hwloc_version}_LIBRARY_PATH"] = \
+ os.path.join(hwloc_package_folder, "lib", hwloc_lib_name).replace("\\", "/")
+ toolchain.variables[f"CMAKE_HWLOC_{self._tbbbind_hwloc_version}_INCLUDE_PATH"] = \
+ os.path.join(hwloc_package_folder, "include").replace("\\", "/")
if self.settings.os == "Windows":
- toolchain.variables[f"CMAKE_HWLOC_{self._tbbbind_hwloc_version}_DLL_PATH"] = os.path.join(hwloc_package_folder, "bin", "hwloc.dll")
+ toolchain.variables[f"CMAKE_HWLOC_{self._tbbbind_hwloc_version}_DLL_PATH"] = \
+ os.path.join(hwloc_package_folder, "bin", "hwloc.dll").replace("\\", "/")
toolchain.generate()
+ if self._tbbbind_build:
+ deps = PkgConfigDeps(self)
+ deps.generate()
+
def build(self):
apply_conandata_patches(self)
cmake = CMake(self)
| {"golden_diff": "diff --git a/recipes/onetbb/all/conanfile.py b/recipes/onetbb/all/conanfile.py\n--- a/recipes/onetbb/all/conanfile.py\n+++ b/recipes/onetbb/all/conanfile.py\n@@ -125,18 +125,22 @@\n if Version(self.version) >= \"2021.6.0\" and self.options.get_safe(\"tbbproxy\"):\n toolchain.variables[\"TBBMALLOC_PROXY_BUILD\"] = self.options.tbbproxy\n toolchain.variables[\"TBB_DISABLE_HWLOC_AUTOMATIC_SEARCH\"] = not self._tbbbind_build\n- if self._tbbbind_build:\n- deps = PkgConfigDeps(self)\n- deps.generate()\n if self._tbbbind_explicit_hwloc:\n- hwloc_package_folder = self.dependencies[\"hwloc\"].package_folder.replace(\"\\\\\", \"/\")\n+ hwloc_package_folder = self.dependencies[\"hwloc\"].package_folder\n hwloc_lib_name = \"hwloc.lib\" if self.settings.os == \"Windows\" else \"libhwloc.so\"\n- toolchain.variables[f\"CMAKE_HWLOC_{self._tbbbind_hwloc_version}_LIBRARY_PATH\"] = os.path.join(hwloc_package_folder, \"lib\", hwloc_lib_name)\n- toolchain.variables[f\"CMAKE_HWLOC_{self._tbbbind_hwloc_version}_INCLUDE_PATH\"] = os.path.join(hwloc_package_folder, \"include\")\n+ toolchain.variables[f\"CMAKE_HWLOC_{self._tbbbind_hwloc_version}_LIBRARY_PATH\"] = \\\n+ os.path.join(hwloc_package_folder, \"lib\", hwloc_lib_name).replace(\"\\\\\", \"/\")\n+ toolchain.variables[f\"CMAKE_HWLOC_{self._tbbbind_hwloc_version}_INCLUDE_PATH\"] = \\\n+ os.path.join(hwloc_package_folder, \"include\").replace(\"\\\\\", \"/\")\n if self.settings.os == \"Windows\":\n- toolchain.variables[f\"CMAKE_HWLOC_{self._tbbbind_hwloc_version}_DLL_PATH\"] = os.path.join(hwloc_package_folder, \"bin\", \"hwloc.dll\")\n+ toolchain.variables[f\"CMAKE_HWLOC_{self._tbbbind_hwloc_version}_DLL_PATH\"] = \\\n+ os.path.join(hwloc_package_folder, \"bin\", \"hwloc.dll\").replace(\"\\\\\", \"/\")\n toolchain.generate()\n \n+ if self._tbbbind_build:\n+ deps = PkgConfigDeps(self)\n+ deps.generate()\n+\n def build(self):\n apply_conandata_patches(self)\n cmake = CMake(self)\n", "issue": "[package] onetbb/2021.3.0: Invalid character escape '\\l'\n### Description\n\nFor details on this, see : https://github.com/conan-io/conan/issues/10539\r\n\r\n`conan_toolchain.cmake` is broken due to unescaped backslashes on windows. 
See log.\n\n### Package and Environment Details\n\n* Package Name/Version: **onetbb/2021.3.0#e447f12564bd0a5df3f2bd5781cf5cc7**\r\n* Operating System+version: **Windows 11**\r\n* Compiler+version: **MSVC 2022**\r\n* Docker image: **?**\r\n* Conan version: **conan 2.0.9**\r\n* Python version: **Python 3.7.9**\r\n\n\n### Conan profile\n\n======== Input profiles ========\r\nProfile host:\r\n[settings]\r\narch=x86\r\nbuild_type=Debug\r\ncompiler=msvc\r\ncompiler.cppstd=14\r\ncompiler.runtime=dynamic\r\ncompiler.runtime_type=Debug\r\ncompiler.version=193\r\nos=Windows\r\n\r\nProfile build:\r\n[settings]\r\narch=x86_64\r\nbuild_type=Release\r\ncompiler=msvc\r\ncompiler.cppstd=14\r\ncompiler.runtime=dynamic\r\ncompiler.runtime_type=Release\r\ncompiler.version=193\r\nos=Windows\r\n\n\n### Steps to reproduce\n\nInstall (build) onetbb/2021.3.0#e447f12564bd0a5df3f2bd5781cf5cc7 with cmakedeps / cmaketoolchain generator.\n\n### Logs\n\n<details><summary>Click to expand log</summary>\r\n\r\n```\r\nCMake Error at C:/Users/pgroarke/.conan2/p/b/onetb3e83bbcb22a9a/b/build/generators/conan_toolchain.cmake:117 (set):\r\n Syntax error in cmake code at\r\n\r\n C:/Users/pgroarke/.conan2/p/b/onetb3e83bbcb22a9a/b/build/generators/conan_toolchain.cmake:117\r\n\r\n when parsing string\r\n\r\n C:/Users/pgroarke/.conan2/p/b/hwloc93107a9ec9dfe/p\\lib\\hwloc.lib\r\n\r\n Invalid character escape '\\l'.\r\n```\r\n\r\n</details>\r\n\n", "before_files": [{"content": "from conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.build import cross_building\nfrom conan.tools.cmake import CMake, CMakeToolchain, cmake_layout\nfrom conan.tools.files import apply_conandata_patches, export_conandata_patches, copy, get, load, rmdir\nfrom conan.tools.gnu import PkgConfigDeps\nfrom conan.tools.scm import Version\nimport os\nimport re\n\nrequired_conan_version = \">=1.53.0\"\n\n\nclass OneTBBConan(ConanFile):\n name = \"onetbb\"\n license = \"Apache-2.0\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/oneapi-src/oneTBB\"\n description = (\n \"oneAPI Threading Building Blocks (oneTBB) lets you easily write parallel C++\"\n \" programs that take full advantage of multicore performance, that are portable, composable\"\n \" and have future-proof scalability.\")\n topics = (\"tbb\", \"threading\", \"parallelism\", \"tbbmalloc\")\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"tbbmalloc\": [True, False],\n \"tbbproxy\": [True, False],\n \"tbbbind\": [True, False],\n \"interprocedural_optimization\": [True, False],\n }\n default_options = {\n \"shared\": True,\n \"fPIC\": True,\n \"tbbmalloc\": True,\n \"tbbproxy\": True,\n \"tbbbind\": True,\n \"interprocedural_optimization\": True,\n }\n\n @property\n def _tbbbind_hwloc_version(self):\n # TBB expects different variables depending on the version\n return \"2_5\" if Version(self.version) >= \"2021.4.0\" else \"2_4\"\n\n @property\n def _tbbbind_supported(self):\n return Version(self.version) >= \"2021.1.1\" and not self.settings.os == \"Macos\"\n\n @property\n def _tbbbind_build(self):\n return self.options.get_safe(\"tbbbind\", False) and self._tbbbind_supported\n\n @property\n def _tbbbind_explicit_hwloc(self):\n # during cross-compilation, oneTBB does not search for HWLOC and we need to specify it explicitly\n # but then oneTBB creates an imported SHARED target from provided paths, so we have to set shared=True\n return 
self._tbbbind_build and cross_building(self)\n\n def export_sources(self):\n export_conandata_patches(self)\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n if not self._tbbbind_supported:\n del self.options.tbbbind\n if Version(self.version) < \"2021.6.0\" or self.settings.os == \"Android\":\n del self.options.interprocedural_optimization\n if Version(self.version) < \"2021.2.0\":\n del self.options.shared\n self.options.rm_safe(\"fPIC\")\n\n def configure(self):\n if self.options.get_safe(\"shared\", True):\n self.options.rm_safe(\"fPIC\")\n else:\n del self.options.tbbproxy\n self.options.rm_safe(\"tbbbind\")\n if not self.options.tbbmalloc:\n self.options.rm_safe(\"tbbproxy\")\n if self._tbbbind_explicit_hwloc:\n self.options[\"hwloc\"].shared = True\n\n def requirements(self):\n if self._tbbbind_build:\n self.requires(\"hwloc/2.9.1\")\n\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n\n def package_id(self):\n if Version(self.version) < \"2021.5.0\":\n self.info.options.tbbmalloc = True\n if Version(self.version) < \"2021.6.0\" and self.info.options.get_safe(\"tbbproxy\"):\n self.info.options.tbbproxy = True\n\n def validate_build(self):\n if self.settings.compiler == \"apple-clang\" and Version(self.settings.compiler.version) < \"11.0\":\n raise ConanInvalidConfiguration(f\"{self.ref} couldn't be built by apple-clang < 11.0\")\n if not self.options.get_safe(\"shared\", True):\n if Version(self.version) >= \"2021.6.0\":\n raise ConanInvalidConfiguration(\n \"Building oneTBB as a static library is highly discouraged and not supported \"\n \"to avoid unforeseen issues like https://github.com/oneapi-src/oneTBB/issues/920. \"\n \"Please consider fixing at least the aforementioned issue in upstream.\"\n )\n self.output.warning(\"oneTBB strongly discourages usage of static linkage\")\n if self._tbbbind_explicit_hwloc and not self.dependencies[\"hwloc\"].options.shared:\n raise ConanInvalidConfiguration(f\"{self.ref} requires hwloc:shared=True to be built.\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], strip_root=True)\n\n def generate(self):\n toolchain = CMakeToolchain(self)\n toolchain.variables[\"TBB_TEST\"] = False\n toolchain.variables[\"TBB_STRICT\"] = False\n if Version(self.version) >= \"2021.5.0\":\n toolchain.variables[\"TBBMALLOC_BUILD\"] = self.options.tbbmalloc\n if self.options.get_safe(\"interprocedural_optimization\"):\n toolchain.variables[\"TBB_ENABLE_IPO\"] = self.options.interprocedural_optimization\n if Version(self.version) >= \"2021.6.0\" and self.options.get_safe(\"tbbproxy\"):\n toolchain.variables[\"TBBMALLOC_PROXY_BUILD\"] = self.options.tbbproxy\n toolchain.variables[\"TBB_DISABLE_HWLOC_AUTOMATIC_SEARCH\"] = not self._tbbbind_build\n if self._tbbbind_build:\n deps = PkgConfigDeps(self)\n deps.generate()\n if self._tbbbind_explicit_hwloc:\n hwloc_package_folder = self.dependencies[\"hwloc\"].package_folder.replace(\"\\\\\", \"/\")\n hwloc_lib_name = \"hwloc.lib\" if self.settings.os == \"Windows\" else \"libhwloc.so\"\n toolchain.variables[f\"CMAKE_HWLOC_{self._tbbbind_hwloc_version}_LIBRARY_PATH\"] = os.path.join(hwloc_package_folder, \"lib\", hwloc_lib_name)\n toolchain.variables[f\"CMAKE_HWLOC_{self._tbbbind_hwloc_version}_INCLUDE_PATH\"] = os.path.join(hwloc_package_folder, \"include\")\n if self.settings.os == \"Windows\":\n toolchain.variables[f\"CMAKE_HWLOC_{self._tbbbind_hwloc_version}_DLL_PATH\"] = os.path.join(hwloc_package_folder, \"bin\", \"hwloc.dll\")\n 
toolchain.generate()\n\n def build(self):\n apply_conandata_patches(self)\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n cmake = CMake(self)\n cmake.install()\n copy(self, \"LICENSE.txt\", src=self.source_folder, dst=os.path.join(self.package_folder, \"licenses\"))\n rmdir(self, os.path.join(self.package_folder, \"lib\", \"cmake\"))\n rmdir(self, os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n rmdir(self, os.path.join(self.package_folder, \"share\"))\n\n def package_info(self):\n self.cpp_info.set_property(\"cmake_file_name\", \"TBB\")\n self.cpp_info.set_property(\"pkg_config_name\", \"tbb\")\n self.cpp_info.set_property(\"cmake_config_version_compat\", \"AnyNewerVersion\")\n\n def lib_name(name):\n if self.settings.build_type == \"Debug\":\n return name + \"_debug\"\n return name\n\n # tbb\n tbb = self.cpp_info.components[\"libtbb\"]\n\n tbb.set_property(\"cmake_target_name\", \"TBB::tbb\")\n tbb.libs = [lib_name(\"tbb\")]\n if self.settings.os == \"Windows\":\n version_info = load(self,\n os.path.join(self.package_folder, \"include\", \"oneapi\", \"tbb\",\n \"version.h\"))\n binary_version = re.sub(\n r\".*\" + re.escape(\"#define __TBB_BINARY_VERSION \") +\n r\"(\\d+).*\",\n r\"\\1\",\n version_info,\n flags=re.MULTILINE | re.DOTALL,\n )\n tbb.libs.append(lib_name(f\"tbb{binary_version}\"))\n if self.settings.os in [\"Linux\", \"FreeBSD\"]:\n tbb.system_libs = [\"m\", \"dl\", \"rt\", \"pthread\"]\n\n # tbbmalloc\n if self.options.tbbmalloc:\n tbbmalloc = self.cpp_info.components[\"tbbmalloc\"]\n\n tbbmalloc.set_property(\"cmake_target_name\", \"TBB::tbbmalloc\")\n tbbmalloc.libs = [lib_name(\"tbbmalloc\")]\n if self.settings.os in [\"Linux\", \"FreeBSD\"]:\n tbbmalloc.system_libs = [\"dl\", \"pthread\"]\n\n # tbbmalloc_proxy\n if self.options.get_safe(\"tbbproxy\", False):\n tbbproxy = self.cpp_info.components[\"tbbmalloc_proxy\"]\n\n tbbproxy.set_property(\"cmake_target_name\", \"TBB::tbbmalloc_proxy\")\n tbbproxy.libs = [lib_name(\"tbbmalloc_proxy\")]\n tbbproxy.requires = [\"tbbmalloc\"]\n if self.settings.os in [\"Linux\", \"FreeBSD\"]:\n tbbproxy.system_libs = [\"m\", \"dl\", \"pthread\"]\n\n # TODO: to remove in conan v2 once cmake_find_package* & pkg_config generators removed\n self.cpp_info.names[\"cmake_find_package\"] = \"TBB\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"TBB\"\n self.cpp_info.names[\"pkg_config\"] = \"tbb\"\n tbb.names[\"cmake_find_package\"] = \"tbb\"\n tbb.names[\"cmake_find_package_multi\"] = \"tbb\"\n", "path": "recipes/onetbb/all/conanfile.py"}], "after_files": [{"content": "from conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.build import cross_building\nfrom conan.tools.cmake import CMake, CMakeToolchain, cmake_layout\nfrom conan.tools.files import apply_conandata_patches, export_conandata_patches, copy, get, load, rmdir\nfrom conan.tools.gnu import PkgConfigDeps\nfrom conan.tools.scm import Version\nimport os\nimport re\n\nrequired_conan_version = \">=1.53.0\"\n\n\nclass OneTBBConan(ConanFile):\n name = \"onetbb\"\n license = \"Apache-2.0\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/oneapi-src/oneTBB\"\n description = (\n \"oneAPI Threading Building Blocks (oneTBB) lets you easily write parallel C++\"\n \" programs that take full advantage of multicore performance, that are portable, composable\"\n \" and have future-proof scalability.\")\n topics = (\"tbb\", \"threading\", \"parallelism\", 
\"tbbmalloc\")\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"tbbmalloc\": [True, False],\n \"tbbproxy\": [True, False],\n \"tbbbind\": [True, False],\n \"interprocedural_optimization\": [True, False],\n }\n default_options = {\n \"shared\": True,\n \"fPIC\": True,\n \"tbbmalloc\": True,\n \"tbbproxy\": True,\n \"tbbbind\": True,\n \"interprocedural_optimization\": True,\n }\n\n @property\n def _tbbbind_hwloc_version(self):\n # TBB expects different variables depending on the version\n return \"2_5\" if Version(self.version) >= \"2021.4.0\" else \"2_4\"\n\n @property\n def _tbbbind_supported(self):\n return Version(self.version) >= \"2021.1.1\" and not self.settings.os == \"Macos\"\n\n @property\n def _tbbbind_build(self):\n return self.options.get_safe(\"tbbbind\", False) and self._tbbbind_supported\n\n @property\n def _tbbbind_explicit_hwloc(self):\n # during cross-compilation, oneTBB does not search for HWLOC and we need to specify it explicitly\n # but then oneTBB creates an imported SHARED target from provided paths, so we have to set shared=True\n return self._tbbbind_build and cross_building(self)\n\n def export_sources(self):\n export_conandata_patches(self)\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n if not self._tbbbind_supported:\n del self.options.tbbbind\n if Version(self.version) < \"2021.6.0\" or self.settings.os == \"Android\":\n del self.options.interprocedural_optimization\n if Version(self.version) < \"2021.2.0\":\n del self.options.shared\n self.options.rm_safe(\"fPIC\")\n\n def configure(self):\n if self.options.get_safe(\"shared\", True):\n self.options.rm_safe(\"fPIC\")\n else:\n del self.options.tbbproxy\n self.options.rm_safe(\"tbbbind\")\n if not self.options.tbbmalloc:\n self.options.rm_safe(\"tbbproxy\")\n if self._tbbbind_explicit_hwloc:\n self.options[\"hwloc\"].shared = True\n\n def requirements(self):\n if self._tbbbind_build:\n self.requires(\"hwloc/2.9.1\")\n\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n\n def package_id(self):\n if Version(self.version) < \"2021.5.0\":\n self.info.options.tbbmalloc = True\n if Version(self.version) < \"2021.6.0\" and self.info.options.get_safe(\"tbbproxy\"):\n self.info.options.tbbproxy = True\n\n def validate_build(self):\n if self.settings.compiler == \"apple-clang\" and Version(self.settings.compiler.version) < \"11.0\":\n raise ConanInvalidConfiguration(f\"{self.ref} couldn't be built by apple-clang < 11.0\")\n if not self.options.get_safe(\"shared\", True):\n if Version(self.version) >= \"2021.6.0\":\n raise ConanInvalidConfiguration(\n \"Building oneTBB as a static library is highly discouraged and not supported \"\n \"to avoid unforeseen issues like https://github.com/oneapi-src/oneTBB/issues/920. 
\"\n \"Please consider fixing at least the aforementioned issue in upstream.\"\n )\n self.output.warning(\"oneTBB strongly discourages usage of static linkage\")\n if self._tbbbind_explicit_hwloc and not self.dependencies[\"hwloc\"].options.shared:\n raise ConanInvalidConfiguration(f\"{self.ref} requires hwloc:shared=True to be built.\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], strip_root=True)\n\n def generate(self):\n toolchain = CMakeToolchain(self)\n toolchain.variables[\"TBB_TEST\"] = False\n toolchain.variables[\"TBB_STRICT\"] = False\n if Version(self.version) >= \"2021.5.0\":\n toolchain.variables[\"TBBMALLOC_BUILD\"] = self.options.tbbmalloc\n if self.options.get_safe(\"interprocedural_optimization\"):\n toolchain.variables[\"TBB_ENABLE_IPO\"] = self.options.interprocedural_optimization\n if Version(self.version) >= \"2021.6.0\" and self.options.get_safe(\"tbbproxy\"):\n toolchain.variables[\"TBBMALLOC_PROXY_BUILD\"] = self.options.tbbproxy\n toolchain.variables[\"TBB_DISABLE_HWLOC_AUTOMATIC_SEARCH\"] = not self._tbbbind_build\n if self._tbbbind_explicit_hwloc:\n hwloc_package_folder = self.dependencies[\"hwloc\"].package_folder\n hwloc_lib_name = \"hwloc.lib\" if self.settings.os == \"Windows\" else \"libhwloc.so\"\n toolchain.variables[f\"CMAKE_HWLOC_{self._tbbbind_hwloc_version}_LIBRARY_PATH\"] = \\\n os.path.join(hwloc_package_folder, \"lib\", hwloc_lib_name).replace(\"\\\\\", \"/\")\n toolchain.variables[f\"CMAKE_HWLOC_{self._tbbbind_hwloc_version}_INCLUDE_PATH\"] = \\\n os.path.join(hwloc_package_folder, \"include\").replace(\"\\\\\", \"/\")\n if self.settings.os == \"Windows\":\n toolchain.variables[f\"CMAKE_HWLOC_{self._tbbbind_hwloc_version}_DLL_PATH\"] = \\\n os.path.join(hwloc_package_folder, \"bin\", \"hwloc.dll\").replace(\"\\\\\", \"/\")\n toolchain.generate()\n\n if self._tbbbind_build:\n deps = PkgConfigDeps(self)\n deps.generate()\n\n def build(self):\n apply_conandata_patches(self)\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n cmake = CMake(self)\n cmake.install()\n copy(self, \"LICENSE.txt\", src=self.source_folder, dst=os.path.join(self.package_folder, \"licenses\"))\n rmdir(self, os.path.join(self.package_folder, \"lib\", \"cmake\"))\n rmdir(self, os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n rmdir(self, os.path.join(self.package_folder, \"share\"))\n\n def package_info(self):\n self.cpp_info.set_property(\"cmake_file_name\", \"TBB\")\n self.cpp_info.set_property(\"pkg_config_name\", \"tbb\")\n self.cpp_info.set_property(\"cmake_config_version_compat\", \"AnyNewerVersion\")\n\n def lib_name(name):\n if self.settings.build_type == \"Debug\":\n return name + \"_debug\"\n return name\n\n # tbb\n tbb = self.cpp_info.components[\"libtbb\"]\n\n tbb.set_property(\"cmake_target_name\", \"TBB::tbb\")\n tbb.libs = [lib_name(\"tbb\")]\n if self.settings.os == \"Windows\":\n version_info = load(self,\n os.path.join(self.package_folder, \"include\", \"oneapi\", \"tbb\",\n \"version.h\"))\n binary_version = re.sub(\n r\".*\" + re.escape(\"#define __TBB_BINARY_VERSION \") +\n r\"(\\d+).*\",\n r\"\\1\",\n version_info,\n flags=re.MULTILINE | re.DOTALL,\n )\n tbb.libs.append(lib_name(f\"tbb{binary_version}\"))\n if self.settings.os in [\"Linux\", \"FreeBSD\"]:\n tbb.system_libs = [\"m\", \"dl\", \"rt\", \"pthread\"]\n\n # tbbmalloc\n if self.options.tbbmalloc:\n tbbmalloc = self.cpp_info.components[\"tbbmalloc\"]\n\n tbbmalloc.set_property(\"cmake_target_name\", \"TBB::tbbmalloc\")\n 
tbbmalloc.libs = [lib_name(\"tbbmalloc\")]\n if self.settings.os in [\"Linux\", \"FreeBSD\"]:\n tbbmalloc.system_libs = [\"dl\", \"pthread\"]\n\n # tbbmalloc_proxy\n if self.options.get_safe(\"tbbproxy\", False):\n tbbproxy = self.cpp_info.components[\"tbbmalloc_proxy\"]\n\n tbbproxy.set_property(\"cmake_target_name\", \"TBB::tbbmalloc_proxy\")\n tbbproxy.libs = [lib_name(\"tbbmalloc_proxy\")]\n tbbproxy.requires = [\"tbbmalloc\"]\n if self.settings.os in [\"Linux\", \"FreeBSD\"]:\n tbbproxy.system_libs = [\"m\", \"dl\", \"pthread\"]\n\n # TODO: to remove in conan v2 once cmake_find_package* & pkg_config generators removed\n self.cpp_info.names[\"cmake_find_package\"] = \"TBB\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"TBB\"\n self.cpp_info.names[\"pkg_config\"] = \"tbb\"\n tbb.names[\"cmake_find_package\"] = \"tbb\"\n tbb.names[\"cmake_find_package_multi\"] = \"tbb\"\n", "path": "recipes/onetbb/all/conanfile.py"}]} | 3,574 | 555 |
gh_patches_debug_5391 | rasdani/github-patches | git_diff | vyperlang__vyper-3174 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Disallow `++a` unary operations
## Simple Summary
Disallow the (pre-increment) `++a` expression in Vyper for all signed and unsigned types.
```vy
x: uint256 = 0
y: uint256 = 0
# throws
y = x++
# currently does not throw
y = ++x
```
## Motivation
Currently, the (post-increment) expression `a++` correctly throws, since it is invalid under Python's grammar. Unfortunately, Python does allow the expression `++a`, which is parsed as `+(+(a)) = a` and is therefore an identity operation on the integer (`+` acts here as a unary operator). Vyper currently supports this syntax as well, and I think it is more ergonomic & consistent with other languages to disallow it (many non-Python engineers will get confused if they are not aware of the semantics of this syntax). This is one of the design decisions of Python that I am not happy about and would recommend not implementing in Vyper either.
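To see why, here is a quick check against CPython's own parser (plain Python, shown purely for illustration):
```python
import ast

# `++x` is valid Python: it parses as two nested unary-plus operations,
# i.e. +(+(x)), which is an identity operation on the operand.
tree = ast.parse("y = ++x")
print(ast.dump(tree.body[0].value))
# UnaryOp(op=UAdd(), operand=UnaryOp(op=UAdd(), operand=Name(id='x', ctx=Load())))

# `y = x++`, on the other hand, is a SyntaxError in Python (and therefore in Vyper).
```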
## Specification
The compiler should throw an error for any `++a`-like expression for all signed and unsigned types.
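A hypothetical acceptance test for this behaviour could look like the sketch below; the contract source, the test name and the broad `Exception` catch are assumptions, and the concrete exception class would depend on how the compiler implements the check.
```python
import pytest
import vyper

# Hypothetical contract using the pre-increment-style syntax that should be rejected.
SOURCE = """
@external
def foo() -> uint256:
    x: uint256 = 1
    y: uint256 = 0
    y = ++x
    return y
"""

def test_unary_plus_is_rejected():
    # The exact exception type is an assumption; any compile-time error
    # raised by the front end would satisfy this specification.
    with pytest.raises(Exception):
        vyper.compile_code(SOURCE)
```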
## Backwards Compatibility
Will break any previous code that uses a `++a` syntax.
## Dependencies
N/A.
## References
N/A.
## Copyright
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `vyper/ast/annotation.py`
Content:
```
1 import ast as python_ast
2 import tokenize
3 from decimal import Decimal
4 from typing import Optional, cast
5
6 import asttokens
7
8 from vyper.exceptions import CompilerPanic, SyntaxException
9 from vyper.typing import ModificationOffsets
10
11
12 class AnnotatingVisitor(python_ast.NodeTransformer):
13 _source_code: str
14 _modification_offsets: ModificationOffsets
15
16 def __init__(
17 self,
18 source_code: str,
19 modification_offsets: Optional[ModificationOffsets],
20 tokens: asttokens.ASTTokens,
21 source_id: int,
22 contract_name: Optional[str],
23 ):
24 self._tokens = tokens
25 self._source_id = source_id
26 self._contract_name = contract_name
27 self._source_code: str = source_code
28 self.counter: int = 0
29 self._modification_offsets = {}
30 if modification_offsets is not None:
31 self._modification_offsets = modification_offsets
32
33 def generic_visit(self, node):
34 """
35 Annotate a node with information that simplifies Vyper node generation.
36 """
37 # Decorate every node with the original source code to allow pretty-printing errors
38 node.full_source_code = self._source_code
39 node.node_id = self.counter
40 node.ast_type = node.__class__.__name__
41 self.counter += 1
42
43 # Decorate every node with source end offsets
44 start = node.first_token.start if hasattr(node, "first_token") else (None, None)
45 end = (None, None)
46 if hasattr(node, "last_token"):
47 end = node.last_token.end
48 if node.last_token.type == 4:
49 # token type 4 is a `\n`, some nodes include a trailing newline
50 # here we ignore it when building the node offsets
51 end = (end[0], end[1] - 1)
52
53 node.lineno = start[0]
54 node.col_offset = start[1]
55 node.end_lineno = end[0]
56 node.end_col_offset = end[1]
57
58 if hasattr(node, "last_token"):
59 start_pos = node.first_token.startpos
60 end_pos = node.last_token.endpos
61 if node.last_token.type == 4:
62 # ignore trailing newline once more
63 end_pos -= 1
64 node.src = f"{start_pos}:{end_pos-start_pos}:{self._source_id}"
65 node.node_source_code = self._source_code[start_pos:end_pos]
66
67 return super().generic_visit(node)
68
69 def _visit_docstring(self, node):
70 """
71 Move a node docstring from body to `doc_string` and annotate it as `DocStr`.
72 """
73 self.generic_visit(node)
74
75 if node.body:
76 n = node.body[0]
77 if isinstance(n, python_ast.Expr) and isinstance(n.value, python_ast.Str):
78 self.generic_visit(n.value)
79 n.value.ast_type = "DocStr"
80 del node.body[0]
81 node.doc_string = n.value
82
83 return node
84
85 def visit_Module(self, node):
86 node.name = self._contract_name
87 return self._visit_docstring(node)
88
89 def visit_FunctionDef(self, node):
90 if node.decorator_list:
91 # start the source highlight at `def` to improve annotation readability
92 decorator_token = node.decorator_list[-1].last_token
93 def_token = self._tokens.find_token(decorator_token, tokenize.NAME, tok_str="def")
94 node.first_token = def_token
95
96 return self._visit_docstring(node)
97
98 def visit_ClassDef(self, node):
99 """
100 Convert the `ClassDef` node into a Vyper-specific node type.
101
102 Vyper uses `struct` and `interface` in place of `class`, however these
103 values must be substituted out to create parseable Python. The Python
104 node is annotated with the desired Vyper type via the `ast_type` member.
105 """
106 self.generic_visit(node)
107
108 node.ast_type = self._modification_offsets[(node.lineno, node.col_offset)]
109 return node
110
111 def visit_Expr(self, node):
112 """
113 Convert the `Yield` node into a Vyper-specific node type.
114
115 Vyper substitutes `yield` for non-pythonic statement such as `log`. Prior
116 to generating Vyper AST, we must annotate `Yield` nodes with their original
117 value.
118
119 Because `Yield` is an expression-statement, we also remove it from it's
120 enclosing `Expr` node.
121 """
122 self.generic_visit(node)
123
124 if isinstance(node.value, python_ast.Yield):
125 node = node.value
126 node.ast_type = self._modification_offsets[(node.lineno, node.col_offset)]
127
128 return node
129
130 def visit_Subscript(self, node):
131 """
132 Maintain consistency of `Subscript.slice` across python versions.
133
134 Starting from python 3.9, the `Index` node type has been deprecated,
135 and made impossible to instantiate via regular means. Here we do awful
136 hacky black magic to create an `Index` node. We need our own parser.
137 """
138 self.generic_visit(node)
139
140 if not isinstance(node.slice, python_ast.Index):
141 index = python_ast.Constant(value=node.slice, ast_type="Index")
142 index.__class__ = python_ast.Index
143 self.generic_visit(index)
144 node.slice = index
145
146 return node
147
148 def visit_Constant(self, node):
149 """
150 Handle `Constant` when using Python >=3.8
151
152 In Python 3.8, `NameConstant`, `Num`, `Str`, and `Bytes` are deprecated
153 in favor of `Constant`. To maintain consistency across versions, `ast_type`
154 is modified to create the <=3.7 node classes.
155 """
156 if not isinstance(node.value, bool) and isinstance(node.value, (int, float)):
157 return self.visit_Num(node)
158
159 self.generic_visit(node)
160 if node.value is None or isinstance(node.value, bool):
161 node.ast_type = "NameConstant"
162 elif isinstance(node.value, str):
163 node.ast_type = "Str"
164 elif isinstance(node.value, bytes):
165 node.ast_type = "Bytes"
166 else:
167 raise SyntaxException(
168 "Invalid syntax (unsupported Python Constant AST node).",
169 self._source_code,
170 node.lineno,
171 node.col_offset,
172 )
173
174 return node
175
176 def visit_Num(self, node):
177 """
178 Adjust numeric node class based on the value type.
179
180 Python uses `Num` to represent floats and integers. Integers may also
181 be given in binary, octal, decimal, or hexadecimal format. This method
182 modifies `ast_type` to separate `Num` into more granular Vyper node
183 classes.
184 """
185 # modify vyper AST type according to the format of the literal value
186 self.generic_visit(node)
187 value = node.node_source_code
188
189 # deduce non base-10 types based on prefix
190 if value.lower()[:2] == "0x":
191 if len(value) % 2:
192 raise SyntaxException(
193 "Hex notation requires an even number of digits",
194 self._source_code,
195 node.lineno,
196 node.col_offset,
197 )
198 node.ast_type = "Hex"
199 node.n = value
200
201 elif value.lower()[:2] == "0b":
202 node.ast_type = "Bytes"
203 mod = (len(value) - 2) % 8
204 if mod:
205 raise SyntaxException(
206 f"Bit notation requires a multiple of 8 bits. {8-mod} bit(s) are missing.",
207 self._source_code,
208 node.lineno,
209 node.col_offset,
210 )
211 node.value = int(value, 2).to_bytes(len(value) // 8, "big")
212
213 elif isinstance(node.n, float):
214 node.ast_type = "Decimal"
215 node.n = Decimal(value)
216
217 elif isinstance(node.n, int):
218 node.ast_type = "Int"
219
220 else:
221 raise CompilerPanic(f"Unexpected type for Constant value: {type(node.n).__name__}")
222
223 return node
224
225 def visit_UnaryOp(self, node):
226 """
227 Adjust operand value and discard unary operations, where possible.
228
229 This is done so that negative decimal literals are accurately represented.
230 """
231 self.generic_visit(node)
232
233 # TODO once grammar is updated, remove this
234 # UAdd has no effect on the value of it's operand, so it is discarded
235 if isinstance(node.op, python_ast.UAdd):
236 return node.operand
237
238 is_sub = isinstance(node.op, python_ast.USub)
239 is_num = (
240 hasattr(node.operand, "n")
241 and not isinstance(node.operand.n, bool)
242 and isinstance(node.operand.n, (int, Decimal))
243 )
244 if is_sub and is_num:
245 node.operand.n = 0 - node.operand.n
246 node.operand.col_offset = node.col_offset
247 node.operand.node_source_code = node.node_source_code
248 return node.operand
249 else:
250 return node
251
252
253 def annotate_python_ast(
254 parsed_ast: python_ast.AST,
255 source_code: str,
256 modification_offsets: Optional[ModificationOffsets] = None,
257 source_id: int = 0,
258 contract_name: Optional[str] = None,
259 ) -> python_ast.AST:
260 """
261 Annotate and optimize a Python AST in preparation conversion to a Vyper AST.
262
263 Parameters
264 ----------
265 parsed_ast : AST
266 The AST to be annotated and optimized.
267 source_code : str
268 The originating source code of the AST.
269 modification_offsets : dict, optional
270 A mapping of class names to their original class types.
271
272 Returns
273 -------
274 The annotated and optimized AST.
275 """
276
277 tokens = asttokens.ASTTokens(source_code, tree=cast(Optional[python_ast.Module], parsed_ast))
278 visitor = AnnotatingVisitor(source_code, modification_offsets, tokens, source_id, contract_name)
279 visitor.visit(parsed_ast)
280
281 return parsed_ast
282
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/vyper/ast/annotation.py b/vyper/ast/annotation.py
--- a/vyper/ast/annotation.py
+++ b/vyper/ast/annotation.py
@@ -230,11 +230,6 @@
"""
self.generic_visit(node)
- # TODO once grammar is updated, remove this
- # UAdd has no effect on the value of it's operand, so it is discarded
- if isinstance(node.op, python_ast.UAdd):
- return node.operand
-
is_sub = isinstance(node.op, python_ast.USub)
is_num = (
hasattr(node.operand, "n")
| {"golden_diff": "diff --git a/vyper/ast/annotation.py b/vyper/ast/annotation.py\n--- a/vyper/ast/annotation.py\n+++ b/vyper/ast/annotation.py\n@@ -230,11 +230,6 @@\n \"\"\"\n self.generic_visit(node)\n \n- # TODO once grammar is updated, remove this\n- # UAdd has no effect on the value of it's operand, so it is discarded\n- if isinstance(node.op, python_ast.UAdd):\n- return node.operand\n-\n is_sub = isinstance(node.op, python_ast.USub)\n is_num = (\n hasattr(node.operand, \"n\")\n", "issue": "Disallow `++a` unary operations\n## Simple Summary\r\n\r\nDisallow the (pre-increment) `++a` expression in Vyper for all signed and unsigned types.\r\n\r\n```vy\r\nx: uint256 = 0\r\ny: uint256 = 0\r\n\r\n# throws\r\ny = x++\r\n# does currently not throw\r\ny = ++x\r\n```\r\n\r\n## Motivation\r\n\r\nCurrently, the (post-increment) expression `a++` throws correctly as it complies with the Python syntax. Unfortunately, Python allows for the expression `++a` which is parsed as `+(+(a)) = a` and therefore is an identity operation on the integer (`+` is here a unary operator). Vyper currently supports this Syntax as well and I think it's more ergonomic & consistent with other languages to disallow it (many non-Python engineers will get confused if they are not aware of the semantics of the syntax). This is one of the design decisions of Python that am not happy about and would recommend not implementing in Vyper either.\r\n\r\n## Specification\r\n\r\nThe compiler should throw an error for any `++a`-like expression for all signed and unsigned types.\r\n\r\n## Backwards Compatibility\r\n\r\nWill break any previous code that uses a `++a` syntax.\r\n\r\n## Dependencies\r\n\r\nN/A.\r\n\r\n## References\r\n\r\nN/A.\r\n\r\n## Copyright\r\n\r\nCopyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/)\r\n\n", "before_files": [{"content": "import ast as python_ast\nimport tokenize\nfrom decimal import Decimal\nfrom typing import Optional, cast\n\nimport asttokens\n\nfrom vyper.exceptions import CompilerPanic, SyntaxException\nfrom vyper.typing import ModificationOffsets\n\n\nclass AnnotatingVisitor(python_ast.NodeTransformer):\n _source_code: str\n _modification_offsets: ModificationOffsets\n\n def __init__(\n self,\n source_code: str,\n modification_offsets: Optional[ModificationOffsets],\n tokens: asttokens.ASTTokens,\n source_id: int,\n contract_name: Optional[str],\n ):\n self._tokens = tokens\n self._source_id = source_id\n self._contract_name = contract_name\n self._source_code: str = source_code\n self.counter: int = 0\n self._modification_offsets = {}\n if modification_offsets is not None:\n self._modification_offsets = modification_offsets\n\n def generic_visit(self, node):\n \"\"\"\n Annotate a node with information that simplifies Vyper node generation.\n \"\"\"\n # Decorate every node with the original source code to allow pretty-printing errors\n node.full_source_code = self._source_code\n node.node_id = self.counter\n node.ast_type = node.__class__.__name__\n self.counter += 1\n\n # Decorate every node with source end offsets\n start = node.first_token.start if hasattr(node, \"first_token\") else (None, None)\n end = (None, None)\n if hasattr(node, \"last_token\"):\n end = node.last_token.end\n if node.last_token.type == 4:\n # token type 4 is a `\\n`, some nodes include a trailing newline\n # here we ignore it when building the node offsets\n end = (end[0], end[1] - 1)\n\n node.lineno = start[0]\n node.col_offset = start[1]\n node.end_lineno = end[0]\n 
node.end_col_offset = end[1]\n\n if hasattr(node, \"last_token\"):\n start_pos = node.first_token.startpos\n end_pos = node.last_token.endpos\n if node.last_token.type == 4:\n # ignore trailing newline once more\n end_pos -= 1\n node.src = f\"{start_pos}:{end_pos-start_pos}:{self._source_id}\"\n node.node_source_code = self._source_code[start_pos:end_pos]\n\n return super().generic_visit(node)\n\n def _visit_docstring(self, node):\n \"\"\"\n Move a node docstring from body to `doc_string` and annotate it as `DocStr`.\n \"\"\"\n self.generic_visit(node)\n\n if node.body:\n n = node.body[0]\n if isinstance(n, python_ast.Expr) and isinstance(n.value, python_ast.Str):\n self.generic_visit(n.value)\n n.value.ast_type = \"DocStr\"\n del node.body[0]\n node.doc_string = n.value\n\n return node\n\n def visit_Module(self, node):\n node.name = self._contract_name\n return self._visit_docstring(node)\n\n def visit_FunctionDef(self, node):\n if node.decorator_list:\n # start the source highlight at `def` to improve annotation readability\n decorator_token = node.decorator_list[-1].last_token\n def_token = self._tokens.find_token(decorator_token, tokenize.NAME, tok_str=\"def\")\n node.first_token = def_token\n\n return self._visit_docstring(node)\n\n def visit_ClassDef(self, node):\n \"\"\"\n Convert the `ClassDef` node into a Vyper-specific node type.\n\n Vyper uses `struct` and `interface` in place of `class`, however these\n values must be substituted out to create parseable Python. The Python\n node is annotated with the desired Vyper type via the `ast_type` member.\n \"\"\"\n self.generic_visit(node)\n\n node.ast_type = self._modification_offsets[(node.lineno, node.col_offset)]\n return node\n\n def visit_Expr(self, node):\n \"\"\"\n Convert the `Yield` node into a Vyper-specific node type.\n\n Vyper substitutes `yield` for non-pythonic statement such as `log`. Prior\n to generating Vyper AST, we must annotate `Yield` nodes with their original\n value.\n\n Because `Yield` is an expression-statement, we also remove it from it's\n enclosing `Expr` node.\n \"\"\"\n self.generic_visit(node)\n\n if isinstance(node.value, python_ast.Yield):\n node = node.value\n node.ast_type = self._modification_offsets[(node.lineno, node.col_offset)]\n\n return node\n\n def visit_Subscript(self, node):\n \"\"\"\n Maintain consistency of `Subscript.slice` across python versions.\n\n Starting from python 3.9, the `Index` node type has been deprecated,\n and made impossible to instantiate via regular means. Here we do awful\n hacky black magic to create an `Index` node. We need our own parser.\n \"\"\"\n self.generic_visit(node)\n\n if not isinstance(node.slice, python_ast.Index):\n index = python_ast.Constant(value=node.slice, ast_type=\"Index\")\n index.__class__ = python_ast.Index\n self.generic_visit(index)\n node.slice = index\n\n return node\n\n def visit_Constant(self, node):\n \"\"\"\n Handle `Constant` when using Python >=3.8\n\n In Python 3.8, `NameConstant`, `Num`, `Str`, and `Bytes` are deprecated\n in favor of `Constant`. 
To maintain consistency across versions, `ast_type`\n is modified to create the <=3.7 node classes.\n \"\"\"\n if not isinstance(node.value, bool) and isinstance(node.value, (int, float)):\n return self.visit_Num(node)\n\n self.generic_visit(node)\n if node.value is None or isinstance(node.value, bool):\n node.ast_type = \"NameConstant\"\n elif isinstance(node.value, str):\n node.ast_type = \"Str\"\n elif isinstance(node.value, bytes):\n node.ast_type = \"Bytes\"\n else:\n raise SyntaxException(\n \"Invalid syntax (unsupported Python Constant AST node).\",\n self._source_code,\n node.lineno,\n node.col_offset,\n )\n\n return node\n\n def visit_Num(self, node):\n \"\"\"\n Adjust numeric node class based on the value type.\n\n Python uses `Num` to represent floats and integers. Integers may also\n be given in binary, octal, decimal, or hexadecimal format. This method\n modifies `ast_type` to separate `Num` into more granular Vyper node\n classes.\n \"\"\"\n # modify vyper AST type according to the format of the literal value\n self.generic_visit(node)\n value = node.node_source_code\n\n # deduce non base-10 types based on prefix\n if value.lower()[:2] == \"0x\":\n if len(value) % 2:\n raise SyntaxException(\n \"Hex notation requires an even number of digits\",\n self._source_code,\n node.lineno,\n node.col_offset,\n )\n node.ast_type = \"Hex\"\n node.n = value\n\n elif value.lower()[:2] == \"0b\":\n node.ast_type = \"Bytes\"\n mod = (len(value) - 2) % 8\n if mod:\n raise SyntaxException(\n f\"Bit notation requires a multiple of 8 bits. {8-mod} bit(s) are missing.\",\n self._source_code,\n node.lineno,\n node.col_offset,\n )\n node.value = int(value, 2).to_bytes(len(value) // 8, \"big\")\n\n elif isinstance(node.n, float):\n node.ast_type = \"Decimal\"\n node.n = Decimal(value)\n\n elif isinstance(node.n, int):\n node.ast_type = \"Int\"\n\n else:\n raise CompilerPanic(f\"Unexpected type for Constant value: {type(node.n).__name__}\")\n\n return node\n\n def visit_UnaryOp(self, node):\n \"\"\"\n Adjust operand value and discard unary operations, where possible.\n\n This is done so that negative decimal literals are accurately represented.\n \"\"\"\n self.generic_visit(node)\n\n # TODO once grammar is updated, remove this\n # UAdd has no effect on the value of it's operand, so it is discarded\n if isinstance(node.op, python_ast.UAdd):\n return node.operand\n\n is_sub = isinstance(node.op, python_ast.USub)\n is_num = (\n hasattr(node.operand, \"n\")\n and not isinstance(node.operand.n, bool)\n and isinstance(node.operand.n, (int, Decimal))\n )\n if is_sub and is_num:\n node.operand.n = 0 - node.operand.n\n node.operand.col_offset = node.col_offset\n node.operand.node_source_code = node.node_source_code\n return node.operand\n else:\n return node\n\n\ndef annotate_python_ast(\n parsed_ast: python_ast.AST,\n source_code: str,\n modification_offsets: Optional[ModificationOffsets] = None,\n source_id: int = 0,\n contract_name: Optional[str] = None,\n) -> python_ast.AST:\n \"\"\"\n Annotate and optimize a Python AST in preparation conversion to a Vyper AST.\n\n Parameters\n ----------\n parsed_ast : AST\n The AST to be annotated and optimized.\n source_code : str\n The originating source code of the AST.\n modification_offsets : dict, optional\n A mapping of class names to their original class types.\n\n Returns\n -------\n The annotated and optimized AST.\n \"\"\"\n\n tokens = asttokens.ASTTokens(source_code, tree=cast(Optional[python_ast.Module], parsed_ast))\n visitor = 
AnnotatingVisitor(source_code, modification_offsets, tokens, source_id, contract_name)\n visitor.visit(parsed_ast)\n\n return parsed_ast\n", "path": "vyper/ast/annotation.py"}], "after_files": [{"content": "import ast as python_ast\nimport tokenize\nfrom decimal import Decimal\nfrom typing import Optional, cast\n\nimport asttokens\n\nfrom vyper.exceptions import CompilerPanic, SyntaxException\nfrom vyper.typing import ModificationOffsets\n\n\nclass AnnotatingVisitor(python_ast.NodeTransformer):\n _source_code: str\n _modification_offsets: ModificationOffsets\n\n def __init__(\n self,\n source_code: str,\n modification_offsets: Optional[ModificationOffsets],\n tokens: asttokens.ASTTokens,\n source_id: int,\n contract_name: Optional[str],\n ):\n self._tokens = tokens\n self._source_id = source_id\n self._contract_name = contract_name\n self._source_code: str = source_code\n self.counter: int = 0\n self._modification_offsets = {}\n if modification_offsets is not None:\n self._modification_offsets = modification_offsets\n\n def generic_visit(self, node):\n \"\"\"\n Annotate a node with information that simplifies Vyper node generation.\n \"\"\"\n # Decorate every node with the original source code to allow pretty-printing errors\n node.full_source_code = self._source_code\n node.node_id = self.counter\n node.ast_type = node.__class__.__name__\n self.counter += 1\n\n # Decorate every node with source end offsets\n start = node.first_token.start if hasattr(node, \"first_token\") else (None, None)\n end = (None, None)\n if hasattr(node, \"last_token\"):\n end = node.last_token.end\n if node.last_token.type == 4:\n # token type 4 is a `\\n`, some nodes include a trailing newline\n # here we ignore it when building the node offsets\n end = (end[0], end[1] - 1)\n\n node.lineno = start[0]\n node.col_offset = start[1]\n node.end_lineno = end[0]\n node.end_col_offset = end[1]\n\n if hasattr(node, \"last_token\"):\n start_pos = node.first_token.startpos\n end_pos = node.last_token.endpos\n if node.last_token.type == 4:\n # ignore trailing newline once more\n end_pos -= 1\n node.src = f\"{start_pos}:{end_pos-start_pos}:{self._source_id}\"\n node.node_source_code = self._source_code[start_pos:end_pos]\n\n return super().generic_visit(node)\n\n def _visit_docstring(self, node):\n \"\"\"\n Move a node docstring from body to `doc_string` and annotate it as `DocStr`.\n \"\"\"\n self.generic_visit(node)\n\n if node.body:\n n = node.body[0]\n if isinstance(n, python_ast.Expr) and isinstance(n.value, python_ast.Str):\n self.generic_visit(n.value)\n n.value.ast_type = \"DocStr\"\n del node.body[0]\n node.doc_string = n.value\n\n return node\n\n def visit_Module(self, node):\n node.name = self._contract_name\n return self._visit_docstring(node)\n\n def visit_FunctionDef(self, node):\n if node.decorator_list:\n # start the source highlight at `def` to improve annotation readability\n decorator_token = node.decorator_list[-1].last_token\n def_token = self._tokens.find_token(decorator_token, tokenize.NAME, tok_str=\"def\")\n node.first_token = def_token\n\n return self._visit_docstring(node)\n\n def visit_ClassDef(self, node):\n \"\"\"\n Convert the `ClassDef` node into a Vyper-specific node type.\n\n Vyper uses `struct` and `interface` in place of `class`, however these\n values must be substituted out to create parseable Python. 
The Python\n node is annotated with the desired Vyper type via the `ast_type` member.\n \"\"\"\n self.generic_visit(node)\n\n node.ast_type = self._modification_offsets[(node.lineno, node.col_offset)]\n return node\n\n def visit_Expr(self, node):\n \"\"\"\n Convert the `Yield` node into a Vyper-specific node type.\n\n Vyper substitutes `yield` for non-pythonic statement such as `log`. Prior\n to generating Vyper AST, we must annotate `Yield` nodes with their original\n value.\n\n Because `Yield` is an expression-statement, we also remove it from it's\n enclosing `Expr` node.\n \"\"\"\n self.generic_visit(node)\n\n if isinstance(node.value, python_ast.Yield):\n node = node.value\n node.ast_type = self._modification_offsets[(node.lineno, node.col_offset)]\n\n return node\n\n def visit_Subscript(self, node):\n \"\"\"\n Maintain consistency of `Subscript.slice` across python versions.\n\n Starting from python 3.9, the `Index` node type has been deprecated,\n and made impossible to instantiate via regular means. Here we do awful\n hacky black magic to create an `Index` node. We need our own parser.\n \"\"\"\n self.generic_visit(node)\n\n if not isinstance(node.slice, python_ast.Index):\n index = python_ast.Constant(value=node.slice, ast_type=\"Index\")\n index.__class__ = python_ast.Index\n self.generic_visit(index)\n node.slice = index\n\n return node\n\n def visit_Constant(self, node):\n \"\"\"\n Handle `Constant` when using Python >=3.8\n\n In Python 3.8, `NameConstant`, `Num`, `Str`, and `Bytes` are deprecated\n in favor of `Constant`. To maintain consistency across versions, `ast_type`\n is modified to create the <=3.7 node classes.\n \"\"\"\n if not isinstance(node.value, bool) and isinstance(node.value, (int, float)):\n return self.visit_Num(node)\n\n self.generic_visit(node)\n if node.value is None or isinstance(node.value, bool):\n node.ast_type = \"NameConstant\"\n elif isinstance(node.value, str):\n node.ast_type = \"Str\"\n elif isinstance(node.value, bytes):\n node.ast_type = \"Bytes\"\n else:\n raise SyntaxException(\n \"Invalid syntax (unsupported Python Constant AST node).\",\n self._source_code,\n node.lineno,\n node.col_offset,\n )\n\n return node\n\n def visit_Num(self, node):\n \"\"\"\n Adjust numeric node class based on the value type.\n\n Python uses `Num` to represent floats and integers. Integers may also\n be given in binary, octal, decimal, or hexadecimal format. This method\n modifies `ast_type` to separate `Num` into more granular Vyper node\n classes.\n \"\"\"\n # modify vyper AST type according to the format of the literal value\n self.generic_visit(node)\n value = node.node_source_code\n\n # deduce non base-10 types based on prefix\n if value.lower()[:2] == \"0x\":\n if len(value) % 2:\n raise SyntaxException(\n \"Hex notation requires an even number of digits\",\n self._source_code,\n node.lineno,\n node.col_offset,\n )\n node.ast_type = \"Hex\"\n node.n = value\n\n elif value.lower()[:2] == \"0b\":\n node.ast_type = \"Bytes\"\n mod = (len(value) - 2) % 8\n if mod:\n raise SyntaxException(\n f\"Bit notation requires a multiple of 8 bits. 
{8-mod} bit(s) are missing.\",\n self._source_code,\n node.lineno,\n node.col_offset,\n )\n node.value = int(value, 2).to_bytes(len(value) // 8, \"big\")\n\n elif isinstance(node.n, float):\n node.ast_type = \"Decimal\"\n node.n = Decimal(value)\n\n elif isinstance(node.n, int):\n node.ast_type = \"Int\"\n\n else:\n raise CompilerPanic(f\"Unexpected type for Constant value: {type(node.n).__name__}\")\n\n return node\n\n def visit_UnaryOp(self, node):\n \"\"\"\n Adjust operand value and discard unary operations, where possible.\n\n This is done so that negative decimal literals are accurately represented.\n \"\"\"\n self.generic_visit(node)\n\n is_sub = isinstance(node.op, python_ast.USub)\n is_num = (\n hasattr(node.operand, \"n\")\n and not isinstance(node.operand.n, bool)\n and isinstance(node.operand.n, (int, Decimal))\n )\n if is_sub and is_num:\n node.operand.n = 0 - node.operand.n\n node.operand.col_offset = node.col_offset\n node.operand.node_source_code = node.node_source_code\n return node.operand\n else:\n return node\n\n\ndef annotate_python_ast(\n parsed_ast: python_ast.AST,\n source_code: str,\n modification_offsets: Optional[ModificationOffsets] = None,\n source_id: int = 0,\n contract_name: Optional[str] = None,\n) -> python_ast.AST:\n \"\"\"\n Annotate and optimize a Python AST in preparation conversion to a Vyper AST.\n\n Parameters\n ----------\n parsed_ast : AST\n The AST to be annotated and optimized.\n source_code : str\n The originating source code of the AST.\n modification_offsets : dict, optional\n A mapping of class names to their original class types.\n\n Returns\n -------\n The annotated and optimized AST.\n \"\"\"\n\n tokens = asttokens.ASTTokens(source_code, tree=cast(Optional[python_ast.Module], parsed_ast))\n visitor = AnnotatingVisitor(source_code, modification_offsets, tokens, source_id, contract_name)\n visitor.visit(parsed_ast)\n\n return parsed_ast\n", "path": "vyper/ast/annotation.py"}]} | 3,458 | 144 |
gh_patches_debug_14906 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-6224 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Version's page is slow
Currently, we list all versions on one page. This is a problem for projects that have thousands of versions (it is difficult to search the versions, and the query is very slow for the database).
https://readthedocs.org/projects/docs/versions/
One idea is to not list deactivated versions and use a form instead (a rough sketch follows below). I am not really sure what that looks like; we still need a way to list all versions. If we put an autocomplete widget there, does the user know which version they want to activate?
Ref https://github.com/readthedocs/readthedocs.org/issues/6068#issuecomment-520908761
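For example, the versions view could stop rendering every deactivated version and instead accept a search term plus a hard cap. A rough sketch of what that could look like inside the view (the `version_filter` query parameter name and the cap of 100 are assumptions):
```python
# Inside project_versions(): only a bounded, optionally filtered subset
# of inactive versions is handed to the template.
active_versions = versions.filter(active=True)

inactive_versions = versions.filter(active=False)
version_filter = request.GET.get('version_filter', '')  # assumed query parameter
if version_filter:
    inactive_versions = inactive_versions.filter(
        verbose_name__icontains=version_filter,
    )
# Cap the queryset so projects with thousands of branches/tags stay fast.
inactive_versions = inactive_versions[:100]
```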
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `readthedocs/projects/views/public.py`
Content:
```
1 """Public project views."""
2
3 import json
4 import logging
5 import mimetypes
6 import operator
7 import os
8 from collections import OrderedDict
9
10 import requests
11 from django.conf import settings
12 from django.contrib import messages
13 from django.contrib.auth.models import User
14 from django.core.cache import cache
15 from django.core.files.storage import get_storage_class
16 from django.db.models import prefetch_related_objects
17 from django.http import HttpResponse, HttpResponseRedirect
18 from django.shortcuts import get_object_or_404, render, redirect
19 from django.urls import reverse
20 from django.views.decorators.cache import never_cache
21 from django.views.generic import DetailView, ListView
22 from taggit.models import Tag
23
24 from readthedocs.analytics.tasks import analytics_event
25 from readthedocs.analytics.utils import get_client_ip
26 from readthedocs.builds.constants import LATEST
27 from readthedocs.builds.models import Version
28 from readthedocs.builds.views import BuildTriggerMixin
29 from readthedocs.projects.models import Project
30 from readthedocs.projects.templatetags.projects_tags import sort_version_aware
31
32 from .base import ProjectOnboardMixin
33
34
35 log = logging.getLogger(__name__)
36 search_log = logging.getLogger(__name__ + '.search')
37 mimetypes.add_type('application/epub+zip', '.epub')
38
39
40 class ProjectIndex(ListView):
41
42 """List view of public :py:class:`Project` instances."""
43
44 model = Project
45
46 def get_queryset(self):
47 queryset = Project.objects.public(self.request.user)
48 queryset = queryset.exclude(users__profile__banned=True)
49
50 if self.kwargs.get('tag'):
51 self.tag = get_object_or_404(Tag, slug=self.kwargs.get('tag'))
52 queryset = queryset.filter(tags__slug__in=[self.tag.slug])
53 else:
54 self.tag = None
55
56 if self.kwargs.get('username'):
57 self.user = get_object_or_404(
58 User,
59 username=self.kwargs.get('username'),
60 )
61 queryset = queryset.filter(user=self.user)
62 else:
63 self.user = None
64
65 return queryset
66
67 def get_context_data(self, **kwargs):
68 context = super().get_context_data(**kwargs)
69 context['person'] = self.user
70 context['tag'] = self.tag
71 return context
72
73
74 def project_redirect(request, invalid_project_slug):
75 """
76 Redirect project slugs that have underscores (``_``).
77
78 Slugs with underscores are no longer allowed.
79 Underscores are replaced by ``-`` and then redirected to that URL.
80 """
81 new_project_slug = invalid_project_slug.replace('_', '-')
82 new_path = request.path.replace(invalid_project_slug, new_project_slug)
83 return redirect('{}?{}'.format(
84 new_path,
85 request.GET.urlencode(),
86 ))
87
88
89 class ProjectDetailView(BuildTriggerMixin, ProjectOnboardMixin, DetailView):
90
91 """Display project onboard steps."""
92
93 model = Project
94 slug_url_kwarg = 'project_slug'
95
96 def get_queryset(self):
97 return Project.objects.protected(self.request.user)
98
99 def get_context_data(self, **kwargs):
100 context = super().get_context_data(**kwargs)
101
102 project = self.get_object()
103 context['versions'] = self._get_versions(project)
104
105 protocol = 'http'
106 if self.request.is_secure():
107 protocol = 'https'
108
109 version_slug = project.get_default_version()
110
111 context['badge_url'] = '{}://{}{}?version={}'.format(
112 protocol,
113 settings.PRODUCTION_DOMAIN,
114 reverse('project_badge', args=[project.slug]),
115 project.get_default_version(),
116 )
117 context['site_url'] = '{url}?badge={version}'.format(
118 url=project.get_docs_url(version_slug),
119 version=version_slug,
120 )
121
122 return context
123
124
125 @never_cache
126 def project_badge(request, project_slug):
127 """Return a sweet badge for the project."""
128 style = request.GET.get('style', 'flat')
129 if style not in (
130 'flat',
131 'plastic',
132 'flat-square',
133 'for-the-badge',
134 'social',
135 ):
136 style = 'flat'
137
138 # Get the local path to the badge files
139 badge_path = os.path.join(
140 os.path.dirname(__file__),
141 '..',
142 'static',
143 'projects',
144 'badges',
145 '%s-' + style + '.svg',
146 )
147
148 version_slug = request.GET.get('version', LATEST)
149 file_path = badge_path % 'unknown'
150
151 version = Version.objects.public(request.user).filter(
152 project__slug=project_slug,
153 slug=version_slug,
154 ).first()
155
156 if version:
157 last_build = version.builds.filter(
158 type='html',
159 state='finished',
160 ).order_by('-date').first()
161 if last_build:
162 if last_build.success:
163 file_path = badge_path % 'passing'
164 else:
165 file_path = badge_path % 'failing'
166
167 try:
168 with open(file_path) as fd:
169 return HttpResponse(
170 fd.read(),
171 content_type='image/svg+xml',
172 )
173 except (IOError, OSError):
174 log.exception(
175 'Failed to read local filesystem while serving a docs badge',
176 )
177 return HttpResponse(status=503)
178
179
180 def project_downloads(request, project_slug):
181 """A detail view for a project with various downloads."""
182 project = get_object_or_404(
183 Project.objects.protected(request.user),
184 slug=project_slug,
185 )
186 versions = Version.internal.public(user=request.user, project=project)
187 versions = sort_version_aware(versions)
188 version_data = OrderedDict()
189 for version in versions:
190 data = version.get_downloads()
191 # Don't show ones that have no downloads.
192 if data:
193 version_data[version] = data
194
195 return render(
196 request,
197 'projects/project_downloads.html',
198 {
199 'project': project,
200 'version_data': version_data,
201 'versions': versions,
202 },
203 )
204
205
206 def project_download_media(request, project_slug, type_, version_slug):
207 """
208 Download a specific piece of media.
209
210 Perform an auth check if serving in private mode.
211
212 .. warning:: This is linked directly from the HTML pages.
213 It should only care about the Version permissions,
214 not the actual Project permissions.
215 """
216 version = get_object_or_404(
217 Version.objects.public(user=request.user),
218 project__slug=project_slug,
219 slug=version_slug,
220 )
221
222 # Send media download to analytics - sensitive data is anonymized
223 analytics_event.delay(
224 event_category='Build Media',
225 event_action=f'Download {type_}',
226 event_label=str(version),
227 ua=request.META.get('HTTP_USER_AGENT'),
228 uip=get_client_ip(request),
229 )
230
231 if settings.DEFAULT_PRIVACY_LEVEL == 'public' or settings.DEBUG:
232
233 storage = get_storage_class(settings.RTD_BUILD_MEDIA_STORAGE)()
234 storage_path = version.project.get_storage_path(
235 type_=type_, version_slug=version_slug,
236 version_type=version.type,
237 )
238 if storage.exists(storage_path):
239 return HttpResponseRedirect(storage.url(storage_path))
240
241 media_path = os.path.join(
242 settings.MEDIA_URL,
243 type_,
244 project_slug,
245 version_slug,
246 '%s.%s' % (project_slug, type_.replace('htmlzip', 'zip')),
247 )
248 return HttpResponseRedirect(media_path)
249
250 # Get relative media path
251 path = (
252 version.project.get_production_media_path(
253 type_=type_,
254 version_slug=version_slug,
255 ).replace(settings.PRODUCTION_ROOT, '/prod_artifacts')
256 )
257 content_type, encoding = mimetypes.guess_type(path)
258 content_type = content_type or 'application/octet-stream'
259 response = HttpResponse(content_type=content_type)
260 if encoding:
261 response['Content-Encoding'] = encoding
262 response['X-Accel-Redirect'] = path
263 # Include version in filename; this fixes a long-standing bug
264 filename = '{}-{}.{}'.format(
265 project_slug,
266 version_slug,
267 path.split('.')[-1],
268 )
269 response['Content-Disposition'] = 'filename=%s' % filename
270 return response
271
272
273 def project_versions(request, project_slug):
274 """
275 Project version list view.
276
277 Shows the available versions and lets the user choose which ones to build.
278 """
279 project = get_object_or_404(
280 Project.objects.protected(request.user),
281 slug=project_slug,
282 )
283
284 versions = Version.internal.public(
285 user=request.user,
286 project=project,
287 only_active=False,
288 )
289 active_versions = versions.filter(active=True)
290 inactive_versions = versions.filter(active=False)
291
292 # If there's a wiped query string, check the string against the versions
293 # list and display a success message. Deleting directories doesn't know how
294 # to fail. :)
295 wiped = request.GET.get('wipe', '')
296 wiped_version = versions.filter(slug=wiped)
297 if wiped and wiped_version.count():
298 messages.success(request, 'Version wiped: ' + wiped)
299
300 # Optimize project permission checks
301 prefetch_related_objects([project], 'users')
302
303 return render(
304 request,
305 'projects/project_version_list.html',
306 {
307 'inactive_versions': inactive_versions,
308 'active_versions': active_versions,
309 'project': project,
310 },
311 )
312
313
314 def project_analytics(request, project_slug):
315 """Have a analytics API placeholder."""
316 project = get_object_or_404(
317 Project.objects.protected(request.user),
318 slug=project_slug,
319 )
320 analytics_cache = cache.get('analytics:%s' % project_slug)
321 if analytics_cache:
322 analytics = json.loads(analytics_cache)
323 else:
324 try:
325 resp = requests.get(
326 '{host}/api/v1/index/1/heatmap/'.format(
327 host=settings.GROK_API_HOST,
328 ),
329 params={'project': project.slug, 'days': 7, 'compare': True},
330 )
331 analytics = resp.json()
332 cache.set('analytics:%s' % project_slug, resp.content, 1800)
333 except requests.exceptions.RequestException:
334 analytics = None
335
336 if analytics:
337 page_list = list(
338 reversed(
339 sorted(
340 list(analytics['page'].items()),
341 key=operator.itemgetter(1),
342 ),
343 ),
344 )
345 version_list = list(
346 reversed(
347 sorted(
348 list(analytics['version'].items()),
349 key=operator.itemgetter(1),
350 ),
351 ),
352 )
353 else:
354 page_list = []
355 version_list = []
356
357 full = request.GET.get('full')
358 if not full:
359 page_list = page_list[:20]
360 version_list = version_list[:20]
361
362 return render(
363 request,
364 'projects/project_analytics.html',
365 {
366 'project': project,
367 'analytics': analytics,
368 'page_list': page_list,
369 'version_list': version_list,
370 'full': full,
371 },
372 )
373
374
375 def project_embed(request, project_slug):
376 """Have a content API placeholder."""
377 project = get_object_or_404(
378 Project.objects.protected(request.user),
379 slug=project_slug,
380 )
381 version = project.versions.get(slug=LATEST)
382 files = version.imported_files.filter(
383 name__endswith='.html',
384 ).order_by('path')
385
386 return render(
387 request,
388 'projects/project_embed.html',
389 {
390 'project': project,
391 'files': files,
392 'settings': {
393 'PUBLIC_API_URL': settings.PUBLIC_API_URL,
394 'URI': request.build_absolute_uri(location='/').rstrip('/'),
395 },
396 },
397 )
398
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/readthedocs/projects/views/public.py b/readthedocs/projects/views/public.py
--- a/readthedocs/projects/views/public.py
+++ b/readthedocs/projects/views/public.py
@@ -287,7 +287,14 @@
only_active=False,
)
active_versions = versions.filter(active=True)
+
+ # Limit inactive versions in case a project has a large number of branches or tags
+ # Filter inactive versions based on the query string
inactive_versions = versions.filter(active=False)
+ version_filter = request.GET.get('version_filter', '')
+ if version_filter:
+ inactive_versions = inactive_versions.filter(verbose_name__icontains=version_filter)
+ inactive_versions = inactive_versions[:100]
# If there's a wiped query string, check the string against the versions
# list and display a success message. Deleting directories doesn't know how
| {"golden_diff": "diff --git a/readthedocs/projects/views/public.py b/readthedocs/projects/views/public.py\n--- a/readthedocs/projects/views/public.py\n+++ b/readthedocs/projects/views/public.py\n@@ -287,7 +287,14 @@\n only_active=False,\n )\n active_versions = versions.filter(active=True)\n+\n+ # Limit inactive versions in case a project has a large number of branches or tags\n+ # Filter inactive versions based on the query string\n inactive_versions = versions.filter(active=False)\n+ version_filter = request.GET.get('version_filter', '')\n+ if version_filter:\n+ inactive_versions = inactive_versions.filter(verbose_name__icontains=version_filter)\n+ inactive_versions = inactive_versions[:100]\n \n # If there's a wiped query string, check the string against the versions\n # list and display a success message. Deleting directories doesn't know how\n", "issue": "Version's page is slow\nCurrently, we list all versions in one page. This is a problem for projects that have thousands of versions (it's difficult to search the versions and very slow for the db)\r\n\r\nhttps://readthedocs.org/projects/docs/versions/\r\n\r\nOne idea is to don't list deactivated versions and use a form instead. Not really sure how that looks like, we still need a way to list all versions. If we put an auto complete widget, does the user know what version wants to activate?\r\n\r\nRef https://github.com/readthedocs/readthedocs.org/issues/6068#issuecomment-520908761\n", "before_files": [{"content": "\"\"\"Public project views.\"\"\"\n\nimport json\nimport logging\nimport mimetypes\nimport operator\nimport os\nfrom collections import OrderedDict\n\nimport requests\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth.models import User\nfrom django.core.cache import cache\nfrom django.core.files.storage import get_storage_class\nfrom django.db.models import prefetch_related_objects\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, render, redirect\nfrom django.urls import reverse\nfrom django.views.decorators.cache import never_cache\nfrom django.views.generic import DetailView, ListView\nfrom taggit.models import Tag\n\nfrom readthedocs.analytics.tasks import analytics_event\nfrom readthedocs.analytics.utils import get_client_ip\nfrom readthedocs.builds.constants import LATEST\nfrom readthedocs.builds.models import Version\nfrom readthedocs.builds.views import BuildTriggerMixin\nfrom readthedocs.projects.models import Project\nfrom readthedocs.projects.templatetags.projects_tags import sort_version_aware\n\nfrom .base import ProjectOnboardMixin\n\n\nlog = logging.getLogger(__name__)\nsearch_log = logging.getLogger(__name__ + '.search')\nmimetypes.add_type('application/epub+zip', '.epub')\n\n\nclass ProjectIndex(ListView):\n\n \"\"\"List view of public :py:class:`Project` instances.\"\"\"\n\n model = Project\n\n def get_queryset(self):\n queryset = Project.objects.public(self.request.user)\n queryset = queryset.exclude(users__profile__banned=True)\n\n if self.kwargs.get('tag'):\n self.tag = get_object_or_404(Tag, slug=self.kwargs.get('tag'))\n queryset = queryset.filter(tags__slug__in=[self.tag.slug])\n else:\n self.tag = None\n\n if self.kwargs.get('username'):\n self.user = get_object_or_404(\n User,\n username=self.kwargs.get('username'),\n )\n queryset = queryset.filter(user=self.user)\n else:\n self.user = None\n\n return queryset\n\n def get_context_data(self, **kwargs):\n context = 
super().get_context_data(**kwargs)\n context['person'] = self.user\n context['tag'] = self.tag\n return context\n\n\ndef project_redirect(request, invalid_project_slug):\n \"\"\"\n Redirect project slugs that have underscores (``_``).\n\n Slugs with underscores are no longer allowed.\n Underscores are replaced by ``-`` and then redirected to that URL.\n \"\"\"\n new_project_slug = invalid_project_slug.replace('_', '-')\n new_path = request.path.replace(invalid_project_slug, new_project_slug)\n return redirect('{}?{}'.format(\n new_path,\n request.GET.urlencode(),\n ))\n\n\nclass ProjectDetailView(BuildTriggerMixin, ProjectOnboardMixin, DetailView):\n\n \"\"\"Display project onboard steps.\"\"\"\n\n model = Project\n slug_url_kwarg = 'project_slug'\n\n def get_queryset(self):\n return Project.objects.protected(self.request.user)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n project = self.get_object()\n context['versions'] = self._get_versions(project)\n\n protocol = 'http'\n if self.request.is_secure():\n protocol = 'https'\n\n version_slug = project.get_default_version()\n\n context['badge_url'] = '{}://{}{}?version={}'.format(\n protocol,\n settings.PRODUCTION_DOMAIN,\n reverse('project_badge', args=[project.slug]),\n project.get_default_version(),\n )\n context['site_url'] = '{url}?badge={version}'.format(\n url=project.get_docs_url(version_slug),\n version=version_slug,\n )\n\n return context\n\n\n@never_cache\ndef project_badge(request, project_slug):\n \"\"\"Return a sweet badge for the project.\"\"\"\n style = request.GET.get('style', 'flat')\n if style not in (\n 'flat',\n 'plastic',\n 'flat-square',\n 'for-the-badge',\n 'social',\n ):\n style = 'flat'\n\n # Get the local path to the badge files\n badge_path = os.path.join(\n os.path.dirname(__file__),\n '..',\n 'static',\n 'projects',\n 'badges',\n '%s-' + style + '.svg',\n )\n\n version_slug = request.GET.get('version', LATEST)\n file_path = badge_path % 'unknown'\n\n version = Version.objects.public(request.user).filter(\n project__slug=project_slug,\n slug=version_slug,\n ).first()\n\n if version:\n last_build = version.builds.filter(\n type='html',\n state='finished',\n ).order_by('-date').first()\n if last_build:\n if last_build.success:\n file_path = badge_path % 'passing'\n else:\n file_path = badge_path % 'failing'\n\n try:\n with open(file_path) as fd:\n return HttpResponse(\n fd.read(),\n content_type='image/svg+xml',\n )\n except (IOError, OSError):\n log.exception(\n 'Failed to read local filesystem while serving a docs badge',\n )\n return HttpResponse(status=503)\n\n\ndef project_downloads(request, project_slug):\n \"\"\"A detail view for a project with various downloads.\"\"\"\n project = get_object_or_404(\n Project.objects.protected(request.user),\n slug=project_slug,\n )\n versions = Version.internal.public(user=request.user, project=project)\n versions = sort_version_aware(versions)\n version_data = OrderedDict()\n for version in versions:\n data = version.get_downloads()\n # Don't show ones that have no downloads.\n if data:\n version_data[version] = data\n\n return render(\n request,\n 'projects/project_downloads.html',\n {\n 'project': project,\n 'version_data': version_data,\n 'versions': versions,\n },\n )\n\n\ndef project_download_media(request, project_slug, type_, version_slug):\n \"\"\"\n Download a specific piece of media.\n\n Perform an auth check if serving in private mode.\n\n .. 
warning:: This is linked directly from the HTML pages.\n It should only care about the Version permissions,\n not the actual Project permissions.\n \"\"\"\n version = get_object_or_404(\n Version.objects.public(user=request.user),\n project__slug=project_slug,\n slug=version_slug,\n )\n\n # Send media download to analytics - sensitive data is anonymized\n analytics_event.delay(\n event_category='Build Media',\n event_action=f'Download {type_}',\n event_label=str(version),\n ua=request.META.get('HTTP_USER_AGENT'),\n uip=get_client_ip(request),\n )\n\n if settings.DEFAULT_PRIVACY_LEVEL == 'public' or settings.DEBUG:\n\n storage = get_storage_class(settings.RTD_BUILD_MEDIA_STORAGE)()\n storage_path = version.project.get_storage_path(\n type_=type_, version_slug=version_slug,\n version_type=version.type,\n )\n if storage.exists(storage_path):\n return HttpResponseRedirect(storage.url(storage_path))\n\n media_path = os.path.join(\n settings.MEDIA_URL,\n type_,\n project_slug,\n version_slug,\n '%s.%s' % (project_slug, type_.replace('htmlzip', 'zip')),\n )\n return HttpResponseRedirect(media_path)\n\n # Get relative media path\n path = (\n version.project.get_production_media_path(\n type_=type_,\n version_slug=version_slug,\n ).replace(settings.PRODUCTION_ROOT, '/prod_artifacts')\n )\n content_type, encoding = mimetypes.guess_type(path)\n content_type = content_type or 'application/octet-stream'\n response = HttpResponse(content_type=content_type)\n if encoding:\n response['Content-Encoding'] = encoding\n response['X-Accel-Redirect'] = path\n # Include version in filename; this fixes a long-standing bug\n filename = '{}-{}.{}'.format(\n project_slug,\n version_slug,\n path.split('.')[-1],\n )\n response['Content-Disposition'] = 'filename=%s' % filename\n return response\n\n\ndef project_versions(request, project_slug):\n \"\"\"\n Project version list view.\n\n Shows the available versions and lets the user choose which ones to build.\n \"\"\"\n project = get_object_or_404(\n Project.objects.protected(request.user),\n slug=project_slug,\n )\n\n versions = Version.internal.public(\n user=request.user,\n project=project,\n only_active=False,\n )\n active_versions = versions.filter(active=True)\n inactive_versions = versions.filter(active=False)\n\n # If there's a wiped query string, check the string against the versions\n # list and display a success message. Deleting directories doesn't know how\n # to fail. 
:)\n wiped = request.GET.get('wipe', '')\n wiped_version = versions.filter(slug=wiped)\n if wiped and wiped_version.count():\n messages.success(request, 'Version wiped: ' + wiped)\n\n # Optimize project permission checks\n prefetch_related_objects([project], 'users')\n\n return render(\n request,\n 'projects/project_version_list.html',\n {\n 'inactive_versions': inactive_versions,\n 'active_versions': active_versions,\n 'project': project,\n },\n )\n\n\ndef project_analytics(request, project_slug):\n \"\"\"Have a analytics API placeholder.\"\"\"\n project = get_object_or_404(\n Project.objects.protected(request.user),\n slug=project_slug,\n )\n analytics_cache = cache.get('analytics:%s' % project_slug)\n if analytics_cache:\n analytics = json.loads(analytics_cache)\n else:\n try:\n resp = requests.get(\n '{host}/api/v1/index/1/heatmap/'.format(\n host=settings.GROK_API_HOST,\n ),\n params={'project': project.slug, 'days': 7, 'compare': True},\n )\n analytics = resp.json()\n cache.set('analytics:%s' % project_slug, resp.content, 1800)\n except requests.exceptions.RequestException:\n analytics = None\n\n if analytics:\n page_list = list(\n reversed(\n sorted(\n list(analytics['page'].items()),\n key=operator.itemgetter(1),\n ),\n ),\n )\n version_list = list(\n reversed(\n sorted(\n list(analytics['version'].items()),\n key=operator.itemgetter(1),\n ),\n ),\n )\n else:\n page_list = []\n version_list = []\n\n full = request.GET.get('full')\n if not full:\n page_list = page_list[:20]\n version_list = version_list[:20]\n\n return render(\n request,\n 'projects/project_analytics.html',\n {\n 'project': project,\n 'analytics': analytics,\n 'page_list': page_list,\n 'version_list': version_list,\n 'full': full,\n },\n )\n\n\ndef project_embed(request, project_slug):\n \"\"\"Have a content API placeholder.\"\"\"\n project = get_object_or_404(\n Project.objects.protected(request.user),\n slug=project_slug,\n )\n version = project.versions.get(slug=LATEST)\n files = version.imported_files.filter(\n name__endswith='.html',\n ).order_by('path')\n\n return render(\n request,\n 'projects/project_embed.html',\n {\n 'project': project,\n 'files': files,\n 'settings': {\n 'PUBLIC_API_URL': settings.PUBLIC_API_URL,\n 'URI': request.build_absolute_uri(location='/').rstrip('/'),\n },\n },\n )\n", "path": "readthedocs/projects/views/public.py"}], "after_files": [{"content": "\"\"\"Public project views.\"\"\"\n\nimport json\nimport logging\nimport mimetypes\nimport operator\nimport os\nfrom collections import OrderedDict\n\nimport requests\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth.models import User\nfrom django.core.cache import cache\nfrom django.core.files.storage import get_storage_class\nfrom django.db.models import prefetch_related_objects\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, render, redirect\nfrom django.urls import reverse\nfrom django.views.decorators.cache import never_cache\nfrom django.views.generic import DetailView, ListView\nfrom taggit.models import Tag\n\nfrom readthedocs.analytics.tasks import analytics_event\nfrom readthedocs.analytics.utils import get_client_ip\nfrom readthedocs.builds.constants import LATEST\nfrom readthedocs.builds.models import Version\nfrom readthedocs.builds.views import BuildTriggerMixin\nfrom readthedocs.projects.models import Project\nfrom readthedocs.projects.templatetags.projects_tags import sort_version_aware\n\nfrom .base import 
ProjectOnboardMixin\n\n\nlog = logging.getLogger(__name__)\nsearch_log = logging.getLogger(__name__ + '.search')\nmimetypes.add_type('application/epub+zip', '.epub')\n\n\nclass ProjectIndex(ListView):\n\n \"\"\"List view of public :py:class:`Project` instances.\"\"\"\n\n model = Project\n\n def get_queryset(self):\n queryset = Project.objects.public(self.request.user)\n queryset = queryset.exclude(users__profile__banned=True)\n\n if self.kwargs.get('tag'):\n self.tag = get_object_or_404(Tag, slug=self.kwargs.get('tag'))\n queryset = queryset.filter(tags__slug__in=[self.tag.slug])\n else:\n self.tag = None\n\n if self.kwargs.get('username'):\n self.user = get_object_or_404(\n User,\n username=self.kwargs.get('username'),\n )\n queryset = queryset.filter(user=self.user)\n else:\n self.user = None\n\n return queryset\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['person'] = self.user\n context['tag'] = self.tag\n return context\n\n\ndef project_redirect(request, invalid_project_slug):\n \"\"\"\n Redirect project slugs that have underscores (``_``).\n\n Slugs with underscores are no longer allowed.\n Underscores are replaced by ``-`` and then redirected to that URL.\n \"\"\"\n new_project_slug = invalid_project_slug.replace('_', '-')\n new_path = request.path.replace(invalid_project_slug, new_project_slug)\n return redirect('{}?{}'.format(\n new_path,\n request.GET.urlencode(),\n ))\n\n\nclass ProjectDetailView(BuildTriggerMixin, ProjectOnboardMixin, DetailView):\n\n \"\"\"Display project onboard steps.\"\"\"\n\n model = Project\n slug_url_kwarg = 'project_slug'\n\n def get_queryset(self):\n return Project.objects.protected(self.request.user)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n project = self.get_object()\n context['versions'] = self._get_versions(project)\n\n protocol = 'http'\n if self.request.is_secure():\n protocol = 'https'\n\n version_slug = project.get_default_version()\n\n context['badge_url'] = '{}://{}{}?version={}'.format(\n protocol,\n settings.PRODUCTION_DOMAIN,\n reverse('project_badge', args=[project.slug]),\n project.get_default_version(),\n )\n context['site_url'] = '{url}?badge={version}'.format(\n url=project.get_docs_url(version_slug),\n version=version_slug,\n )\n\n return context\n\n\n@never_cache\ndef project_badge(request, project_slug):\n \"\"\"Return a sweet badge for the project.\"\"\"\n style = request.GET.get('style', 'flat')\n if style not in (\n 'flat',\n 'plastic',\n 'flat-square',\n 'for-the-badge',\n 'social',\n ):\n style = 'flat'\n\n # Get the local path to the badge files\n badge_path = os.path.join(\n os.path.dirname(__file__),\n '..',\n 'static',\n 'projects',\n 'badges',\n '%s-' + style + '.svg',\n )\n\n version_slug = request.GET.get('version', LATEST)\n file_path = badge_path % 'unknown'\n\n version = Version.objects.public(request.user).filter(\n project__slug=project_slug,\n slug=version_slug,\n ).first()\n\n if version:\n last_build = version.builds.filter(\n type='html',\n state='finished',\n ).order_by('-date').first()\n if last_build:\n if last_build.success:\n file_path = badge_path % 'passing'\n else:\n file_path = badge_path % 'failing'\n\n try:\n with open(file_path) as fd:\n return HttpResponse(\n fd.read(),\n content_type='image/svg+xml',\n )\n except (IOError, OSError):\n log.exception(\n 'Failed to read local filesystem while serving a docs badge',\n )\n return HttpResponse(status=503)\n\n\ndef project_downloads(request, 
project_slug):\n \"\"\"A detail view for a project with various downloads.\"\"\"\n project = get_object_or_404(\n Project.objects.protected(request.user),\n slug=project_slug,\n )\n versions = Version.internal.public(user=request.user, project=project)\n versions = sort_version_aware(versions)\n version_data = OrderedDict()\n for version in versions:\n data = version.get_downloads()\n # Don't show ones that have no downloads.\n if data:\n version_data[version] = data\n\n return render(\n request,\n 'projects/project_downloads.html',\n {\n 'project': project,\n 'version_data': version_data,\n 'versions': versions,\n },\n )\n\n\ndef project_download_media(request, project_slug, type_, version_slug):\n \"\"\"\n Download a specific piece of media.\n\n Perform an auth check if serving in private mode.\n\n .. warning:: This is linked directly from the HTML pages.\n It should only care about the Version permissions,\n not the actual Project permissions.\n \"\"\"\n version = get_object_or_404(\n Version.objects.public(user=request.user),\n project__slug=project_slug,\n slug=version_slug,\n )\n\n # Send media download to analytics - sensitive data is anonymized\n analytics_event.delay(\n event_category='Build Media',\n event_action=f'Download {type_}',\n event_label=str(version),\n ua=request.META.get('HTTP_USER_AGENT'),\n uip=get_client_ip(request),\n )\n\n if settings.DEFAULT_PRIVACY_LEVEL == 'public' or settings.DEBUG:\n\n storage = get_storage_class(settings.RTD_BUILD_MEDIA_STORAGE)()\n storage_path = version.project.get_storage_path(\n type_=type_, version_slug=version_slug,\n version_type=version.type,\n )\n if storage.exists(storage_path):\n return HttpResponseRedirect(storage.url(storage_path))\n\n media_path = os.path.join(\n settings.MEDIA_URL,\n type_,\n project_slug,\n version_slug,\n '%s.%s' % (project_slug, type_.replace('htmlzip', 'zip')),\n )\n return HttpResponseRedirect(media_path)\n\n # Get relative media path\n path = (\n version.project.get_production_media_path(\n type_=type_,\n version_slug=version_slug,\n ).replace(settings.PRODUCTION_ROOT, '/prod_artifacts')\n )\n content_type, encoding = mimetypes.guess_type(path)\n content_type = content_type or 'application/octet-stream'\n response = HttpResponse(content_type=content_type)\n if encoding:\n response['Content-Encoding'] = encoding\n response['X-Accel-Redirect'] = path\n # Include version in filename; this fixes a long-standing bug\n filename = '{}-{}.{}'.format(\n project_slug,\n version_slug,\n path.split('.')[-1],\n )\n response['Content-Disposition'] = 'filename=%s' % filename\n return response\n\n\ndef project_versions(request, project_slug):\n \"\"\"\n Project version list view.\n\n Shows the available versions and lets the user choose which ones to build.\n \"\"\"\n project = get_object_or_404(\n Project.objects.protected(request.user),\n slug=project_slug,\n )\n\n versions = Version.internal.public(\n user=request.user,\n project=project,\n only_active=False,\n )\n active_versions = versions.filter(active=True)\n\n # Limit inactive versions in case a project has a large number of branches or tags\n # Filter inactive versions based on the query string\n inactive_versions = versions.filter(active=False)\n version_filter = request.GET.get('version_filter', '')\n if version_filter:\n inactive_versions = inactive_versions.filter(verbose_name__icontains=version_filter)\n inactive_versions = inactive_versions[:100]\n\n # If there's a wiped query string, check the string against the versions\n # list and display a success 
message. Deleting directories doesn't know how\n # to fail. :)\n wiped = request.GET.get('wipe', '')\n wiped_version = versions.filter(slug=wiped)\n if wiped and wiped_version.count():\n messages.success(request, 'Version wiped: ' + wiped)\n\n # Optimize project permission checks\n prefetch_related_objects([project], 'users')\n\n return render(\n request,\n 'projects/project_version_list.html',\n {\n 'inactive_versions': inactive_versions,\n 'active_versions': active_versions,\n 'project': project,\n },\n )\n\n\ndef project_analytics(request, project_slug):\n \"\"\"Have a analytics API placeholder.\"\"\"\n project = get_object_or_404(\n Project.objects.protected(request.user),\n slug=project_slug,\n )\n analytics_cache = cache.get('analytics:%s' % project_slug)\n if analytics_cache:\n analytics = json.loads(analytics_cache)\n else:\n try:\n resp = requests.get(\n '{host}/api/v1/index/1/heatmap/'.format(\n host=settings.GROK_API_HOST,\n ),\n params={'project': project.slug, 'days': 7, 'compare': True},\n )\n analytics = resp.json()\n cache.set('analytics:%s' % project_slug, resp.content, 1800)\n except requests.exceptions.RequestException:\n analytics = None\n\n if analytics:\n page_list = list(\n reversed(\n sorted(\n list(analytics['page'].items()),\n key=operator.itemgetter(1),\n ),\n ),\n )\n version_list = list(\n reversed(\n sorted(\n list(analytics['version'].items()),\n key=operator.itemgetter(1),\n ),\n ),\n )\n else:\n page_list = []\n version_list = []\n\n full = request.GET.get('full')\n if not full:\n page_list = page_list[:20]\n version_list = version_list[:20]\n\n return render(\n request,\n 'projects/project_analytics.html',\n {\n 'project': project,\n 'analytics': analytics,\n 'page_list': page_list,\n 'version_list': version_list,\n 'full': full,\n },\n )\n\n\ndef project_embed(request, project_slug):\n \"\"\"Have a content API placeholder.\"\"\"\n project = get_object_or_404(\n Project.objects.protected(request.user),\n slug=project_slug,\n )\n version = project.versions.get(slug=LATEST)\n files = version.imported_files.filter(\n name__endswith='.html',\n ).order_by('path')\n\n return render(\n request,\n 'projects/project_embed.html',\n {\n 'project': project,\n 'files': files,\n 'settings': {\n 'PUBLIC_API_URL': settings.PUBLIC_API_URL,\n 'URI': request.build_absolute_uri(location='/').rstrip('/'),\n },\n },\n )\n", "path": "readthedocs/projects/views/public.py"}]} | 3,977 | 193 |
gh_patches_debug_52443 | rasdani/github-patches | git_diff | ipython__ipython-3901 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
under Windows, "ipython3 nbconvert "C:/blabla/first_try.ipynb" --to latex --post PDF" POST processing action fails because of a bad parameter
Hello,
The "one single step" option to create a ".pdf" from a .ipynb" fails on my windows python3 pc
Nbconvert apparently tries compile ".TEX" result with
"pdflatex .\first_try.tex"
==> This triggers bad behaviour in pdflatex, which picks the "pdfTeX" option instead of "pdfLaTeX".
The option that works on my Windows PC, when I run the command by hand, is to leave out the ".\" prefix:
"pdflatex first_try.tex"
UPDATE: replacing ".\" with "./" also seems to be a solution.
"pdflatex ./first_try.tex"
A hint about the problem comes from here:
http://tex.stackexchange.com/questions/78178/miktex-how-to-run-pdflatex-from-cmd-prompt-on-windows-7-compared-to-windows-xp
Details below.
Cheers
*\* instruction *\*
ipython3 nbconvert "C:/blabla/first_try.ipynb" --to latex --post PDF"
**\* (start of the output ) ***
C:\Users\parent\Desktop\winpython\WinPython-32bit-3.3.2.1rc1\python-3.3.2>ipytho
n3 nbconvert "C:/blabla//first_try.ipynb" --to latex --po
st PDF
[NbConvertApp] Using existing profile dir: 'C:\Users\parent\Desktop\winpytho
n\WinPython-32bit-3.3.2.1rc1\settings\.ipython\profile_default'
[NbConvertApp] Converting notebook C:/blabla/first_try.ipynb to latex
[NbConvertApp] Support files will be in first_try_files\
[NbConvertApp] Loaded template latex_article.tplx
[NbConvertApp] Writing 53680 bytes to .\first_try.tex
[NbConvertApp] Building PDF: `pdflatex .\first_try.tex`
This is pdfTeX, Version 3.1415926-2.4-1.40.13 (TeX Live 2012/W32TeX)
restricted \write18 enabled.
entering extended mode
! Undefined control sequence.
<_> .\first
_try.tex
?
*_\* (end of the output ) ***
--- END ISSUE ---
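The issue above boils down to how the output path is assembled on Windows. A minimal illustration of the two possible defaults, as a hypothetical snippet that is not part of the repository (the filename is taken from the log above):

```python
import os

# The default build_directory "." gets joined onto the notebook name, which on
# Windows produces the backslash-prefixed path from the log. pdflatex then reads
# "\first" as a TeX control sequence ("! Undefined control sequence") instead of
# treating the argument as a filename.
print(os.path.join(".", "first_try.tex"))  # '.\\first_try.tex' on Windows
print(os.path.join("", "first_try.tex"))   # 'first_try.tex' -- no prefix at all
```

An empty default therefore sidesteps the prefix entirely, matching the manual `pdflatex first_try.tex` workaround described in the issue.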
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `IPython/nbconvert/writers/files.py`
Content:
```
1 """
2 Contains writer for writing nbconvert output to filesystem.
3 """
4 #-----------------------------------------------------------------------------
5 #Copyright (c) 2013, the IPython Development Team.
6 #
7 #Distributed under the terms of the Modified BSD License.
8 #
9 #The full license is in the file COPYING.txt, distributed with this software.
10 #-----------------------------------------------------------------------------
11
12 #-----------------------------------------------------------------------------
13 # Imports
14 #-----------------------------------------------------------------------------
15
16 import io
17 import os
18 import glob
19
20 from IPython.utils.traitlets import Unicode
21 from IPython.utils.path import link_or_copy
22
23 from .base import WriterBase
24
25 #-----------------------------------------------------------------------------
26 # Classes
27 #-----------------------------------------------------------------------------
28
29 class FilesWriter(WriterBase):
30 """Consumes nbconvert output and produces files."""
31
32
33 build_directory = Unicode(".", config=True,
34 help="""Directory to write output to. Leave blank
35 to output to the current directory""")
36
37
38 # Make sure that the output directory exists.
39 def _build_directory_changed(self, name, old, new):
40 if new and not os.path.isdir(new):
41 os.makedirs(new)
42
43
44 def __init__(self, **kw):
45 super(FilesWriter, self).__init__(**kw)
46 self._build_directory_changed('build_directory', self.build_directory,
47 self.build_directory)
48
49 def _makedir(self, path):
50 """Make a directory if it doesn't already exist"""
51 if not os.path.isdir(path):
52 self.log.info("Making directory %s", path)
53 os.makedirs(path)
54
55 def write(self, output, resources, notebook_name=None, **kw):
56 """
57 Consume and write Jinja output to the file system. Output directory
58 is set via the 'build_directory' variable of this instance (a
59 configurable).
60
61 See base for more...
62 """
63
64 # Pull the extension and subdir from the resources dict.
65 output_extension = resources['output_extension']
66
67 # Write all of the extracted resources to the destination directory.
68 # NOTE: WE WRITE EVERYTHING AS-IF IT'S BINARY. THE EXTRACT FIG
69 # TRANSFORMER SHOULD HANDLE UNIX/WINDOWS LINE ENDINGS...
70 for filename, data in resources.get('outputs', {}).items():
71
72 # Determine where to write the file to
73 dest = os.path.join(self.build_directory, filename)
74 path = os.path.dirname(dest)
75 self._makedir(path)
76
77 # Write file
78 self.log.debug("Writing %i bytes to support file %s", len(data), dest)
79 with io.open(dest, 'wb') as f:
80 f.write(data)
81
82 # Copy referenced files to output directory
83 if self.build_directory:
84 for filename in self.files:
85
86 # Copy files that match search pattern
87 for matching_filename in glob.glob(filename):
88
89 # Make sure folder exists.
90 dest = os.path.join(self.build_directory, filename)
91 path = os.path.dirname(dest)
92 self._makedir(path)
93
94 # Copy if destination is different.
95 if not os.path.normpath(dest) == os.path.normpath(matching_filename):
96 self.log.info("Linking %s -> %s", matching_filename, dest)
97 link_or_copy(matching_filename, dest)
98
99 # Determine where to write conversion results.
100 dest = notebook_name + '.' + output_extension
101 if self.build_directory:
102 dest = os.path.join(self.build_directory, dest)
103
104 # Write conversion results.
105 self.log.info("Writing %i bytes to %s", len(output), dest)
106 with io.open(dest, 'w', encoding='utf-8') as f:
107 f.write(output)
108 return dest
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/IPython/nbconvert/writers/files.py b/IPython/nbconvert/writers/files.py
--- a/IPython/nbconvert/writers/files.py
+++ b/IPython/nbconvert/writers/files.py
@@ -30,7 +30,7 @@
"""Consumes nbconvert output and produces files."""
- build_directory = Unicode(".", config=True,
+ build_directory = Unicode("", config=True,
help="""Directory to write output to. Leave blank
to output to the current directory""")
| {"golden_diff": "diff --git a/IPython/nbconvert/writers/files.py b/IPython/nbconvert/writers/files.py\n--- a/IPython/nbconvert/writers/files.py\n+++ b/IPython/nbconvert/writers/files.py\n@@ -30,7 +30,7 @@\n \"\"\"Consumes nbconvert output and produces files.\"\"\"\n \n \n- build_directory = Unicode(\".\", config=True, \n+ build_directory = Unicode(\"\", config=True,\n help=\"\"\"Directory to write output to. Leave blank\n to output to the current directory\"\"\")\n", "issue": "under Windows, \"ipython3 nbconvert \"C:/blabla/first_try.ipynb\" --to latex --post PDF\" POST processing action fails because of a bad parameter\nHello,\n\nThe \"one single step\" option to create a \".pdf\" from a .ipynb\" fails on my windows python3 pc \n\nNbconvert apparently tries compile \".TEX\" result with \n\n\"pdflatex .\\first_try.tex\" \n\n==> It generates a bad behaviour of pdflatex, which picks \"pdfTex\" option instead of \"PdfLatex\".\n\nThe working option, on my Windows PC and when I do it by hand, is not to put the \".\\\" \n\n\"pdflatex first_try.tex\" \n\nUPDATE : replacing \".\\\" per \"./\" seems also to be a solution.\n\"pdflatex ./first_try.tex\" \n\nHint to the problem comes from here \nhttp://tex.stackexchange.com/questions/78178/miktex-how-to-run-pdflatex-from-cmd-prompt-on-windows-7-compared-to-windows-xp \n\nDetails below.\n\nSheers\n\n*\\* instruction *\\* \nipython3 nbconvert \"C:/blabla/first_try.ipynb\" --to latex --post PDF\"\n\n**\\* (start of the output ) ***\nC:\\Users\\parent\\Desktop\\winpython\\WinPython-32bit-3.3.2.1rc1\\python-3.3.2>ipytho\nn3 nbconvert \"C:/blabla//first_try.ipynb\" --to latex --po\nst PDF\n[NbConvertApp] Using existing profile dir: 'C:\\Users\\parent\\Desktop\\winpytho\nn\\WinPython-32bit-3.3.2.1rc1\\settings\\.ipython\\profile_default'\n[NbConvertApp] Converting notebook C:/blabla/first_try.ipynb to latex\n[NbConvertApp] Support files will be in first_try_files\\\n[NbConvertApp] Loaded template latex_article.tplx\n[NbConvertApp] Writing 53680 bytes to .\\first_try.tex\n[NbConvertApp] Building PDF: `pdflatex .\\first_try.tex`\nThis is pdfTeX, Version 3.1415926-2.4-1.40.13 (TeX Live 2012/W32TeX)\n restricted \\write18 enabled.\nentering extended mode\n! Undefined control sequence.\n<_> .\\first\n _try.tex\n?\n*_\\* (end of the output ) ***\n\n", "before_files": [{"content": "\"\"\"\nContains writer for writing nbconvert output to filesystem.\n\"\"\"\n#-----------------------------------------------------------------------------\n#Copyright (c) 2013, the IPython Development Team.\n#\n#Distributed under the terms of the Modified BSD License.\n#\n#The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\nimport io\nimport os\nimport glob\n\nfrom IPython.utils.traitlets import Unicode\nfrom IPython.utils.path import link_or_copy\n\nfrom .base import WriterBase\n\n#-----------------------------------------------------------------------------\n# Classes\n#-----------------------------------------------------------------------------\n\nclass FilesWriter(WriterBase):\n \"\"\"Consumes nbconvert output and produces files.\"\"\"\n\n\n build_directory = Unicode(\".\", config=True, \n help=\"\"\"Directory to write output to. 
Leave blank\n to output to the current directory\"\"\")\n\n\n # Make sure that the output directory exists.\n def _build_directory_changed(self, name, old, new):\n if new and not os.path.isdir(new):\n os.makedirs(new)\n\n\n def __init__(self, **kw):\n super(FilesWriter, self).__init__(**kw)\n self._build_directory_changed('build_directory', self.build_directory, \n self.build_directory)\n \n def _makedir(self, path):\n \"\"\"Make a directory if it doesn't already exist\"\"\"\n if not os.path.isdir(path):\n self.log.info(\"Making directory %s\", path)\n os.makedirs(path)\n\n def write(self, output, resources, notebook_name=None, **kw):\n \"\"\"\n Consume and write Jinja output to the file system. Output directory\n is set via the 'build_directory' variable of this instance (a \n configurable).\n\n See base for more...\n \"\"\"\n\n # Pull the extension and subdir from the resources dict.\n output_extension = resources['output_extension']\n\n # Write all of the extracted resources to the destination directory.\n # NOTE: WE WRITE EVERYTHING AS-IF IT'S BINARY. THE EXTRACT FIG\n # TRANSFORMER SHOULD HANDLE UNIX/WINDOWS LINE ENDINGS...\n for filename, data in resources.get('outputs', {}).items():\n\n # Determine where to write the file to\n dest = os.path.join(self.build_directory, filename)\n path = os.path.dirname(dest)\n self._makedir(path)\n\n # Write file\n self.log.debug(\"Writing %i bytes to support file %s\", len(data), dest)\n with io.open(dest, 'wb') as f:\n f.write(data)\n\n # Copy referenced files to output directory\n if self.build_directory:\n for filename in self.files:\n\n # Copy files that match search pattern\n for matching_filename in glob.glob(filename):\n\n # Make sure folder exists.\n dest = os.path.join(self.build_directory, filename)\n path = os.path.dirname(dest)\n self._makedir(path)\n\n # Copy if destination is different.\n if not os.path.normpath(dest) == os.path.normpath(matching_filename):\n self.log.info(\"Linking %s -> %s\", matching_filename, dest)\n link_or_copy(matching_filename, dest)\n\n # Determine where to write conversion results.\n dest = notebook_name + '.' 
+ output_extension\n if self.build_directory:\n dest = os.path.join(self.build_directory, dest)\n\n # Write conversion results.\n self.log.info(\"Writing %i bytes to %s\", len(output), dest)\n with io.open(dest, 'w', encoding='utf-8') as f:\n f.write(output)\n return dest", "path": "IPython/nbconvert/writers/files.py"}], "after_files": [{"content": "\"\"\"\nContains writer for writing nbconvert output to filesystem.\n\"\"\"\n#-----------------------------------------------------------------------------\n#Copyright (c) 2013, the IPython Development Team.\n#\n#Distributed under the terms of the Modified BSD License.\n#\n#The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\nimport io\nimport os\nimport glob\n\nfrom IPython.utils.traitlets import Unicode\nfrom IPython.utils.path import link_or_copy\n\nfrom .base import WriterBase\n\n#-----------------------------------------------------------------------------\n# Classes\n#-----------------------------------------------------------------------------\n\nclass FilesWriter(WriterBase):\n \"\"\"Consumes nbconvert output and produces files.\"\"\"\n\n\n build_directory = Unicode(\"\", config=True,\n help=\"\"\"Directory to write output to. Leave blank\n to output to the current directory\"\"\")\n\n\n # Make sure that the output directory exists.\n def _build_directory_changed(self, name, old, new):\n if new and not os.path.isdir(new):\n os.makedirs(new)\n\n\n def __init__(self, **kw):\n super(FilesWriter, self).__init__(**kw)\n self._build_directory_changed('build_directory', self.build_directory, \n self.build_directory)\n \n def _makedir(self, path):\n \"\"\"Make a directory if it doesn't already exist\"\"\"\n if not os.path.isdir(path):\n self.log.info(\"Making directory %s\", path)\n os.makedirs(path)\n\n def write(self, output, resources, notebook_name=None, **kw):\n \"\"\"\n Consume and write Jinja output to the file system. Output directory\n is set via the 'build_directory' variable of this instance (a \n configurable).\n\n See base for more...\n \"\"\"\n\n # Pull the extension and subdir from the resources dict.\n output_extension = resources['output_extension']\n\n # Write all of the extracted resources to the destination directory.\n # NOTE: WE WRITE EVERYTHING AS-IF IT'S BINARY. 
THE EXTRACT FIG\n # TRANSFORMER SHOULD HANDLE UNIX/WINDOWS LINE ENDINGS...\n for filename, data in resources.get('outputs', {}).items():\n\n # Determine where to write the file to\n dest = os.path.join(self.build_directory, filename)\n path = os.path.dirname(dest)\n self._makedir(path)\n\n # Write file\n self.log.debug(\"Writing %i bytes to support file %s\", len(data), dest)\n with io.open(dest, 'wb') as f:\n f.write(data)\n\n # Copy referenced files to output directory\n if self.build_directory:\n for filename in self.files:\n\n # Copy files that match search pattern\n for matching_filename in glob.glob(filename):\n\n # Make sure folder exists.\n dest = os.path.join(self.build_directory, filename)\n path = os.path.dirname(dest)\n self._makedir(path)\n\n # Copy if destination is different.\n if not os.path.normpath(dest) == os.path.normpath(matching_filename):\n self.log.info(\"Linking %s -> %s\", matching_filename, dest)\n link_or_copy(matching_filename, dest)\n\n # Determine where to write conversion results.\n dest = notebook_name + '.' + output_extension\n if self.build_directory:\n dest = os.path.join(self.build_directory, dest)\n\n # Write conversion results.\n self.log.info(\"Writing %i bytes to %s\", len(output), dest)\n with io.open(dest, 'w', encoding='utf-8') as f:\n f.write(output)\n return dest", "path": "IPython/nbconvert/writers/files.py"}]} | 1,801 | 114 |
gh_patches_debug_23598 | rasdani/github-patches | git_diff | facebookresearch__hydra-1588 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Feature Request] RQ Launcher SSL support
# 🚀 Feature Request
RQ (Redis-Queue) has [support](https://github.com/rq/rq/blob/master/rq/cli/helpers.py#L56) for SSL connections. We can easily port this over to the plugin.
## Motivation
Allows users to use SSL Redis connections in the RQ Hydra Launcher. e.g. some cloud providers require you to connect with SSL (`rediss` instead of `redis`) and don't let you connect otherwise.
## Pitch
Will open PR 👍🏻
## Additional context
Currently Hydra RQ launcher only supports the following options:
```shell
export REDIS_HOST=localhost
export REDIS_PORT=6379
export REDIS_DB=0
export REDIS_PASSWORD=
python run.py hydra/launcher=rq random_state=0,1,2,3 --multirun
```
→ There is no way to configure SSL if the server requires it.
Therefore, we require an option to configure Redis connections over SSL, e.g.:
```shell
export REDIS_SSL=True
python run.py hydra/launcher=rq random_state=0,1,2,3 --multirun
```
✨
--- END ISSUE ---
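The request above amounts to forwarding two extra settings to the Redis client. A minimal sketch of the intended connection call, assuming the environment-variable names follow the existing `REDIS_*` convention (`REDIS_SSL` appears in the issue; `REDIS_SSL_CA_CERTS` is an assumed companion for custom certificates):

```python
import os

from redis import Redis

# Hypothetical sketch, not the plugin's code: redis-py already accepts `ssl`
# and `ssl_ca_certs` keyword arguments, so the launcher only needs to read the
# environment and pass them through when building the connection.
connection = Redis(
    host=os.environ.get("REDIS_HOST", "localhost"),
    port=int(os.environ.get("REDIS_PORT", "6379")),
    db=int(os.environ.get("REDIS_DB", "0")),
    password=os.environ.get("REDIS_PASSWORD") or None,
    ssl=os.environ.get("REDIS_SSL", "False") == "True",
    ssl_ca_certs=os.environ.get("REDIS_SSL_CA_CERTS") or None,
)
```

In the plugin itself these values would come from the launcher's config rather than raw `os.environ` lookups, but the keyword arguments to `Redis` are the relevant part.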
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/config.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 from dataclasses import dataclass
3 from typing import Optional
4
5 from hydra.core.config_store import ConfigStore
6 from omegaconf import II
7
8
9 @dataclass
10 class RedisConf:
11 # host address via REDIS_HOST environment variable, default: localhost
12 host: str = II("oc.env:REDIS_HOST,localhost")
13 # port via REDIS_PORT environment variable, default: 6379
14 port: int = II("oc.env:REDIS_PORT,'6379'")
15 # database via REDIS_DB environment variable, default: 0
16 db: Optional[str] = II("oc.env:REDIS_DB,'0'")
17 # password via REDIS_PASSWORD environment variable, default: no password
18 password: Optional[str] = II("oc.env:REDIS_PASSWORD,null")
19 # switch to run without redis server in single thread, for testing purposes only
20 mock: bool = II("oc.env:REDIS_MOCK,'False'")
21
22
23 @dataclass
24 class EnqueueConf:
25 # maximum runtime of the job before it's killed (e.g. "1d" for 1 day, units: d/h/m/s), default: no limit
26 job_timeout: Optional[str] = None
27 # maximum queued time before the job before is discarded (e.g. "1d" for 1 day, units: d/h/m/s), default: no limit
28 ttl: Optional[str] = None
29 # how long successful jobs and their results are kept (e.g. "1d" for 1 day, units: d/h/m/s), default: no limit
30 result_ttl: Optional[str] = None
31 # specifies how long failed jobs are kept (e.g. "1d" for 1 day, units: d/h/m/s), default: no limit
32 failure_ttl: Optional[str] = None
33 # place job at the front of the queue, instead of the back
34 at_front: bool = False
35 # job id, will be overidden automatically by a uuid unless specified explicitly
36 job_id: Optional[str] = None
37 # description, will be overidden automatically unless specified explicitly
38 description: Optional[str] = None
39
40
41 @dataclass
42 class RQLauncherConf:
43 _target_: str = "hydra_plugins.hydra_rq_launcher.rq_launcher.RQLauncher"
44 # enqueue configuration
45 enqueue: EnqueueConf = EnqueueConf()
46 # queue name
47 queue: str = "default"
48 # redis configuration
49 redis: RedisConf = RedisConf()
50 # stop after enqueueing by raising custom exception
51 stop_after_enqueue: bool = False
52 # wait time in seconds when polling results
53 wait_polling: float = 1.0
54
55
56 ConfigStore.instance().store(
57 group="hydra/launcher", name="rq", node=RQLauncherConf, provider="rq_launcher"
58 )
59
```
Path: `plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/_core.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import logging
3 import time
4 import uuid
5 from pathlib import Path
6 from typing import Any, Dict, List, Sequence
7
8 import cloudpickle # type: ignore
9 from fakeredis import FakeStrictRedis # type: ignore
10 from hydra.core.hydra_config import HydraConfig
11 from hydra.core.singleton import Singleton
12 from hydra.core.utils import (
13 JobReturn,
14 configure_log,
15 filter_overrides,
16 run_job,
17 setup_globals,
18 )
19 from hydra.types import HydraContext, TaskFunction
20 from omegaconf import DictConfig, OmegaConf, open_dict
21 from redis import Redis
22 from rq import Queue # type: ignore
23
24 from .rq_launcher import RQLauncher
25
26 log = logging.getLogger(__name__)
27
28
29 def execute_job(
30 hydra_context: HydraContext,
31 sweep_config: DictConfig,
32 task_function: TaskFunction,
33 singleton_state: Dict[Any, Any],
34 ) -> JobReturn:
35 setup_globals()
36 Singleton.set_state(singleton_state)
37
38 HydraConfig.instance().set_config(sweep_config)
39
40 ret = run_job(
41 hydra_context=hydra_context,
42 task_function=task_function,
43 config=sweep_config,
44 job_dir_key="hydra.sweep.dir",
45 job_subdir_key="hydra.sweep.subdir",
46 )
47
48 return ret
49
50
51 def launch(
52 launcher: RQLauncher, job_overrides: Sequence[Sequence[str]], initial_job_idx: int
53 ) -> Sequence[JobReturn]:
54 """
55 :param job_overrides: a List of List<String>, where each inner list is the arguments for one job run.
56 :param initial_job_idx: Initial job idx in batch.
57 :return: an array of return values from run_job with indexes corresponding to the input list indexes.
58 """
59 setup_globals()
60 assert launcher.config is not None
61 assert launcher.task_function is not None
62 assert launcher.hydra_context is not None
63
64 configure_log(launcher.config.hydra.hydra_logging, launcher.config.hydra.verbose)
65 sweep_dir = Path(str(launcher.config.hydra.sweep.dir))
66 sweep_dir.mkdir(parents=True, exist_ok=True)
67
68 # RQ configuration
69 rq_cfg = launcher.rq
70
71 # Redis configuration
72 is_async = not rq_cfg.redis.mock
73 if is_async:
74 connection = Redis(
75 host=rq_cfg.redis.host,
76 port=rq_cfg.redis.port,
77 db=rq_cfg.redis.db,
78 password=rq_cfg.redis.password,
79 )
80 else:
81 log.info("Running in synchronous mode")
82 connection = FakeStrictRedis()
83 queue = Queue(
84 name=rq_cfg.queue,
85 connection=connection,
86 is_async=is_async,
87 serializer=cloudpickle,
88 )
89
90 # Enqueue jobs
91 jobs: List[Any] = []
92 singleton_state = Singleton.get_state()
93 log.info(
94 f"RQ Launcher is enqueuing {len(job_overrides)} job(s) in queue : {rq_cfg.queue}"
95 )
96 log.info("Sweep output dir : {}".format(sweep_dir))
97 if not sweep_dir.is_absolute():
98 log.warn(
99 "Using relative sweep dir: Please be aware that dir will be relative to where workers are started from."
100 )
101
102 for idx, overrides in enumerate(job_overrides):
103 description = " ".join(filter_overrides(overrides))
104
105 enqueue_keywords = OmegaConf.to_container(rq_cfg.enqueue, resolve=True)
106 assert isinstance(enqueue_keywords, dict)
107 if enqueue_keywords["job_timeout"] is None:
108 enqueue_keywords["job_timeout"] = -1
109 if enqueue_keywords["result_ttl"] is None:
110 enqueue_keywords["result_ttl"] = -1
111 if enqueue_keywords["failure_ttl"] is None:
112 enqueue_keywords["failure_ttl"] = -1
113 if enqueue_keywords["job_id"] is None:
114 enqueue_keywords["job_id"] = str(uuid.uuid4())
115 if enqueue_keywords["description"] is None:
116 enqueue_keywords["description"] = description
117
118 sweep_config = launcher.hydra_context.config_loader.load_sweep_config(
119 launcher.config, list(overrides)
120 )
121 with open_dict(sweep_config):
122 sweep_config.hydra.job.id = enqueue_keywords["job_id"]
123 sweep_config.hydra.job.num = initial_job_idx + idx
124
125 job = queue.enqueue(
126 execute_job,
127 hydra_context=launcher.hydra_context,
128 sweep_config=sweep_config,
129 task_function=launcher.task_function,
130 singleton_state=singleton_state,
131 **enqueue_keywords,
132 )
133 jobs.append(job)
134
135 log.info(f"Enqueued {job.get_id()}")
136 log.info(f"\t#{idx+1} : {description}")
137
138 log.info("Finished enqueuing")
139 if rq_cfg.stop_after_enqueue:
140 raise StopAfterEnqueue
141
142 log.info(f"Polling job statuses every {rq_cfg.wait_polling} sec")
143 while True:
144 job_ids_done = [
145 job.get_id() for job in jobs if job.get_status() in ["finished", "failed"]
146 ]
147 if len(job_ids_done) == len(jobs):
148 break
149 else:
150 time.sleep(rq_cfg.wait_polling)
151
152 runs: List[JobReturn] = []
153 for job in jobs:
154 result = job.result if job.result is not None else None
155 runs.append(result)
156
157 return runs
158
159
160 class StopAfterEnqueue(Exception):
161 pass
162
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/_core.py b/plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/_core.py
--- a/plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/_core.py
+++ b/plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/_core.py
@@ -76,6 +76,8 @@
port=rq_cfg.redis.port,
db=rq_cfg.redis.db,
password=rq_cfg.redis.password,
+ ssl=rq_cfg.redis.ssl,
+ ssl_ca_certs=rq_cfg.redis.ssl_ca_certs,
)
else:
log.info("Running in synchronous mode")
diff --git a/plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/config.py b/plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/config.py
--- a/plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/config.py
+++ b/plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/config.py
@@ -16,6 +16,10 @@
db: Optional[str] = II("oc.env:REDIS_DB,'0'")
# password via REDIS_PASSWORD environment variable, default: no password
password: Optional[str] = II("oc.env:REDIS_PASSWORD,null")
+ # enable/disable SSL, via REDIS_SSL environment variable, default False
+ ssl: bool = II("oc.env:REDIS_SSL,'False'")
+ # path to custom certs, via REDIS_SSL_CA_CERTS env veriable, default none
+ ssl_ca_certs: Optional[str] = II("oc.env:REDIS_SSL_CA_CERTS,null")
# switch to run without redis server in single thread, for testing purposes only
mock: bool = II("oc.env:REDIS_MOCK,'False'")
| {"golden_diff": "diff --git a/plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/_core.py b/plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/_core.py\n--- a/plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/_core.py\n+++ b/plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/_core.py\n@@ -76,6 +76,8 @@\n port=rq_cfg.redis.port,\n db=rq_cfg.redis.db,\n password=rq_cfg.redis.password,\n+ ssl=rq_cfg.redis.ssl,\n+ ssl_ca_certs=rq_cfg.redis.ssl_ca_certs,\n )\n else:\n log.info(\"Running in synchronous mode\")\ndiff --git a/plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/config.py b/plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/config.py\n--- a/plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/config.py\n+++ b/plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/config.py\n@@ -16,6 +16,10 @@\n db: Optional[str] = II(\"oc.env:REDIS_DB,'0'\")\n # password via REDIS_PASSWORD environment variable, default: no password\n password: Optional[str] = II(\"oc.env:REDIS_PASSWORD,null\")\n+ # enable/disable SSL, via REDIS_SSL environment variable, default False\n+ ssl: bool = II(\"oc.env:REDIS_SSL,'False'\")\n+ # path to custom certs, via REDIS_SSL_CA_CERTS env veriable, default none\n+ ssl_ca_certs: Optional[str] = II(\"oc.env:REDIS_SSL_CA_CERTS,null\")\n # switch to run without redis server in single thread, for testing purposes only\n mock: bool = II(\"oc.env:REDIS_MOCK,'False'\")\n", "issue": "[Feature Request] RQ Launcher SSL support\n# \ud83d\ude80 Feature Request\r\nRQ (Redis-Queue) has [support](https://github.com/rq/rq/blob/master/rq/cli/helpers.py#L56) for SSL connections. We can easily port this over to the plugin.\r\n\r\n## Motivation\r\nAllows users to use SSL Redis connections in the RQ Hydra Launcher. e.g. some cloud providers require you to connect with SSL (`rediss` instead of `redis`) and don't let you connect otherwise.\r\n\r\n## Pitch\r\nWill open PR \ud83d\udc4d\ud83c\udffb\r\n\r\n## Additional context\r\nCurrently Hydra RQ launcher only supports the following options:\r\n```shell\r\nexport REDIS_HOST=localhost\r\nexport REDIS_PORT=6379\r\nexport REDIS_DB=0\r\nexport REDIS_PASSWORD=\r\npython run.py hydra/launcher=rq random_state=0,1,2,3 --multirun\r\n```\r\n\u2192 no way to configure SSL if server requires so.\r\n\r\nTherefore, we require an option to configure Redis connections over SSL, e.g.:\r\n```shell\r\nexport REDIS_SSL=True\r\npython run.py hydra/launcher=rq random_state=0,1,2,3 --multirun\r\n```\r\n\r\n\u2728\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom dataclasses import dataclass\nfrom typing import Optional\n\nfrom hydra.core.config_store import ConfigStore\nfrom omegaconf import II\n\n\n@dataclass\nclass RedisConf:\n # host address via REDIS_HOST environment variable, default: localhost\n host: str = II(\"oc.env:REDIS_HOST,localhost\")\n # port via REDIS_PORT environment variable, default: 6379\n port: int = II(\"oc.env:REDIS_PORT,'6379'\")\n # database via REDIS_DB environment variable, default: 0\n db: Optional[str] = II(\"oc.env:REDIS_DB,'0'\")\n # password via REDIS_PASSWORD environment variable, default: no password\n password: Optional[str] = II(\"oc.env:REDIS_PASSWORD,null\")\n # switch to run without redis server in single thread, for testing purposes only\n mock: bool = II(\"oc.env:REDIS_MOCK,'False'\")\n\n\n@dataclass\nclass EnqueueConf:\n # maximum runtime of the job before it's killed (e.g. 
\"1d\" for 1 day, units: d/h/m/s), default: no limit\n job_timeout: Optional[str] = None\n # maximum queued time before the job before is discarded (e.g. \"1d\" for 1 day, units: d/h/m/s), default: no limit\n ttl: Optional[str] = None\n # how long successful jobs and their results are kept (e.g. \"1d\" for 1 day, units: d/h/m/s), default: no limit\n result_ttl: Optional[str] = None\n # specifies how long failed jobs are kept (e.g. \"1d\" for 1 day, units: d/h/m/s), default: no limit\n failure_ttl: Optional[str] = None\n # place job at the front of the queue, instead of the back\n at_front: bool = False\n # job id, will be overidden automatically by a uuid unless specified explicitly\n job_id: Optional[str] = None\n # description, will be overidden automatically unless specified explicitly\n description: Optional[str] = None\n\n\n@dataclass\nclass RQLauncherConf:\n _target_: str = \"hydra_plugins.hydra_rq_launcher.rq_launcher.RQLauncher\"\n # enqueue configuration\n enqueue: EnqueueConf = EnqueueConf()\n # queue name\n queue: str = \"default\"\n # redis configuration\n redis: RedisConf = RedisConf()\n # stop after enqueueing by raising custom exception\n stop_after_enqueue: bool = False\n # wait time in seconds when polling results\n wait_polling: float = 1.0\n\n\nConfigStore.instance().store(\n group=\"hydra/launcher\", name=\"rq\", node=RQLauncherConf, provider=\"rq_launcher\"\n)\n", "path": "plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/config.py"}, {"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport logging\nimport time\nimport uuid\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Sequence\n\nimport cloudpickle # type: ignore\nfrom fakeredis import FakeStrictRedis # type: ignore\nfrom hydra.core.hydra_config import HydraConfig\nfrom hydra.core.singleton import Singleton\nfrom hydra.core.utils import (\n JobReturn,\n configure_log,\n filter_overrides,\n run_job,\n setup_globals,\n)\nfrom hydra.types import HydraContext, TaskFunction\nfrom omegaconf import DictConfig, OmegaConf, open_dict\nfrom redis import Redis\nfrom rq import Queue # type: ignore\n\nfrom .rq_launcher import RQLauncher\n\nlog = logging.getLogger(__name__)\n\n\ndef execute_job(\n hydra_context: HydraContext,\n sweep_config: DictConfig,\n task_function: TaskFunction,\n singleton_state: Dict[Any, Any],\n) -> JobReturn:\n setup_globals()\n Singleton.set_state(singleton_state)\n\n HydraConfig.instance().set_config(sweep_config)\n\n ret = run_job(\n hydra_context=hydra_context,\n task_function=task_function,\n config=sweep_config,\n job_dir_key=\"hydra.sweep.dir\",\n job_subdir_key=\"hydra.sweep.subdir\",\n )\n\n return ret\n\n\ndef launch(\n launcher: RQLauncher, job_overrides: Sequence[Sequence[str]], initial_job_idx: int\n) -> Sequence[JobReturn]:\n \"\"\"\n :param job_overrides: a List of List<String>, where each inner list is the arguments for one job run.\n :param initial_job_idx: Initial job idx in batch.\n :return: an array of return values from run_job with indexes corresponding to the input list indexes.\n \"\"\"\n setup_globals()\n assert launcher.config is not None\n assert launcher.task_function is not None\n assert launcher.hydra_context is not None\n\n configure_log(launcher.config.hydra.hydra_logging, launcher.config.hydra.verbose)\n sweep_dir = Path(str(launcher.config.hydra.sweep.dir))\n sweep_dir.mkdir(parents=True, exist_ok=True)\n\n # RQ configuration\n rq_cfg = launcher.rq\n\n # Redis configuration\n is_async = not rq_cfg.redis.mock\n if 
is_async:\n connection = Redis(\n host=rq_cfg.redis.host,\n port=rq_cfg.redis.port,\n db=rq_cfg.redis.db,\n password=rq_cfg.redis.password,\n )\n else:\n log.info(\"Running in synchronous mode\")\n connection = FakeStrictRedis()\n queue = Queue(\n name=rq_cfg.queue,\n connection=connection,\n is_async=is_async,\n serializer=cloudpickle,\n )\n\n # Enqueue jobs\n jobs: List[Any] = []\n singleton_state = Singleton.get_state()\n log.info(\n f\"RQ Launcher is enqueuing {len(job_overrides)} job(s) in queue : {rq_cfg.queue}\"\n )\n log.info(\"Sweep output dir : {}\".format(sweep_dir))\n if not sweep_dir.is_absolute():\n log.warn(\n \"Using relative sweep dir: Please be aware that dir will be relative to where workers are started from.\"\n )\n\n for idx, overrides in enumerate(job_overrides):\n description = \" \".join(filter_overrides(overrides))\n\n enqueue_keywords = OmegaConf.to_container(rq_cfg.enqueue, resolve=True)\n assert isinstance(enqueue_keywords, dict)\n if enqueue_keywords[\"job_timeout\"] is None:\n enqueue_keywords[\"job_timeout\"] = -1\n if enqueue_keywords[\"result_ttl\"] is None:\n enqueue_keywords[\"result_ttl\"] = -1\n if enqueue_keywords[\"failure_ttl\"] is None:\n enqueue_keywords[\"failure_ttl\"] = -1\n if enqueue_keywords[\"job_id\"] is None:\n enqueue_keywords[\"job_id\"] = str(uuid.uuid4())\n if enqueue_keywords[\"description\"] is None:\n enqueue_keywords[\"description\"] = description\n\n sweep_config = launcher.hydra_context.config_loader.load_sweep_config(\n launcher.config, list(overrides)\n )\n with open_dict(sweep_config):\n sweep_config.hydra.job.id = enqueue_keywords[\"job_id\"]\n sweep_config.hydra.job.num = initial_job_idx + idx\n\n job = queue.enqueue(\n execute_job,\n hydra_context=launcher.hydra_context,\n sweep_config=sweep_config,\n task_function=launcher.task_function,\n singleton_state=singleton_state,\n **enqueue_keywords,\n )\n jobs.append(job)\n\n log.info(f\"Enqueued {job.get_id()}\")\n log.info(f\"\\t#{idx+1} : {description}\")\n\n log.info(\"Finished enqueuing\")\n if rq_cfg.stop_after_enqueue:\n raise StopAfterEnqueue\n\n log.info(f\"Polling job statuses every {rq_cfg.wait_polling} sec\")\n while True:\n job_ids_done = [\n job.get_id() for job in jobs if job.get_status() in [\"finished\", \"failed\"]\n ]\n if len(job_ids_done) == len(jobs):\n break\n else:\n time.sleep(rq_cfg.wait_polling)\n\n runs: List[JobReturn] = []\n for job in jobs:\n result = job.result if job.result is not None else None\n runs.append(result)\n\n return runs\n\n\nclass StopAfterEnqueue(Exception):\n pass\n", "path": "plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/_core.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nfrom dataclasses import dataclass\nfrom typing import Optional\n\nfrom hydra.core.config_store import ConfigStore\nfrom omegaconf import II\n\n\n@dataclass\nclass RedisConf:\n # host address via REDIS_HOST environment variable, default: localhost\n host: str = II(\"oc.env:REDIS_HOST,localhost\")\n # port via REDIS_PORT environment variable, default: 6379\n port: int = II(\"oc.env:REDIS_PORT,'6379'\")\n # database via REDIS_DB environment variable, default: 0\n db: Optional[str] = II(\"oc.env:REDIS_DB,'0'\")\n # password via REDIS_PASSWORD environment variable, default: no password\n password: Optional[str] = II(\"oc.env:REDIS_PASSWORD,null\")\n # enable/disable SSL, via REDIS_SSL environment variable, default False\n ssl: bool = II(\"oc.env:REDIS_SSL,'False'\")\n # path to custom certs, via REDIS_SSL_CA_CERTS env veriable, default none\n ssl_ca_certs: Optional[str] = II(\"oc.env:REDIS_SSL_CA_CERTS,null\")\n # switch to run without redis server in single thread, for testing purposes only\n mock: bool = II(\"oc.env:REDIS_MOCK,'False'\")\n\n\n@dataclass\nclass EnqueueConf:\n # maximum runtime of the job before it's killed (e.g. \"1d\" for 1 day, units: d/h/m/s), default: no limit\n job_timeout: Optional[str] = None\n # maximum queued time before the job before is discarded (e.g. \"1d\" for 1 day, units: d/h/m/s), default: no limit\n ttl: Optional[str] = None\n # how long successful jobs and their results are kept (e.g. \"1d\" for 1 day, units: d/h/m/s), default: no limit\n result_ttl: Optional[str] = None\n # specifies how long failed jobs are kept (e.g. \"1d\" for 1 day, units: d/h/m/s), default: no limit\n failure_ttl: Optional[str] = None\n # place job at the front of the queue, instead of the back\n at_front: bool = False\n # job id, will be overidden automatically by a uuid unless specified explicitly\n job_id: Optional[str] = None\n # description, will be overidden automatically unless specified explicitly\n description: Optional[str] = None\n\n\n@dataclass\nclass RQLauncherConf:\n _target_: str = \"hydra_plugins.hydra_rq_launcher.rq_launcher.RQLauncher\"\n # enqueue configuration\n enqueue: EnqueueConf = EnqueueConf()\n # queue name\n queue: str = \"default\"\n # redis configuration\n redis: RedisConf = RedisConf()\n # stop after enqueueing by raising custom exception\n stop_after_enqueue: bool = False\n # wait time in seconds when polling results\n wait_polling: float = 1.0\n\n\nConfigStore.instance().store(\n group=\"hydra/launcher\", name=\"rq\", node=RQLauncherConf, provider=\"rq_launcher\"\n)\n", "path": "plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/config.py"}, {"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nimport logging\nimport time\nimport uuid\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Sequence\n\nimport cloudpickle # type: ignore\nfrom fakeredis import FakeStrictRedis # type: ignore\nfrom hydra.core.hydra_config import HydraConfig\nfrom hydra.core.singleton import Singleton\nfrom hydra.core.utils import (\n JobReturn,\n configure_log,\n filter_overrides,\n run_job,\n setup_globals,\n)\nfrom hydra.types import HydraContext, TaskFunction\nfrom omegaconf import DictConfig, OmegaConf, open_dict\nfrom redis import Redis\nfrom rq import Queue # type: ignore\n\nfrom .rq_launcher import RQLauncher\n\nlog = logging.getLogger(__name__)\n\n\ndef execute_job(\n hydra_context: HydraContext,\n sweep_config: DictConfig,\n task_function: TaskFunction,\n singleton_state: Dict[Any, Any],\n) -> JobReturn:\n setup_globals()\n Singleton.set_state(singleton_state)\n\n HydraConfig.instance().set_config(sweep_config)\n\n ret = run_job(\n hydra_context=hydra_context,\n task_function=task_function,\n config=sweep_config,\n job_dir_key=\"hydra.sweep.dir\",\n job_subdir_key=\"hydra.sweep.subdir\",\n )\n\n return ret\n\n\ndef launch(\n launcher: RQLauncher, job_overrides: Sequence[Sequence[str]], initial_job_idx: int\n) -> Sequence[JobReturn]:\n \"\"\"\n :param job_overrides: a List of List<String>, where each inner list is the arguments for one job run.\n :param initial_job_idx: Initial job idx in batch.\n :return: an array of return values from run_job with indexes corresponding to the input list indexes.\n \"\"\"\n setup_globals()\n assert launcher.config is not None\n assert launcher.task_function is not None\n assert launcher.hydra_context is not None\n\n configure_log(launcher.config.hydra.hydra_logging, launcher.config.hydra.verbose)\n sweep_dir = Path(str(launcher.config.hydra.sweep.dir))\n sweep_dir.mkdir(parents=True, exist_ok=True)\n\n # RQ configuration\n rq_cfg = launcher.rq\n\n # Redis configuration\n is_async = not rq_cfg.redis.mock\n if is_async:\n connection = Redis(\n host=rq_cfg.redis.host,\n port=rq_cfg.redis.port,\n db=rq_cfg.redis.db,\n password=rq_cfg.redis.password,\n ssl=rq_cfg.redis.ssl,\n ssl_ca_certs=rq_cfg.redis.ssl_ca_certs,\n )\n else:\n log.info(\"Running in synchronous mode\")\n connection = FakeStrictRedis()\n queue = Queue(\n name=rq_cfg.queue,\n connection=connection,\n is_async=is_async,\n serializer=cloudpickle,\n )\n\n # Enqueue jobs\n jobs: List[Any] = []\n singleton_state = Singleton.get_state()\n log.info(\n f\"RQ Launcher is enqueuing {len(job_overrides)} job(s) in queue : {rq_cfg.queue}\"\n )\n log.info(\"Sweep output dir : {}\".format(sweep_dir))\n if not sweep_dir.is_absolute():\n log.warn(\n \"Using relative sweep dir: Please be aware that dir will be relative to where workers are started from.\"\n )\n\n for idx, overrides in enumerate(job_overrides):\n description = \" \".join(filter_overrides(overrides))\n\n enqueue_keywords = OmegaConf.to_container(rq_cfg.enqueue, resolve=True)\n assert isinstance(enqueue_keywords, dict)\n if enqueue_keywords[\"job_timeout\"] is None:\n enqueue_keywords[\"job_timeout\"] = -1\n if enqueue_keywords[\"result_ttl\"] is None:\n enqueue_keywords[\"result_ttl\"] = -1\n if enqueue_keywords[\"failure_ttl\"] is None:\n enqueue_keywords[\"failure_ttl\"] = -1\n if enqueue_keywords[\"job_id\"] is None:\n enqueue_keywords[\"job_id\"] = str(uuid.uuid4())\n if enqueue_keywords[\"description\"] is None:\n enqueue_keywords[\"description\"] = description\n\n sweep_config = 
launcher.hydra_context.config_loader.load_sweep_config(\n launcher.config, list(overrides)\n )\n with open_dict(sweep_config):\n sweep_config.hydra.job.id = enqueue_keywords[\"job_id\"]\n sweep_config.hydra.job.num = initial_job_idx + idx\n\n job = queue.enqueue(\n execute_job,\n hydra_context=launcher.hydra_context,\n sweep_config=sweep_config,\n task_function=launcher.task_function,\n singleton_state=singleton_state,\n **enqueue_keywords,\n )\n jobs.append(job)\n\n log.info(f\"Enqueued {job.get_id()}\")\n log.info(f\"\\t#{idx+1} : {description}\")\n\n log.info(\"Finished enqueuing\")\n if rq_cfg.stop_after_enqueue:\n raise StopAfterEnqueue\n\n log.info(f\"Polling job statuses every {rq_cfg.wait_polling} sec\")\n while True:\n job_ids_done = [\n job.get_id() for job in jobs if job.get_status() in [\"finished\", \"failed\"]\n ]\n if len(job_ids_done) == len(jobs):\n break\n else:\n time.sleep(rq_cfg.wait_polling)\n\n runs: List[JobReturn] = []\n for job in jobs:\n result = job.result if job.result is not None else None\n runs.append(result)\n\n return runs\n\n\nclass StopAfterEnqueue(Exception):\n pass\n", "path": "plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/_core.py"}]} | 2,850 | 410 |
gh_patches_debug_9627 | rasdani/github-patches | git_diff | pytorch__vision-4966 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
QuantizableMobileNetV3 Can not load pretrained model
### 🐛 Describe the bug
```python
import torchvision
quantized = torchvision.models.quantization.mobilenet_v3_large(pretrained=True)
```
It will occur
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/opt/conda/lib/python3.8/site-packages/torchvision/models/quantization/mobilenetv3.py", line 180, in mobilenet_v3_large
return _mobilenet_v3_model(arch, inverted_residual_setting, last_channel, pretrained, progress, quantize, **kwargs)
File "/opt/conda/lib/python3.8/site-packages/torchvision/models/quantization/mobilenetv3.py", line 154, in _mobilenet_v3_model
_load_weights(arch, model, model_urls.get(arch, None), progress)
File "/opt/conda/lib/python3.8/site-packages/torchvision/models/quantization/mobilenetv3.py", line 124, in _load_weights
model.load_state_dict(state_dict)
File "/opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1482, in load_state_dict
raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
RuntimeError: Error(s) in loading state_dict for QuantizableMobileNetV3:
Unexpected key(s) in state_dict: "features.4.block.2.scale_activation.activation_post_process.scale", "features.4.block.2.scale_activation.activation_post_process.zero_point", "features.4.block.2.scale_activation.activation_post_process.fake_quant_enabled", "features.4.block.2.scale_activation.activation_post_process.observer_enabled", "features.5.block.2.scale_activation.activation_post_process.scale", "features.5.block.2.scale_activation.activation_post_process.zero_point", "features.5.block.2.scale_activation.activation_post_process.fake_quant_enabled", "features.5.block.2.scale_activation.activation_post_process.observer_enabled", "features.6.block.2.scale_activation.activation_post_process.scale", "features.6.block.2.scale_activation.activation_post_process.zero_point", "features.6.block.2.scale_activation.activation_post_process.fake_quant_enabled", "features.6.block.2.scale_activation.activation_post_process.observer_enabled", "features.11.block.2.scale_activation.activation_post_process.scale", "features.11.block.2.scale_activation.activation_post_process.zero_point", "features.11.block.2.scale_activation.activation_post_process.fake_quant_enabled", "features.11.block.2.scale_activation.activation_post_process.observer_enabled", "features.12.block.2.scale_activation.activation_post_process.scale", "features.12.block.2.scale_activation.activation_post_process.zero_point", "features.12.block.2.scale_activation.activation_post_process.fake_quant_enabled", "features.12.block.2.scale_activation.activation_post_process.observer_enabled", "features.13.block.2.scale_activation.activation_post_process.scale", "features.13.block.2.scale_activation.activation_post_process.zero_point", "features.13.block.2.scale_activation.activation_post_process.fake_quant_enabled", "features.13.block.2.scale_activation.activation_post_process.observer_enabled", "features.14.block.2.scale_activation.activation_post_process.scale", "features.14.block.2.scale_activation.activation_post_process.zero_point", "features.14.block.2.scale_activation.activation_post_process.fake_quant_enabled", "features.14.block.2.scale_activation.activation_post_process.observer_enabled", "features.15.block.2.scale_activation.activation_post_process.scale", "features.15.block.2.scale_activation.activation_post_process.zero_point", "features.15.block.2.scale_activation.activation_post_process.fake_quant_enabled", "features.15.block.2.scale_activation.activation_post_process.observer_enabled".
```
### Versions
PyTorch version: 1.10.0+cu113
Is debug build: False
CUDA used to build PyTorch: 11.3
ROCM used to build PyTorch: N/A
OS: Ubuntu 20.04.1 LTS (x86_64)
GCC version: (Ubuntu 9.3.0-17ubuntu1~20.04) 9.3.0
Clang version: Could not collect
CMake version: version 3.19.4
Libc version: glibc-2.31
Python version: 3.8.5 (default, Sep 4 2020, 07:30:14) [GCC 7.3.0] (64-bit runtime)
Python platform: Linux-5.4.0-90-generic-x86_64-with-glibc2.10
Is CUDA available: True
CUDA runtime version: 11.2.67
GPU models and configuration: GPU 0: NVIDIA GeForce RTX 3090
Nvidia driver version: 470.82.00
cuDNN version: Probably one of the following:
/usr/lib/x86_64-linux-gnu/libcudnn.so.8.1.0
/usr/lib/x86_64-linux-gnu/libcudnn_adv_infer.so.8.1.0
/usr/lib/x86_64-linux-gnu/libcudnn_adv_train.so.8.1.0
/usr/lib/x86_64-linux-gnu/libcudnn_cnn_infer.so.8.1.0
/usr/lib/x86_64-linux-gnu/libcudnn_cnn_train.so.8.1.0
/usr/lib/x86_64-linux-gnu/libcudnn_ops_infer.so.8.1.0
/usr/lib/x86_64-linux-gnu/libcudnn_ops_train.so.8.1.0
HIP runtime version: N/A
MIOpen runtime version: N/A
Versions of relevant libraries:
[pip3] numpy==1.19.2
[pip3] pytorch-transformers==1.1.0
[pip3] torch==1.10.0+cu113
[pip3] torchaudio==0.10.0+cu113
[pip3] torchtext==0.11.0
[pip3] torchvision==0.11.1+cu113
[conda] magma-cuda110 2.5.2 5 local
[conda] mkl 2019.4 243
[conda] mkl-include 2019.4 243
[conda] nomkl 3.0 0
[conda] numpy 1.19.2 py38h6163131_0
[conda] numpy-base 1.19.2 py38h75fe3a5_0
[conda] pytorch-transformers 1.1.0 pypi_0 pypi
[conda] torch 1.10.0+cu113 pypi_0 pypi
[conda] torchaudio 0.10.0+cu113 pypi_0 pypi
[conda] torchtext 0.11.0 pypi_0 pypi
[conda] torchvision 0.11.1+cu113 pypi_0 pypi
--- END ISSUE ---
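For orientation before the files: every unexpected key sits under `scale_activation.activation_post_process`, which the version-compatibility hook in `QuantizableSqueezeExcitation._load_from_state_dict` injects for checkpoints older than module version 2, even when the model was built without `quantize=True` and therefore has no observer submodules to receive those entries. The snippet below is only a small verification sketch for both loading paths once a guard is in place; it assumes the pretrained weights can be downloaded in your environment and is not part of the repository.

```python
# Verification sketch (assumes network access to the pretrained weights):
# both the float path and the quantized path should load without key errors.
import torchvision

float_model = torchvision.models.quantization.mobilenet_v3_large(pretrained=True)
quantized_model = torchvision.models.quantization.mobilenet_v3_large(
    pretrained=True, quantize=True
)
```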
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchvision/models/quantization/mobilenetv3.py`
Content:
```
1 from typing import Any, List, Optional
2
3 import torch
4 from torch import nn, Tensor
5 from torch.quantization import QuantStub, DeQuantStub, fuse_modules
6
7 from ..._internally_replaced_utils import load_state_dict_from_url
8 from ...ops.misc import ConvNormActivation, SqueezeExcitation
9 from ..mobilenetv3 import InvertedResidual, InvertedResidualConfig, MobileNetV3, model_urls, _mobilenet_v3_conf
10 from .utils import _replace_relu
11
12
13 __all__ = ["QuantizableMobileNetV3", "mobilenet_v3_large"]
14
15 quant_model_urls = {
16 "mobilenet_v3_large_qnnpack": "https://download.pytorch.org/models/quantized/mobilenet_v3_large_qnnpack-5bcacf28.pth",
17 }
18
19
20 class QuantizableSqueezeExcitation(SqueezeExcitation):
21 _version = 2
22
23 def __init__(self, *args: Any, **kwargs: Any) -> None:
24 kwargs["scale_activation"] = nn.Hardsigmoid
25 super().__init__(*args, **kwargs)
26 self.skip_mul = nn.quantized.FloatFunctional()
27
28 def forward(self, input: Tensor) -> Tensor:
29 return self.skip_mul.mul(self._scale(input), input)
30
31 def fuse_model(self) -> None:
32 fuse_modules(self, ["fc1", "activation"], inplace=True)
33
34 def _load_from_state_dict(
35 self,
36 state_dict,
37 prefix,
38 local_metadata,
39 strict,
40 missing_keys,
41 unexpected_keys,
42 error_msgs,
43 ):
44 version = local_metadata.get("version", None)
45
46 if version is None or version < 2:
47 default_state_dict = {
48 "scale_activation.activation_post_process.scale": torch.tensor([1.0]),
49 "scale_activation.activation_post_process.zero_point": torch.tensor([0], dtype=torch.int32),
50 "scale_activation.activation_post_process.fake_quant_enabled": torch.tensor([1]),
51 "scale_activation.activation_post_process.observer_enabled": torch.tensor([1]),
52 }
53 for k, v in default_state_dict.items():
54 full_key = prefix + k
55 if full_key not in state_dict:
56 state_dict[full_key] = v
57
58 super()._load_from_state_dict(
59 state_dict,
60 prefix,
61 local_metadata,
62 strict,
63 missing_keys,
64 unexpected_keys,
65 error_msgs,
66 )
67
68
69 class QuantizableInvertedResidual(InvertedResidual):
70 # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
71 def __init__(self, *args: Any, **kwargs: Any) -> None:
72 super().__init__(se_layer=QuantizableSqueezeExcitation, *args, **kwargs) # type: ignore[misc]
73 self.skip_add = nn.quantized.FloatFunctional()
74
75 def forward(self, x: Tensor) -> Tensor:
76 if self.use_res_connect:
77 return self.skip_add.add(x, self.block(x))
78 else:
79 return self.block(x)
80
81
82 class QuantizableMobileNetV3(MobileNetV3):
83 def __init__(self, *args: Any, **kwargs: Any) -> None:
84 """
85 MobileNet V3 main class
86
87 Args:
88 Inherits args from floating point MobileNetV3
89 """
90 super().__init__(*args, **kwargs)
91 self.quant = QuantStub()
92 self.dequant = DeQuantStub()
93
94 def forward(self, x: Tensor) -> Tensor:
95 x = self.quant(x)
96 x = self._forward_impl(x)
97 x = self.dequant(x)
98 return x
99
100 def fuse_model(self) -> None:
101 for m in self.modules():
102 if type(m) is ConvNormActivation:
103 modules_to_fuse = ["0", "1"]
104 if len(m) == 3 and type(m[2]) is nn.ReLU:
105 modules_to_fuse.append("2")
106 fuse_modules(m, modules_to_fuse, inplace=True)
107 elif type(m) is QuantizableSqueezeExcitation:
108 m.fuse_model()
109
110
111 def _load_weights(arch: str, model: QuantizableMobileNetV3, model_url: Optional[str], progress: bool) -> None:
112 if model_url is None:
113 raise ValueError(f"No checkpoint is available for {arch}")
114 state_dict = load_state_dict_from_url(model_url, progress=progress)
115 model.load_state_dict(state_dict)
116
117
118 def _mobilenet_v3_model(
119 arch: str,
120 inverted_residual_setting: List[InvertedResidualConfig],
121 last_channel: int,
122 pretrained: bool,
123 progress: bool,
124 quantize: bool,
125 **kwargs: Any,
126 ) -> QuantizableMobileNetV3:
127
128 model = QuantizableMobileNetV3(inverted_residual_setting, last_channel, block=QuantizableInvertedResidual, **kwargs)
129 _replace_relu(model)
130
131 if quantize:
132 backend = "qnnpack"
133
134 model.fuse_model()
135 model.qconfig = torch.quantization.get_default_qat_qconfig(backend)
136 torch.quantization.prepare_qat(model, inplace=True)
137
138 if pretrained:
139 _load_weights(arch, model, quant_model_urls.get(arch + "_" + backend, None), progress)
140
141 torch.quantization.convert(model, inplace=True)
142 model.eval()
143 else:
144 if pretrained:
145 _load_weights(arch, model, model_urls.get(arch, None), progress)
146
147 return model
148
149
150 def mobilenet_v3_large(
151 pretrained: bool = False,
152 progress: bool = True,
153 quantize: bool = False,
154 **kwargs: Any,
155 ) -> QuantizableMobileNetV3:
156 """
157 Constructs a MobileNetV3 Large architecture from
158 `"Searching for MobileNetV3" <https://arxiv.org/abs/1905.02244>`_.
159
160 Note that quantize = True returns a quantized model with 8 bit
161 weights. Quantized models only support inference and run on CPUs.
162 GPU inference is not yet supported
163
164 Args:
165 pretrained (bool): If True, returns a model pre-trained on ImageNet.
166 progress (bool): If True, displays a progress bar of the download to stderr
167 quantize (bool): If True, returns a quantized model, else returns a float model
168 """
169 arch = "mobilenet_v3_large"
170 inverted_residual_setting, last_channel = _mobilenet_v3_conf(arch, **kwargs)
171 return _mobilenet_v3_model(arch, inverted_residual_setting, last_channel, pretrained, progress, quantize, **kwargs)
172
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchvision/models/quantization/mobilenetv3.py b/torchvision/models/quantization/mobilenetv3.py
--- a/torchvision/models/quantization/mobilenetv3.py
+++ b/torchvision/models/quantization/mobilenetv3.py
@@ -43,7 +43,7 @@
):
version = local_metadata.get("version", None)
- if version is None or version < 2:
+ if hasattr(self, "qconfig") and (version is None or version < 2):
default_state_dict = {
"scale_activation.activation_post_process.scale": torch.tensor([1.0]),
"scale_activation.activation_post_process.zero_point": torch.tensor([0], dtype=torch.int32),
| {"golden_diff": "diff --git a/torchvision/models/quantization/mobilenetv3.py b/torchvision/models/quantization/mobilenetv3.py\n--- a/torchvision/models/quantization/mobilenetv3.py\n+++ b/torchvision/models/quantization/mobilenetv3.py\n@@ -43,7 +43,7 @@\n ):\n version = local_metadata.get(\"version\", None)\n \n- if version is None or version < 2:\n+ if hasattr(self, \"qconfig\") and (version is None or version < 2):\n default_state_dict = {\n \"scale_activation.activation_post_process.scale\": torch.tensor([1.0]),\n \"scale_activation.activation_post_process.zero_point\": torch.tensor([0], dtype=torch.int32),\n", "issue": "QuantizableMobileNetV3 Can not load pretrained model\n### \ud83d\udc1b Describe the bug\n\n```python\r\n\r\nimport torchvision\r\nquantized = torchvision.models.quantization.mobilenet_v3_large(pretrained=True)\r\n```\r\nIt will occur\r\n```\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/opt/conda/lib/python3.8/site-packages/torchvision/models/quantization/mobilenetv3.py\", line 180, in mobilenet_v3_large\r\n return _mobilenet_v3_model(arch, inverted_residual_setting, last_channel, pretrained, progress, quantize, **kwargs)\r\n File \"/opt/conda/lib/python3.8/site-packages/torchvision/models/quantization/mobilenetv3.py\", line 154, in _mobilenet_v3_model\r\n _load_weights(arch, model, model_urls.get(arch, None), progress)\r\n File \"/opt/conda/lib/python3.8/site-packages/torchvision/models/quantization/mobilenetv3.py\", line 124, in _load_weights\r\n model.load_state_dict(state_dict)\r\n File \"/opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py\", line 1482, in load_state_dict\r\n raise RuntimeError('Error(s) in loading state_dict for {}:\\n\\t{}'.format(\r\nRuntimeError: Error(s) in loading state_dict for QuantizableMobileNetV3:\r\n\tUnexpected key(s) in state_dict: \"features.4.block.2.scale_activation.activation_post_process.scale\", \"features.4.block.2.scale_activation.activation_post_process.zero_point\", \"features.4.block.2.scale_activation.activation_post_process.fake_quant_enabled\", \"features.4.block.2.scale_activation.activation_post_process.observer_enabled\", \"features.5.block.2.scale_activation.activation_post_process.scale\", \"features.5.block.2.scale_activation.activation_post_process.zero_point\", \"features.5.block.2.scale_activation.activation_post_process.fake_quant_enabled\", \"features.5.block.2.scale_activation.activation_post_process.observer_enabled\", \"features.6.block.2.scale_activation.activation_post_process.scale\", \"features.6.block.2.scale_activation.activation_post_process.zero_point\", \"features.6.block.2.scale_activation.activation_post_process.fake_quant_enabled\", \"features.6.block.2.scale_activation.activation_post_process.observer_enabled\", \"features.11.block.2.scale_activation.activation_post_process.scale\", \"features.11.block.2.scale_activation.activation_post_process.zero_point\", \"features.11.block.2.scale_activation.activation_post_process.fake_quant_enabled\", \"features.11.block.2.scale_activation.activation_post_process.observer_enabled\", \"features.12.block.2.scale_activation.activation_post_process.scale\", \"features.12.block.2.scale_activation.activation_post_process.zero_point\", \"features.12.block.2.scale_activation.activation_post_process.fake_quant_enabled\", \"features.12.block.2.scale_activation.activation_post_process.observer_enabled\", \"features.13.block.2.scale_activation.activation_post_process.scale\", 
\"features.13.block.2.scale_activation.activation_post_process.zero_point\", \"features.13.block.2.scale_activation.activation_post_process.fake_quant_enabled\", \"features.13.block.2.scale_activation.activation_post_process.observer_enabled\", \"features.14.block.2.scale_activation.activation_post_process.scale\", \"features.14.block.2.scale_activation.activation_post_process.zero_point\", \"features.14.block.2.scale_activation.activation_post_process.fake_quant_enabled\", \"features.14.block.2.scale_activation.activation_post_process.observer_enabled\", \"features.15.block.2.scale_activation.activation_post_process.scale\", \"features.15.block.2.scale_activation.activation_post_process.zero_point\", \"features.15.block.2.scale_activation.activation_post_process.fake_quant_enabled\", \"features.15.block.2.scale_activation.activation_post_process.observer_enabled\".\r\n```\n\n### Versions\n\nPyTorch version: 1.10.0+cu113\r\nIs debug build: False\r\nCUDA used to build PyTorch: 11.3\r\nROCM used to build PyTorch: N/A\r\n\r\nOS: Ubuntu 20.04.1 LTS (x86_64)\r\nGCC version: (Ubuntu 9.3.0-17ubuntu1~20.04) 9.3.0\r\nClang version: Could not collect\r\nCMake version: version 3.19.4\r\nLibc version: glibc-2.31\r\n\r\nPython version: 3.8.5 (default, Sep 4 2020, 07:30:14) [GCC 7.3.0] (64-bit runtime)\r\nPython platform: Linux-5.4.0-90-generic-x86_64-with-glibc2.10\r\nIs CUDA available: True\r\nCUDA runtime version: 11.2.67\r\nGPU models and configuration: GPU 0: NVIDIA GeForce RTX 3090\r\nNvidia driver version: 470.82.00\r\ncuDNN version: Probably one of the following:\r\n/usr/lib/x86_64-linux-gnu/libcudnn.so.8.1.0\r\n/usr/lib/x86_64-linux-gnu/libcudnn_adv_infer.so.8.1.0\r\n/usr/lib/x86_64-linux-gnu/libcudnn_adv_train.so.8.1.0\r\n/usr/lib/x86_64-linux-gnu/libcudnn_cnn_infer.so.8.1.0\r\n/usr/lib/x86_64-linux-gnu/libcudnn_cnn_train.so.8.1.0\r\n/usr/lib/x86_64-linux-gnu/libcudnn_ops_infer.so.8.1.0\r\n/usr/lib/x86_64-linux-gnu/libcudnn_ops_train.so.8.1.0\r\nHIP runtime version: N/A\r\nMIOpen runtime version: N/A\r\n\r\nVersions of relevant libraries:\r\n[pip3] numpy==1.19.2\r\n[pip3] pytorch-transformers==1.1.0\r\n[pip3] torch==1.10.0+cu113\r\n[pip3] torchaudio==0.10.0+cu113\r\n[pip3] torchtext==0.11.0\r\n[pip3] torchvision==0.11.1+cu113\r\n[conda] magma-cuda110 2.5.2 5 local\r\n[conda] mkl 2019.4 243\r\n[conda] mkl-include 2019.4 243\r\n[conda] nomkl 3.0 0\r\n[conda] numpy 1.19.2 py38h6163131_0\r\n[conda] numpy-base 1.19.2 py38h75fe3a5_0\r\n[conda] pytorch-transformers 1.1.0 pypi_0 pypi\r\n[conda] torch 1.10.0+cu113 pypi_0 pypi\r\n[conda] torchaudio 0.10.0+cu113 pypi_0 pypi\r\n[conda] torchtext 0.11.0 pypi_0 pypi\r\n[conda] torchvision 0.11.1+cu113 pypi_0 pypi\n", "before_files": [{"content": "from typing import Any, List, Optional\n\nimport torch\nfrom torch import nn, Tensor\nfrom torch.quantization import QuantStub, DeQuantStub, fuse_modules\n\nfrom ..._internally_replaced_utils import load_state_dict_from_url\nfrom ...ops.misc import ConvNormActivation, SqueezeExcitation\nfrom ..mobilenetv3 import InvertedResidual, InvertedResidualConfig, MobileNetV3, model_urls, _mobilenet_v3_conf\nfrom .utils import _replace_relu\n\n\n__all__ = [\"QuantizableMobileNetV3\", \"mobilenet_v3_large\"]\n\nquant_model_urls = {\n \"mobilenet_v3_large_qnnpack\": \"https://download.pytorch.org/models/quantized/mobilenet_v3_large_qnnpack-5bcacf28.pth\",\n}\n\n\nclass QuantizableSqueezeExcitation(SqueezeExcitation):\n _version = 2\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n kwargs[\"scale_activation\"] = 
nn.Hardsigmoid\n super().__init__(*args, **kwargs)\n self.skip_mul = nn.quantized.FloatFunctional()\n\n def forward(self, input: Tensor) -> Tensor:\n return self.skip_mul.mul(self._scale(input), input)\n\n def fuse_model(self) -> None:\n fuse_modules(self, [\"fc1\", \"activation\"], inplace=True)\n\n def _load_from_state_dict(\n self,\n state_dict,\n prefix,\n local_metadata,\n strict,\n missing_keys,\n unexpected_keys,\n error_msgs,\n ):\n version = local_metadata.get(\"version\", None)\n\n if version is None or version < 2:\n default_state_dict = {\n \"scale_activation.activation_post_process.scale\": torch.tensor([1.0]),\n \"scale_activation.activation_post_process.zero_point\": torch.tensor([0], dtype=torch.int32),\n \"scale_activation.activation_post_process.fake_quant_enabled\": torch.tensor([1]),\n \"scale_activation.activation_post_process.observer_enabled\": torch.tensor([1]),\n }\n for k, v in default_state_dict.items():\n full_key = prefix + k\n if full_key not in state_dict:\n state_dict[full_key] = v\n\n super()._load_from_state_dict(\n state_dict,\n prefix,\n local_metadata,\n strict,\n missing_keys,\n unexpected_keys,\n error_msgs,\n )\n\n\nclass QuantizableInvertedResidual(InvertedResidual):\n # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n super().__init__(se_layer=QuantizableSqueezeExcitation, *args, **kwargs) # type: ignore[misc]\n self.skip_add = nn.quantized.FloatFunctional()\n\n def forward(self, x: Tensor) -> Tensor:\n if self.use_res_connect:\n return self.skip_add.add(x, self.block(x))\n else:\n return self.block(x)\n\n\nclass QuantizableMobileNetV3(MobileNetV3):\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"\n MobileNet V3 main class\n\n Args:\n Inherits args from floating point MobileNetV3\n \"\"\"\n super().__init__(*args, **kwargs)\n self.quant = QuantStub()\n self.dequant = DeQuantStub()\n\n def forward(self, x: Tensor) -> Tensor:\n x = self.quant(x)\n x = self._forward_impl(x)\n x = self.dequant(x)\n return x\n\n def fuse_model(self) -> None:\n for m in self.modules():\n if type(m) is ConvNormActivation:\n modules_to_fuse = [\"0\", \"1\"]\n if len(m) == 3 and type(m[2]) is nn.ReLU:\n modules_to_fuse.append(\"2\")\n fuse_modules(m, modules_to_fuse, inplace=True)\n elif type(m) is QuantizableSqueezeExcitation:\n m.fuse_model()\n\n\ndef _load_weights(arch: str, model: QuantizableMobileNetV3, model_url: Optional[str], progress: bool) -> None:\n if model_url is None:\n raise ValueError(f\"No checkpoint is available for {arch}\")\n state_dict = load_state_dict_from_url(model_url, progress=progress)\n model.load_state_dict(state_dict)\n\n\ndef _mobilenet_v3_model(\n arch: str,\n inverted_residual_setting: List[InvertedResidualConfig],\n last_channel: int,\n pretrained: bool,\n progress: bool,\n quantize: bool,\n **kwargs: Any,\n) -> QuantizableMobileNetV3:\n\n model = QuantizableMobileNetV3(inverted_residual_setting, last_channel, block=QuantizableInvertedResidual, **kwargs)\n _replace_relu(model)\n\n if quantize:\n backend = \"qnnpack\"\n\n model.fuse_model()\n model.qconfig = torch.quantization.get_default_qat_qconfig(backend)\n torch.quantization.prepare_qat(model, inplace=True)\n\n if pretrained:\n _load_weights(arch, model, quant_model_urls.get(arch + \"_\" + backend, None), progress)\n\n torch.quantization.convert(model, inplace=True)\n model.eval()\n else:\n if pretrained:\n _load_weights(arch, model, model_urls.get(arch, None), progress)\n\n return 
model\n\n\ndef mobilenet_v3_large(\n pretrained: bool = False,\n progress: bool = True,\n quantize: bool = False,\n **kwargs: Any,\n) -> QuantizableMobileNetV3:\n \"\"\"\n Constructs a MobileNetV3 Large architecture from\n `\"Searching for MobileNetV3\" <https://arxiv.org/abs/1905.02244>`_.\n\n Note that quantize = True returns a quantized model with 8 bit\n weights. Quantized models only support inference and run on CPUs.\n GPU inference is not yet supported\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet.\n progress (bool): If True, displays a progress bar of the download to stderr\n quantize (bool): If True, returns a quantized model, else returns a float model\n \"\"\"\n arch = \"mobilenet_v3_large\"\n inverted_residual_setting, last_channel = _mobilenet_v3_conf(arch, **kwargs)\n return _mobilenet_v3_model(arch, inverted_residual_setting, last_channel, pretrained, progress, quantize, **kwargs)\n", "path": "torchvision/models/quantization/mobilenetv3.py"}], "after_files": [{"content": "from typing import Any, List, Optional\n\nimport torch\nfrom torch import nn, Tensor\nfrom torch.quantization import QuantStub, DeQuantStub, fuse_modules\n\nfrom ..._internally_replaced_utils import load_state_dict_from_url\nfrom ...ops.misc import ConvNormActivation, SqueezeExcitation\nfrom ..mobilenetv3 import InvertedResidual, InvertedResidualConfig, MobileNetV3, model_urls, _mobilenet_v3_conf\nfrom .utils import _replace_relu\n\n\n__all__ = [\"QuantizableMobileNetV3\", \"mobilenet_v3_large\"]\n\nquant_model_urls = {\n \"mobilenet_v3_large_qnnpack\": \"https://download.pytorch.org/models/quantized/mobilenet_v3_large_qnnpack-5bcacf28.pth\",\n}\n\n\nclass QuantizableSqueezeExcitation(SqueezeExcitation):\n _version = 2\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n kwargs[\"scale_activation\"] = nn.Hardsigmoid\n super().__init__(*args, **kwargs)\n self.skip_mul = nn.quantized.FloatFunctional()\n\n def forward(self, input: Tensor) -> Tensor:\n return self.skip_mul.mul(self._scale(input), input)\n\n def fuse_model(self) -> None:\n fuse_modules(self, [\"fc1\", \"activation\"], inplace=True)\n\n def _load_from_state_dict(\n self,\n state_dict,\n prefix,\n local_metadata,\n strict,\n missing_keys,\n unexpected_keys,\n error_msgs,\n ):\n version = local_metadata.get(\"version\", None)\n\n if hasattr(self, \"qconfig\") and (version is None or version < 2):\n default_state_dict = {\n \"scale_activation.activation_post_process.scale\": torch.tensor([1.0]),\n \"scale_activation.activation_post_process.zero_point\": torch.tensor([0], dtype=torch.int32),\n \"scale_activation.activation_post_process.fake_quant_enabled\": torch.tensor([1]),\n \"scale_activation.activation_post_process.observer_enabled\": torch.tensor([1]),\n }\n for k, v in default_state_dict.items():\n full_key = prefix + k\n if full_key not in state_dict:\n state_dict[full_key] = v\n\n super()._load_from_state_dict(\n state_dict,\n prefix,\n local_metadata,\n strict,\n missing_keys,\n unexpected_keys,\n error_msgs,\n )\n\n\nclass QuantizableInvertedResidual(InvertedResidual):\n # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n super().__init__(se_layer=QuantizableSqueezeExcitation, *args, **kwargs) # type: ignore[misc]\n self.skip_add = nn.quantized.FloatFunctional()\n\n def forward(self, x: Tensor) -> Tensor:\n if self.use_res_connect:\n return self.skip_add.add(x, self.block(x))\n else:\n return 
self.block(x)\n\n\nclass QuantizableMobileNetV3(MobileNetV3):\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"\n MobileNet V3 main class\n\n Args:\n Inherits args from floating point MobileNetV3\n \"\"\"\n super().__init__(*args, **kwargs)\n self.quant = QuantStub()\n self.dequant = DeQuantStub()\n\n def forward(self, x: Tensor) -> Tensor:\n x = self.quant(x)\n x = self._forward_impl(x)\n x = self.dequant(x)\n return x\n\n def fuse_model(self) -> None:\n for m in self.modules():\n if type(m) is ConvNormActivation:\n modules_to_fuse = [\"0\", \"1\"]\n if len(m) == 3 and type(m[2]) is nn.ReLU:\n modules_to_fuse.append(\"2\")\n fuse_modules(m, modules_to_fuse, inplace=True)\n elif type(m) is QuantizableSqueezeExcitation:\n m.fuse_model()\n\n\ndef _load_weights(arch: str, model: QuantizableMobileNetV3, model_url: Optional[str], progress: bool) -> None:\n if model_url is None:\n raise ValueError(f\"No checkpoint is available for {arch}\")\n state_dict = load_state_dict_from_url(model_url, progress=progress)\n model.load_state_dict(state_dict)\n\n\ndef _mobilenet_v3_model(\n arch: str,\n inverted_residual_setting: List[InvertedResidualConfig],\n last_channel: int,\n pretrained: bool,\n progress: bool,\n quantize: bool,\n **kwargs: Any,\n) -> QuantizableMobileNetV3:\n\n model = QuantizableMobileNetV3(inverted_residual_setting, last_channel, block=QuantizableInvertedResidual, **kwargs)\n _replace_relu(model)\n\n if quantize:\n backend = \"qnnpack\"\n\n model.fuse_model()\n model.qconfig = torch.quantization.get_default_qat_qconfig(backend)\n torch.quantization.prepare_qat(model, inplace=True)\n\n if pretrained:\n _load_weights(arch, model, quant_model_urls.get(arch + \"_\" + backend, None), progress)\n\n torch.quantization.convert(model, inplace=True)\n model.eval()\n else:\n if pretrained:\n _load_weights(arch, model, model_urls.get(arch, None), progress)\n\n return model\n\n\ndef mobilenet_v3_large(\n pretrained: bool = False,\n progress: bool = True,\n quantize: bool = False,\n **kwargs: Any,\n) -> QuantizableMobileNetV3:\n \"\"\"\n Constructs a MobileNetV3 Large architecture from\n `\"Searching for MobileNetV3\" <https://arxiv.org/abs/1905.02244>`_.\n\n Note that quantize = True returns a quantized model with 8 bit\n weights. Quantized models only support inference and run on CPUs.\n GPU inference is not yet supported\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet.\n progress (bool): If True, displays a progress bar of the download to stderr\n quantize (bool): If True, returns a quantized model, else returns a float model\n \"\"\"\n arch = \"mobilenet_v3_large\"\n inverted_residual_setting, last_channel = _mobilenet_v3_conf(arch, **kwargs)\n return _mobilenet_v3_model(arch, inverted_residual_setting, last_channel, pretrained, progress, quantize, **kwargs)\n", "path": "torchvision/models/quantization/mobilenetv3.py"}]} | 3,790 | 170 |
gh_patches_debug_5470 | rasdani/github-patches | git_diff | Gallopsled__pwntools-1242 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError: 'IntervalTree' object has no attribute 'search'
Running the latest version of pwntools from pip and python2:
```
In [1]: from pwn import *
In [2]: elf = ELF("./age_calc")
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-2-f9356b282eac> in <module>()
----> 1 elf = ELF("./age_calc")
/home/r2/formatStringExploiter/local/lib/python2.7/site-packages/pwnlib/elf/elf.pyc in __init__(self, path, checksec)
321
322 #: Path to the linker for the ELF
--> 323 self.linker = self.read(seg.header.p_vaddr, seg.header.p_memsz)
324 self.linker = self.linker.rstrip('\x00')
325
/home/r2/formatStringExploiter/local/lib/python2.7/site-packages/pwnlib/elf/elf.pyc in read(self, address, count)
1129 stop = address + count
1130
-> 1131 overlap = self.memory.search(start, stop)
1132
1133 # Create a new view of memory, for just what we need
AttributeError: 'IntervalTree' object has no attribute 'search'
```
```
pip freeze
alabaster==0.7.12
asn1crypto==0.24.0
atomicwrites==1.2.1
attrs==18.2.0
Babel==2.6.0
backports.shutil-get-terminal-size==1.0.0
bcrypt==3.1.5
capstone==4.0.0
certifi==2018.11.29
cffi==1.11.5
chardet==3.0.4
CommonMark==0.5.4
cryptography==2.4.2
decorator==4.3.0
docutils==0.14
enum34==1.1.6
filelock==3.0.10
formatStringExploiter==0.1.4
funcsigs==1.0.2
idna==2.8
imagesize==1.1.0
intervaltree==3.0.2
ipaddress==1.0.22
ipython==5.8.0
ipython-genutils==0.2.0
Jinja2==2.10
Mako==1.0.7
MarkupSafe==1.1.0
more-itertools==4.3.0
packaging==18.0
paramiko==2.4.2
pathlib2==2.3.3
pexpect==4.6.0
pickleshare==0.7.5
pkg-resources==0.0.0
pluggy==0.8.0
pockets==0.7.2
prettytable==0.7.2
prompt-toolkit==1.0.15
psutil==5.4.8
ptyprocess==0.6.0
pwntools==3.14.0.dev0
py==1.7.0
pyasn1==0.4.4
pycparser==2.19
pyelftools==0.25
Pygments==2.3.1
PyNaCl==1.3.0
pyparsing==2.3.0
pyserial==3.4
PySocks==1.6.8
pytest==4.0.2
python-dateutil==2.7.5
pytz==2018.7
r2pipe==1.2.0
recommonmark==0.4.0
requests==2.21.0
ROPGadget==5.4
scandir==1.9.0
simplegeneric==0.8.1
six==1.12.0
snowballstemmer==1.2.1
sortedcontainers==1.5.10
Sphinx==1.8.2
sphinx-rtd-theme==0.4.2
sphinxcontrib-napoleon==0.7
sphinxcontrib-websupport==1.1.0
toml==0.10.0
tox==3.6.0
traitlets==4.3.2
typing==3.6.6
unicorn==1.0.1
urllib3==1.24.1
virtualenv==16.1.0
wcwidth==0.1.7
```
--- END ISSUE ---
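For context on why the call fails: intervaltree 3.0 removed the 2.x range-query method `search()` in favour of `overlap()` and `at()`, while `pwnlib/elf/elf.py` still calls `self.memory.search(start, stop)`. The sketch below only illustrates that API difference (it is not pwntools code) and assumes a local install of intervaltree; the patch shown later in this record instead pins the dependency below 3.0.

```python
# Illustration only: how the 2.x query used by pwnlib maps onto the 3.x API.
from intervaltree import IntervalTree

memory = IntervalTree()
memory[0x400000:0x401000] = b"mapped ELF segment"  # addi() via slice syntax

start, stop = 0x400800, 0x400900

if hasattr(memory, "search"):            # intervaltree < 3.0
    overlap = memory.search(start, stop)
else:                                    # intervaltree >= 3.0
    overlap = memory.overlap(start, stop)

print(sorted(overlap))
```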
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python2
2 import glob
3 import os
4 import platform
5 import sys
6 import traceback
7 from distutils.command.install import INSTALL_SCHEMES
8 from distutils.sysconfig import get_python_inc
9 from distutils.util import convert_path
10
11 from setuptools import find_packages
12 from setuptools import setup
13
14 # Get all template files
15 templates = []
16 for dirpath, dirnames, filenames in os.walk(convert_path('pwnlib/shellcraft/templates'), followlinks=True):
17 for f in filenames:
18 templates.append(os.path.relpath(os.path.join(dirpath, f), 'pwnlib'))
19
20 # This makes pwntools-LICENSE.txt appear with the package folders
21 for scheme in INSTALL_SCHEMES.values():
22 scheme['data'] = scheme['purelib']
23
24 console_scripts = ['pwn=pwnlib.commandline.main:main']
25
26 # Find all of the ancillary console scripts
27 # We have a magic flag --include-all-scripts
28 flag = '--only-use-pwn-command'
29 if flag in sys.argv:
30 sys.argv.remove(flag)
31 else:
32 flag = False
33
34 for filename in glob.glob('pwnlib/commandline/*'):
35 filename = os.path.basename(filename)
36 filename, ext = os.path.splitext(filename)
37
38 if ext != '.py' or '__init__' in filename:
39 continue
40
41 script = '%s=pwnlib.commandline.common:main' % filename
42 if not flag:
43 console_scripts.append(script)
44
45 install_requires = ['paramiko>=1.15.2',
46 'mako>=1.0.0',
47 'pyelftools>=0.2.4',
48 'capstone>=3.0.5rc2', # See Gallopsled/pwntools#971, Gallopsled/pwntools#1160
49 'ropgadget>=5.3',
50 'pyserial>=2.7',
51 'requests>=2.0',
52 'pip>=6.0.8',
53 'tox>=1.8.1',
54 'pygments>=2.0',
55 'pysocks',
56 'python-dateutil',
57 'pypandoc',
58 'packaging',
59 'psutil>=3.3.0',
60 'intervaltree',
61 'sortedcontainers<2.0', # See Gallopsled/pwntools#1154
62 'unicorn']
63
64 # Check that the user has installed the Python development headers
65 PythonH = os.path.join(get_python_inc(), 'Python.h')
66 if not os.path.exists(PythonH):
67 print >> sys.stderr, "You must install the Python development headers!"
68 print >> sys.stderr, "$ apt-get install python-dev"
69 sys.exit(-1)
70
71 # Convert README.md to reStructuredText for PyPI
72 long_description = ''
73 try:
74 import pypandoc
75 try:
76 pypandoc.get_pandoc_path()
77 except OSError:
78 pypandoc.download_pandoc()
79 long_description = pypandoc.convert_file('README.md', 'rst')
80 except ImportError:
81 pass
82 except Exception as e:
83 print >>sys.stderr, "Failed to convert README.md through pandoc, proceeding anyway"
84 traceback.print_exc()
85
86
87 setup(
88 name = 'pwntools',
89 packages = find_packages(),
90 version = '3.12.1',
91 data_files = [('',
92 glob.glob('*.md') + glob.glob('*.txt')),
93 ],
94 package_data = {
95 'pwnlib': [
96 'data/crcsums.txt',
97 'data/useragents/useragents.txt',
98 'data/binutils/*',
99 'data/includes/*.h',
100 'data/includes/*/*.h',
101 'data/templates/*.mako',
102 ] + templates,
103 },
104 entry_points = {'console_scripts': console_scripts},
105 scripts = glob.glob("bin/*"),
106 description = "Pwntools CTF framework and exploit development library.",
107 long_description = long_description,
108 author = "Gallopsled et al.",
109 author_email = "[email protected]",
110 url = 'https://pwntools.com',
111 download_url = "https://github.com/Gallopsled/pwntools/releases",
112 install_requires = install_requires,
113 license = "Mostly MIT, some GPL/BSD, see LICENSE-pwntools.txt",
114 keywords = 'pwntools exploit ctf capture the flag binary wargame overflow stack heap defcon',
115 classifiers = [
116 'Development Status :: 5 - Production/Stable',
117 'Environment :: Console',
118 'Intended Audience :: Developers',
119 'Intended Audience :: Science/Research',
120 'Intended Audience :: System Administrators',
121 'License :: OSI Approved :: MIT License',
122 'Natural Language :: English',
123 'Operating System :: POSIX :: Linux',
124 'Programming Language :: Python :: 2.7',
125 'Topic :: Security',
126 'Topic :: Software Development :: Assemblers',
127 'Topic :: Software Development :: Debuggers',
128 'Topic :: Software Development :: Disassemblers',
129 'Topic :: Software Development :: Embedded Systems',
130 'Topic :: Software Development :: Libraries :: Python Modules',
131 'Topic :: System :: System Shells',
132 'Topic :: Utilities',
133 ]
134 )
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -57,7 +57,7 @@
'pypandoc',
'packaging',
'psutil>=3.3.0',
- 'intervaltree',
+ 'intervaltree<3.0', # See Gallopsled/pwntools#1238
'sortedcontainers<2.0', # See Gallopsled/pwntools#1154
'unicorn']
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -57,7 +57,7 @@\n 'pypandoc',\n 'packaging',\n 'psutil>=3.3.0',\n- 'intervaltree',\n+ 'intervaltree<3.0', # See Gallopsled/pwntools#1238\n 'sortedcontainers<2.0', # See Gallopsled/pwntools#1154\n 'unicorn']\n", "issue": "AttributeError: 'IntervalTree' object has no attribute 'search'\nRunning the latest version of pwntools from pip and python2:\r\n\r\n```\r\nIn [1]: from pwn import *\r\n\r\nIn [2]: elf = ELF(\"./age_calc\")\r\n---------------------------------------------------------------------------\r\nAttributeError Traceback (most recent call last)\r\n<ipython-input-2-f9356b282eac> in <module>()\r\n----> 1 elf = ELF(\"./age_calc\")\r\n\r\n/home/r2/formatStringExploiter/local/lib/python2.7/site-packages/pwnlib/elf/elf.pyc in __init__(self, path, checksec)\r\n 321\r\n 322 #: Path to the linker for the ELF\r\n--> 323 self.linker = self.read(seg.header.p_vaddr, seg.header.p_memsz)\r\n 324 self.linker = self.linker.rstrip('\\x00')\r\n 325\r\n\r\n/home/r2/formatStringExploiter/local/lib/python2.7/site-packages/pwnlib/elf/elf.pyc in read(self, address, count)\r\n 1129 stop = address + count\r\n 1130\r\n-> 1131 overlap = self.memory.search(start, stop)\r\n 1132\r\n 1133 # Create a new view of memory, for just what we need\r\n\r\nAttributeError: 'IntervalTree' object has no attribute 'search'\r\n```\r\n\r\n```\r\npip freeze\r\nalabaster==0.7.12\r\nasn1crypto==0.24.0\r\natomicwrites==1.2.1\r\nattrs==18.2.0\r\nBabel==2.6.0\r\nbackports.shutil-get-terminal-size==1.0.0\r\nbcrypt==3.1.5\r\ncapstone==4.0.0\r\ncertifi==2018.11.29\r\ncffi==1.11.5\r\nchardet==3.0.4\r\nCommonMark==0.5.4\r\ncryptography==2.4.2\r\ndecorator==4.3.0\r\ndocutils==0.14\r\nenum34==1.1.6\r\nfilelock==3.0.10\r\nformatStringExploiter==0.1.4\r\nfuncsigs==1.0.2\r\nidna==2.8\r\nimagesize==1.1.0\r\nintervaltree==3.0.2\r\nipaddress==1.0.22\r\nipython==5.8.0\r\nipython-genutils==0.2.0\r\nJinja2==2.10\r\nMako==1.0.7\r\nMarkupSafe==1.1.0\r\nmore-itertools==4.3.0\r\npackaging==18.0\r\nparamiko==2.4.2\r\npathlib2==2.3.3\r\npexpect==4.6.0\r\npickleshare==0.7.5\r\npkg-resources==0.0.0\r\npluggy==0.8.0\r\npockets==0.7.2\r\nprettytable==0.7.2\r\nprompt-toolkit==1.0.15\r\npsutil==5.4.8\r\nptyprocess==0.6.0\r\npwntools==3.14.0.dev0\r\npy==1.7.0\r\npyasn1==0.4.4\r\npycparser==2.19\r\npyelftools==0.25\r\nPygments==2.3.1\r\nPyNaCl==1.3.0\r\npyparsing==2.3.0\r\npyserial==3.4\r\nPySocks==1.6.8\r\npytest==4.0.2\r\npython-dateutil==2.7.5\r\npytz==2018.7\r\nr2pipe==1.2.0\r\nrecommonmark==0.4.0\r\nrequests==2.21.0\r\nROPGadget==5.4\r\nscandir==1.9.0\r\nsimplegeneric==0.8.1\r\nsix==1.12.0\r\nsnowballstemmer==1.2.1\r\nsortedcontainers==1.5.10\r\nSphinx==1.8.2\r\nsphinx-rtd-theme==0.4.2\r\nsphinxcontrib-napoleon==0.7\r\nsphinxcontrib-websupport==1.1.0\r\ntoml==0.10.0\r\ntox==3.6.0\r\ntraitlets==4.3.2\r\ntyping==3.6.6\r\nunicorn==1.0.1\r\nurllib3==1.24.1\r\nvirtualenv==16.1.0\r\nwcwidth==0.1.7\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python2\nimport glob\nimport os\nimport platform\nimport sys\nimport traceback\nfrom distutils.command.install import INSTALL_SCHEMES\nfrom distutils.sysconfig import get_python_inc\nfrom distutils.util import convert_path\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n# Get all template files\ntemplates = []\nfor dirpath, dirnames, filenames in os.walk(convert_path('pwnlib/shellcraft/templates'), followlinks=True):\n for f in filenames:\n templates.append(os.path.relpath(os.path.join(dirpath, f), 
'pwnlib'))\n\n# This makes pwntools-LICENSE.txt appear with the package folders\nfor scheme in INSTALL_SCHEMES.values():\n scheme['data'] = scheme['purelib']\n\nconsole_scripts = ['pwn=pwnlib.commandline.main:main']\n\n# Find all of the ancillary console scripts\n# We have a magic flag --include-all-scripts\nflag = '--only-use-pwn-command'\nif flag in sys.argv:\n sys.argv.remove(flag)\nelse:\n flag = False\n\nfor filename in glob.glob('pwnlib/commandline/*'):\n filename = os.path.basename(filename)\n filename, ext = os.path.splitext(filename)\n\n if ext != '.py' or '__init__' in filename:\n continue\n\n script = '%s=pwnlib.commandline.common:main' % filename\n if not flag:\n console_scripts.append(script)\n\ninstall_requires = ['paramiko>=1.15.2',\n 'mako>=1.0.0',\n 'pyelftools>=0.2.4',\n 'capstone>=3.0.5rc2', # See Gallopsled/pwntools#971, Gallopsled/pwntools#1160\n 'ropgadget>=5.3',\n 'pyserial>=2.7',\n 'requests>=2.0',\n 'pip>=6.0.8',\n 'tox>=1.8.1',\n 'pygments>=2.0',\n 'pysocks',\n 'python-dateutil',\n 'pypandoc',\n 'packaging',\n 'psutil>=3.3.0',\n 'intervaltree',\n 'sortedcontainers<2.0', # See Gallopsled/pwntools#1154\n 'unicorn']\n\n# Check that the user has installed the Python development headers\nPythonH = os.path.join(get_python_inc(), 'Python.h')\nif not os.path.exists(PythonH):\n print >> sys.stderr, \"You must install the Python development headers!\"\n print >> sys.stderr, \"$ apt-get install python-dev\"\n sys.exit(-1)\n\n# Convert README.md to reStructuredText for PyPI\nlong_description = ''\ntry:\n import pypandoc\n try:\n pypandoc.get_pandoc_path()\n except OSError:\n pypandoc.download_pandoc()\n long_description = pypandoc.convert_file('README.md', 'rst')\nexcept ImportError:\n pass\nexcept Exception as e:\n print >>sys.stderr, \"Failed to convert README.md through pandoc, proceeding anyway\"\n traceback.print_exc()\n\n\nsetup(\n name = 'pwntools',\n packages = find_packages(),\n version = '3.12.1',\n data_files = [('',\n glob.glob('*.md') + glob.glob('*.txt')),\n ],\n package_data = {\n 'pwnlib': [\n 'data/crcsums.txt',\n 'data/useragents/useragents.txt',\n 'data/binutils/*',\n 'data/includes/*.h',\n 'data/includes/*/*.h',\n 'data/templates/*.mako',\n ] + templates,\n },\n entry_points = {'console_scripts': console_scripts},\n scripts = glob.glob(\"bin/*\"),\n description = \"Pwntools CTF framework and exploit development library.\",\n long_description = long_description,\n author = \"Gallopsled et al.\",\n author_email = \"[email protected]\",\n url = 'https://pwntools.com',\n download_url = \"https://github.com/Gallopsled/pwntools/releases\",\n install_requires = install_requires,\n license = \"Mostly MIT, some GPL/BSD, see LICENSE-pwntools.txt\",\n keywords = 'pwntools exploit ctf capture the flag binary wargame overflow stack heap defcon',\n classifiers = [\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Security',\n 'Topic :: Software Development :: Assemblers',\n 'Topic :: Software Development :: Debuggers',\n 'Topic :: Software Development :: Disassemblers',\n 'Topic :: Software Development :: Embedded Systems',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: System :: System Shells',\n 'Topic :: 
Utilities',\n ]\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python2\nimport glob\nimport os\nimport platform\nimport sys\nimport traceback\nfrom distutils.command.install import INSTALL_SCHEMES\nfrom distutils.sysconfig import get_python_inc\nfrom distutils.util import convert_path\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n# Get all template files\ntemplates = []\nfor dirpath, dirnames, filenames in os.walk(convert_path('pwnlib/shellcraft/templates'), followlinks=True):\n for f in filenames:\n templates.append(os.path.relpath(os.path.join(dirpath, f), 'pwnlib'))\n\n# This makes pwntools-LICENSE.txt appear with the package folders\nfor scheme in INSTALL_SCHEMES.values():\n scheme['data'] = scheme['purelib']\n\nconsole_scripts = ['pwn=pwnlib.commandline.main:main']\n\n# Find all of the ancillary console scripts\n# We have a magic flag --include-all-scripts\nflag = '--only-use-pwn-command'\nif flag in sys.argv:\n sys.argv.remove(flag)\nelse:\n flag = False\n\nfor filename in glob.glob('pwnlib/commandline/*'):\n filename = os.path.basename(filename)\n filename, ext = os.path.splitext(filename)\n\n if ext != '.py' or '__init__' in filename:\n continue\n\n script = '%s=pwnlib.commandline.common:main' % filename\n if not flag:\n console_scripts.append(script)\n\ninstall_requires = ['paramiko>=1.15.2',\n 'mako>=1.0.0',\n 'pyelftools>=0.2.4',\n 'capstone>=3.0.5rc2', # See Gallopsled/pwntools#971, Gallopsled/pwntools#1160\n 'ropgadget>=5.3',\n 'pyserial>=2.7',\n 'requests>=2.0',\n 'pip>=6.0.8',\n 'tox>=1.8.1',\n 'pygments>=2.0',\n 'pysocks',\n 'python-dateutil',\n 'pypandoc',\n 'packaging',\n 'psutil>=3.3.0',\n 'intervaltree<3.0', # See Gallopsled/pwntools#1238\n 'sortedcontainers<2.0', # See Gallopsled/pwntools#1154\n 'unicorn']\n\n# Check that the user has installed the Python development headers\nPythonH = os.path.join(get_python_inc(), 'Python.h')\nif not os.path.exists(PythonH):\n print >> sys.stderr, \"You must install the Python development headers!\"\n print >> sys.stderr, \"$ apt-get install python-dev\"\n sys.exit(-1)\n\n# Convert README.md to reStructuredText for PyPI\nlong_description = ''\ntry:\n import pypandoc\n try:\n pypandoc.get_pandoc_path()\n except OSError:\n pypandoc.download_pandoc()\n long_description = pypandoc.convert_file('README.md', 'rst')\nexcept ImportError:\n pass\nexcept Exception as e:\n print >>sys.stderr, \"Failed to convert README.md through pandoc, proceeding anyway\"\n traceback.print_exc()\n\n\nsetup(\n name = 'pwntools',\n packages = find_packages(),\n version = '3.12.1',\n data_files = [('',\n glob.glob('*.md') + glob.glob('*.txt')),\n ],\n package_data = {\n 'pwnlib': [\n 'data/crcsums.txt',\n 'data/useragents/useragents.txt',\n 'data/binutils/*',\n 'data/includes/*.h',\n 'data/includes/*/*.h',\n 'data/templates/*.mako',\n ] + templates,\n },\n entry_points = {'console_scripts': console_scripts},\n scripts = glob.glob(\"bin/*\"),\n description = \"Pwntools CTF framework and exploit development library.\",\n long_description = long_description,\n author = \"Gallopsled et al.\",\n author_email = \"[email protected]\",\n url = 'https://pwntools.com',\n download_url = \"https://github.com/Gallopsled/pwntools/releases\",\n install_requires = install_requires,\n license = \"Mostly MIT, some GPL/BSD, see LICENSE-pwntools.txt\",\n keywords = 'pwntools exploit ctf capture the flag binary wargame overflow stack heap defcon',\n classifiers = [\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: 
Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Security',\n 'Topic :: Software Development :: Assemblers',\n 'Topic :: Software Development :: Debuggers',\n 'Topic :: Software Development :: Disassemblers',\n 'Topic :: Software Development :: Embedded Systems',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: System :: System Shells',\n 'Topic :: Utilities',\n ]\n)\n", "path": "setup.py"}]} | 2,744 | 118 |
gh_patches_debug_30966 | rasdani/github-patches | git_diff | aimhubio__aim-2000 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Facing issue while converting tensorboard logs to Aim
## 🐛 Bug
<!-- A clear and concise description of what the bug is. -->
Trying to convert a tensorboard event log file to an Aim Run, but getting the error below.
One more question: is there a way to sync tensorboard logs in real time, i.e. sync them in parallel while training is still in progress? Currently the CLI command only syncs once the tensorboard logs are already in place.
Many thanks!
```
The lock file /mnt/c/sharath_mk/ubuntu/aim/.aim/.repo_lock is on a filesystem of type `drvfs` (device id: 14). Using soft file locks to avoid potential data corruption.
2022-07-25 15:21:57.067693: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory
2022-07-25 15:21:57.067771: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.
Converting TensorBoard logs: 0%| | 0/1 [00:00<?, ?it/sWARNING:tensorflow:From /home/miniconda3/lib/python3.8/site-packages/tensorflow/python/summary/summary_iterator.py:27: tf_record_iterator (from tensorflow.python.lib.io.tf_record) is deprecated and will be removed in a future version.
Instructions for updating:
Use eager execution and:
`tf.data.TFRecordDataset(path)`
Parsing logs in /mnt/c/sharath_mk/ubuntu/aim/tensorboard/run_tb_sync/test_tb: 0%| | 0/2 [00:00<?, ?it/s]
Converting TensorBoard logs: 0%| | 0/1 [00:00<?, ?it/s]
Traceback (most recent call last):
File "/home/miniconda3/bin/aim", line 8, in <module>
sys.exit(cli_entry_point())
File "/home/miniconda3/lib/python3.8/site-packages/click/core.py", line 1128, in __call__
return self.main(*args, **kwargs)
File "/home/miniconda3/lib/python3.8/site-packages/click/core.py", line 1053, in main
rv = self.invoke(ctx)
File "/home/miniconda3/lib/python3.8/site-packages/click/core.py", line 1659, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/miniconda3/lib/python3.8/site-packages/click/core.py", line 1659, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/miniconda3/lib/python3.8/site-packages/click/core.py", line 1395, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/miniconda3/lib/python3.8/site-packages/click/core.py", line 754, in invoke
return __callback(*args, **kwargs)
File "/home/miniconda3/lib/python3.8/site-packages/click/decorators.py", line 26, in new_func
return f(get_current_context(), *args, **kwargs)
File "/home/miniconda3/lib/python3.8/site-packages/aim/cli/convert/commands.py", line 39, in convert_tensorboard
parse_tb_logs(logdir, repo_inst, flat, no_cache)
File "/home/miniconda3/lib/python3.8/site-packages/aim/cli/convert/processors/tensorboard.py", line 220, in parse_tb_logs
track_val = value.tensor.float_val[0]
IndexError: list index (0) out of range
```
### To reproduce
<!-- Reproduction steps. -->
Log a tensorboard event log file and convert it with the Aim CLI.
### Expected behavior
<!-- Fill in expected behavior. -->
### Environment
- Aim Version (e.g., 3.0.1)
- Python version
- pip version
- OS (e.g., Linux)
- Any other relevant information
### Additional context
<!-- Add any other context about the problem here. -->
--- END ISSUE ---
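The traceback ends at `track_val = value.tensor.float_val[0]`, i.e. the converter assumes every non-image tensor summary carries at least one float in its `float_val` field, which is not true for payloads such as text summaries. The snippet below is a hypothetical defensive sketch of that extraction step, not the project's actual fix, and assumes the standard TensorBoard `TensorProto` layout and an importable `tensorflow`.

```python
# Hypothetical sketch: pull a scalar out of a summary TensorProto without
# indexing an empty float_val repeated field.
import tensorflow as tf

def scalar_from_tensor_proto(value):
    if value.tensor.float_val:                  # non-empty repeated float field
        return value.tensor.float_val[0]
    try:
        # Fall back to the packed tensor content, if it decodes to numbers.
        return float(tf.make_ndarray(value.tensor).flatten()[0])
    except (TypeError, ValueError, IndexError):
        return None                             # unsupported payload; caller skips it
```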
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `aim/cli/convert/processors/tensorboard.py`
Content:
```
1 import json
2 import os
3
4 import click
5 from tqdm import tqdm
6
7 from aim import Audio, Image, Run
8
9
10 def parse_tb_logs(tb_logs, repo_inst, flat=False, no_cache=False):
11 """
12 This function scans and collects records from TB log files.
13
14 Creates and uses cache file "tb_logs_cache" in the repo dir
15 to track previously processed files and values
16
17 For more info please refer to our integration guides.
18 """
19
20 try:
21 # This import statement takes long to complete
22 import tensorflow as tf
23 from tensorflow.python.summary.summary_iterator import summary_iterator
24 except ImportError:
25 click.echo(
26 'Could not process TensorBoard logs - failed to import tensorflow module.', err=True
27 )
28 return
29
30 supported_plugins = ('images', 'scalars')
31 unsupported_plugin_noticed = False
32 tb_logs_cache_path = os.path.join(repo_inst.path, 'tb_logs_cache')
33
34 if no_cache and os.path.exists(tb_logs_cache_path):
35 os.remove(tb_logs_cache_path)
36 try:
37 with open(tb_logs_cache_path) as FS:
38 tb_logs_cache = json.load(FS)
39 except Exception:
40 tb_logs_cache = {}
41
42 def get_parent(current_path, level=0):
43 # level 0 is the direct parent directory
44 if level <= 0:
45 return os.path.dirname(current_path)
46 elif current_path in ('', '.', '/'):
47 return current_path
48 return get_parent(os.path.dirname(current_path), level - 1)
49
50 tb_logs = os.path.abspath(tb_logs)
51 run_dir_candidates = set()
52 for root, dirs, files in os.walk(tb_logs):
53 for file in files:
54 if not file.startswith('events.out.tfevents'):
55 continue
56
57 file_path = os.path.abspath(os.path.join(root, file))
58 run_dir = get_parent(file_path)
59
60 if not run_dir.startswith(tb_logs):
61 # it's outside tb_logs
62 continue
63
64 run_dir_candidates.add(run_dir)
65
66 def get_level(current_path):
67 level = -1
68 while current_path.startswith(tb_logs):
69 current_path, _ = os.path.split(current_path)
70 level += 1
71 return level
72
73 run_dir_candidates = sorted(run_dir_candidates, key=get_level, reverse=True)
74 run_dir_candidates_filtered = set()
75 run_dir_ignored = set()
76 groups = set()
77
78 for run_dir in run_dir_candidates:
79 if run_dir in run_dir_candidates_filtered:
80 # already tagged as a run dir
81 continue
82
83 if run_dir in groups:
84 # run dir which has other run dirs inside, so we skip it
85 run_dir_ignored.add(run_dir)
86 continue
87
88 depth = get_level(run_dir)
89 if depth >= 2:
90 if flat:
91 run_group_dir = get_parent(run_dir, 0)
92 new_run_dir = run_dir
93 else:
94 run_group_dir = get_parent(run_dir, 1)
95 new_run_dir = get_parent(run_dir, 0)
96 if new_run_dir in groups:
97 new_run_dir = run_dir
98 groups.add(run_group_dir)
99 elif depth == 1:
100 new_run_dir = run_dir
101 else:
102 continue
103 run_dir_candidates_filtered.add(new_run_dir)
104
105 if run_dir_ignored:
106 click.echo('WARN: Found directory entries with unorganized even files!\n'
107 'Please read the preparation instructions to properly process these files.\n'
108 'Event files in the following directories will be ignored:', err=True)
109 for c, r in enumerate(run_dir_ignored, start=1):
110 click.echo(f'{c}: {r}', err=True)
111
112 for path in tqdm(run_dir_candidates_filtered,
113 desc='Converting TensorBoard logs',
114 total=len(run_dir_candidates_filtered)):
115
116 events = {}
117 for root, dirs, files in os.walk(path):
118 for file in files:
119 if 'events.out.tfevents' not in file:
120 continue
121 file_path = os.path.join(root, file)
122 if file_path == os.path.join(path, file):
123 entry = None
124 else:
125 entry = os.path.basename(os.path.dirname(file_path))
126 events[file_path] = {
127 'context': {
128 'entry': entry
129 }
130 }
131
132 if path not in tb_logs_cache:
133 tb_logs_cache[path] = {}
134
135 run_cache = tb_logs_cache[path]
136 if run_cache:
137 run = Run(
138 run_hash=run_cache['run_hash'],
139 repo=repo_inst,
140 system_tracking_interval=None,
141 log_system_params=False,
142 capture_terminal_logs=False,
143 )
144 else:
145 run = Run(
146 repo=repo_inst,
147 system_tracking_interval=None,
148 log_system_params=False,
149 capture_terminal_logs=False,
150 )
151 run['tensorboard_logdir'] = path
152 run_cache.update({
153 'run_hash': run.hash,
154 'events': {},
155 })
156 run_tb_events = run_cache['events']
157
158 events_to_process = []
159 for event in events:
160 last_modified_at = os.path.getmtime(event)
161 try:
162 assert last_modified_at == run_tb_events[event]['last_modified_at']
163 except (KeyError, AssertionError, RuntimeError):
164 # Something has changed or hasn't been processed before
165 events_to_process.append(event)
166 try:
167 run_tb_events[event]['last_modified_at'] = last_modified_at
168 except KeyError:
169 # Completely new event
170 run_tb_events[event] = {
171 'last_modified_at': last_modified_at,
172 'values': {},
173 }
174
175 if not events_to_process:
176 continue
177
178 for event_file in tqdm(events_to_process, desc=f'Parsing logs in {path}', total=len(events_to_process)):
179 run_tb_log = run_tb_events[event_file]
180 event_context = events[event_file]['context']
181 try:
182 for event in summary_iterator(event_file):
183 timestamp = event.wall_time
184 step = event.step
185 fail_count = 0
186 _err_info = None
187
188 for value in event.summary.value:
189 tag = value.tag
190
191 plugin_name = value.metadata.plugin_data.plugin_name
192 value_id = f'{tag}_{plugin_name}'
193 if value_id in run_tb_log['values']:
194 if run_tb_log['values'][value_id]['timestamp'] >= timestamp:
195 # prevent previously tracked data from re-tracking upon file update
196 continue
197
198 if len(plugin_name) > 0 and plugin_name not in supported_plugins:
199 if not unsupported_plugin_noticed:
200 click.echo(
201 'Found unsupported plugin type in the log file. '
202 'Data for these wont be processed. '
203 'Supported plugin types are: {}'.format(', '.join(supported_plugins)),
204 err=True
205 )
206 unsupported_plugin_noticed = True
207 continue
208 track_val = None
209 try:
210 if value.HasField('tensor'):
211 # TODO: [MV] check the case when audios are passed via tensor
212 if plugin_name == 'images':
213 tensor = value.tensor.string_val[2:]
214 track_val = [
215 Image(tf.image.decode_image(t).numpy()) for t in tensor
216 ]
217 if len(track_val) == 1:
218 track_val = track_val[0]
219 else:
220 track_val = value.tensor.float_val[0]
221 elif value.HasField('simple_value'):
222 track_val = value.simple_value
223 elif value.HasField('image'):
224 track_val = Image(tf.image.decode_image(value.image.encoded_image_string).numpy())
225 elif value.HasField('audio'):
226 tf_audio, sample_rate = tf.audio.decode_wav(value.audio.encoded_audio_string)
227 track_val = Audio(tf_audio.numpy(), rate=sample_rate)
228
229 except RuntimeError as exc:
230 # catch all the nasty failures
231 fail_count += 1
232 if not _err_info:
233 _err_info = str(exc)
234 continue
235
236 run_tb_log['values'][value_id] = {
237 'step': step,
238 'timestamp': timestamp
239 }
240 if track_val is not None:
241 run._tracker._track(track_val, timestamp, tag, step, context=event_context)
242 if fail_count:
243 click.echo(f'Failed to process {fail_count} entries. First exception: {_err_info}', err=True)
244
245 except RuntimeError as exc:
246 click.echo(f'Failed to read log file {event_file} - {exc}', err=True)
247
248 # refresh cache
249 with open(tb_logs_cache_path, 'w') as FS:
250 json.dump(tb_logs_cache, FS)
251
252 click.echo('TensorBoard logs conversion complete!')
253
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/aim/cli/convert/processors/tensorboard.py b/aim/cli/convert/processors/tensorboard.py
--- a/aim/cli/convert/processors/tensorboard.py
+++ b/aim/cli/convert/processors/tensorboard.py
@@ -21,6 +21,7 @@
# This import statement takes long to complete
import tensorflow as tf
from tensorflow.python.summary.summary_iterator import summary_iterator
+ from tensorboard.util import tensor_util
except ImportError:
click.echo(
'Could not process TensorBoard logs - failed to import tensorflow module.', err=True
@@ -70,6 +71,13 @@
level += 1
return level
+ def create_ndarray(tensor):
+ res = tensor_util.make_ndarray(tensor)
+ if res.dtype == "object":
+ return None
+ else:
+ return res
+
run_dir_candidates = sorted(run_dir_candidates, key=get_level, reverse=True)
run_dir_candidates_filtered = set()
run_dir_ignored = set()
@@ -216,6 +224,8 @@
]
if len(track_val) == 1:
track_val = track_val[0]
+ elif plugin_name == "scalars" or plugin_name == "":
+ track_val = create_ndarray(value.tensor)
else:
track_val = value.tensor.float_val[0]
elif value.HasField('simple_value'):
| {"golden_diff": "diff --git a/aim/cli/convert/processors/tensorboard.py b/aim/cli/convert/processors/tensorboard.py\n--- a/aim/cli/convert/processors/tensorboard.py\n+++ b/aim/cli/convert/processors/tensorboard.py\n@@ -21,6 +21,7 @@\n # This import statement takes long to complete\n import tensorflow as tf\n from tensorflow.python.summary.summary_iterator import summary_iterator\n+ from tensorboard.util import tensor_util\n except ImportError:\n click.echo(\n 'Could not process TensorBoard logs - failed to import tensorflow module.', err=True\n@@ -70,6 +71,13 @@\n level += 1\n return level\n \n+ def create_ndarray(tensor):\n+ res = tensor_util.make_ndarray(tensor)\n+ if res.dtype == \"object\":\n+ return None\n+ else:\n+ return res\n+\n run_dir_candidates = sorted(run_dir_candidates, key=get_level, reverse=True)\n run_dir_candidates_filtered = set()\n run_dir_ignored = set()\n@@ -216,6 +224,8 @@\n ]\n if len(track_val) == 1:\n track_val = track_val[0]\n+ elif plugin_name == \"scalars\" or plugin_name == \"\":\n+ track_val = create_ndarray(value.tensor)\n else:\n track_val = value.tensor.float_val[0]\n elif value.HasField('simple_value'):\n", "issue": "Facing issue while converting tensorboard logs to Aim\n## \ud83d\udc1b Bug\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\nTrying to convert tensorboard event log file to Aim Run, but getting below error,\r\n\r\nOne more question, do we have a way to sync tensorboard logs real-time, like while training is in-progress parallelly can we sync tensorboard logs? Currently it's cli command to sync once we have tensorboard logs in place.\r\n\r\nMany thanks!\r\n\r\n```\r\nThe lock file /mnt/c/sharath_mk/ubuntu/aim/.aim/.repo_lock is on a filesystem of type `drvfs` (device id: 14). Using soft file locks to avoid potential data corruption.\r\n2022-07-25 15:21:57.067693: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory\r\n2022-07-25 15:21:57.067771: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.\r\nConverting TensorBoard logs: 0%| | 0/1 [00:00<?, ?it/sWARNING:tensorflow:From /home/miniconda3/lib/python3.8/site-packages/tensorflow/python/summary/summary_iterator.py:27: tf_record_iterator (from tensorflow.python.lib.io.tf_record) is deprecated and will be removed in a future version.\r\nInstructions for updating:\r\nUse eager execution and: \r\n`tf.data.TFRecordDataset(path)`\r\nParsing logs in /mnt/c/sharath_mk/ubuntu/aim/tensorboard/run_tb_sync/test_tb: 0%| | 0/2 [00:00<?, ?it/s]\r\nConverting TensorBoard logs: 0%| | 0/1 [00:00<?, ?it/s]\r\nTraceback (most recent call last):\r\n File \"/home/miniconda3/bin/aim\", line 8, in <module>\r\n sys.exit(cli_entry_point())\r\n File \"/home/miniconda3/lib/python3.8/site-packages/click/core.py\", line 1128, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/home/miniconda3/lib/python3.8/site-packages/click/core.py\", line 1053, in main\r\n rv = self.invoke(ctx)\r\n File \"/home/miniconda3/lib/python3.8/site-packages/click/core.py\", line 1659, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/home/miniconda3/lib/python3.8/site-packages/click/core.py\", line 1659, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/home/miniconda3/lib/python3.8/site-packages/click/core.py\", line 1395, 
in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/home/miniconda3/lib/python3.8/site-packages/click/core.py\", line 754, in invoke\r\n return __callback(*args, **kwargs)\r\n File \"/home/miniconda3/lib/python3.8/site-packages/click/decorators.py\", line 26, in new_func\r\n return f(get_current_context(), *args, **kwargs)\r\n File \"/home/miniconda3/lib/python3.8/site-packages/aim/cli/convert/commands.py\", line 39, in convert_tensorboard\r\n parse_tb_logs(logdir, repo_inst, flat, no_cache)\r\n File \"/home/miniconda3/lib/python3.8/site-packages/aim/cli/convert/processors/tensorboard.py\", line 220, in parse_tb_logs\r\n track_val = value.tensor.float_val[0]\r\nIndexError: list index (0) out of range\r\n```\r\n### To reproduce\r\n\r\n<!-- Reproduction steps. -->\r\nLog tensorbord event log file\r\n\r\n### Expected behavior\r\n\r\n<!-- Fill in expected behavior. -->\r\n\r\n### Environment\r\n\r\n- Aim Version (e.g., 3.0.1)\r\n- Python version\r\n- pip version\r\n- OS (e.g., Linux)\r\n- Any other relevant information\r\n\r\n### Additional context\r\n\r\n<!-- Add any other context about the problem here. -->\r\n\n", "before_files": [{"content": "import json\nimport os\n\nimport click\nfrom tqdm import tqdm\n\nfrom aim import Audio, Image, Run\n\n\ndef parse_tb_logs(tb_logs, repo_inst, flat=False, no_cache=False):\n \"\"\"\n This function scans and collects records from TB log files.\n\n Creates and uses cache file \"tb_logs_cache\" in the repo dir\n to track previously processed files and values\n\n For more info please refer to our integration guides.\n \"\"\"\n\n try:\n # This import statement takes long to complete\n import tensorflow as tf\n from tensorflow.python.summary.summary_iterator import summary_iterator\n except ImportError:\n click.echo(\n 'Could not process TensorBoard logs - failed to import tensorflow module.', err=True\n )\n return\n\n supported_plugins = ('images', 'scalars')\n unsupported_plugin_noticed = False\n tb_logs_cache_path = os.path.join(repo_inst.path, 'tb_logs_cache')\n\n if no_cache and os.path.exists(tb_logs_cache_path):\n os.remove(tb_logs_cache_path)\n try:\n with open(tb_logs_cache_path) as FS:\n tb_logs_cache = json.load(FS)\n except Exception:\n tb_logs_cache = {}\n\n def get_parent(current_path, level=0):\n # level 0 is the direct parent directory\n if level <= 0:\n return os.path.dirname(current_path)\n elif current_path in ('', '.', '/'):\n return current_path\n return get_parent(os.path.dirname(current_path), level - 1)\n\n tb_logs = os.path.abspath(tb_logs)\n run_dir_candidates = set()\n for root, dirs, files in os.walk(tb_logs):\n for file in files:\n if not file.startswith('events.out.tfevents'):\n continue\n\n file_path = os.path.abspath(os.path.join(root, file))\n run_dir = get_parent(file_path)\n\n if not run_dir.startswith(tb_logs):\n # it's outside tb_logs\n continue\n\n run_dir_candidates.add(run_dir)\n\n def get_level(current_path):\n level = -1\n while current_path.startswith(tb_logs):\n current_path, _ = os.path.split(current_path)\n level += 1\n return level\n\n run_dir_candidates = sorted(run_dir_candidates, key=get_level, reverse=True)\n run_dir_candidates_filtered = set()\n run_dir_ignored = set()\n groups = set()\n\n for run_dir in run_dir_candidates:\n if run_dir in run_dir_candidates_filtered:\n # already tagged as a run dir\n continue\n\n if run_dir in groups:\n # run dir which has other run dirs inside, so we skip it\n run_dir_ignored.add(run_dir)\n continue\n\n depth = get_level(run_dir)\n if depth >= 2:\n if 
flat:\n run_group_dir = get_parent(run_dir, 0)\n new_run_dir = run_dir\n else:\n run_group_dir = get_parent(run_dir, 1)\n new_run_dir = get_parent(run_dir, 0)\n if new_run_dir in groups:\n new_run_dir = run_dir\n groups.add(run_group_dir)\n elif depth == 1:\n new_run_dir = run_dir\n else:\n continue\n run_dir_candidates_filtered.add(new_run_dir)\n\n if run_dir_ignored:\n click.echo('WARN: Found directory entries with unorganized even files!\\n'\n 'Please read the preparation instructions to properly process these files.\\n'\n 'Event files in the following directories will be ignored:', err=True)\n for c, r in enumerate(run_dir_ignored, start=1):\n click.echo(f'{c}: {r}', err=True)\n\n for path in tqdm(run_dir_candidates_filtered,\n desc='Converting TensorBoard logs',\n total=len(run_dir_candidates_filtered)):\n\n events = {}\n for root, dirs, files in os.walk(path):\n for file in files:\n if 'events.out.tfevents' not in file:\n continue\n file_path = os.path.join(root, file)\n if file_path == os.path.join(path, file):\n entry = None\n else:\n entry = os.path.basename(os.path.dirname(file_path))\n events[file_path] = {\n 'context': {\n 'entry': entry\n }\n }\n\n if path not in tb_logs_cache:\n tb_logs_cache[path] = {}\n\n run_cache = tb_logs_cache[path]\n if run_cache:\n run = Run(\n run_hash=run_cache['run_hash'],\n repo=repo_inst,\n system_tracking_interval=None,\n log_system_params=False,\n capture_terminal_logs=False,\n )\n else:\n run = Run(\n repo=repo_inst,\n system_tracking_interval=None,\n log_system_params=False,\n capture_terminal_logs=False,\n )\n run['tensorboard_logdir'] = path\n run_cache.update({\n 'run_hash': run.hash,\n 'events': {},\n })\n run_tb_events = run_cache['events']\n\n events_to_process = []\n for event in events:\n last_modified_at = os.path.getmtime(event)\n try:\n assert last_modified_at == run_tb_events[event]['last_modified_at']\n except (KeyError, AssertionError, RuntimeError):\n # Something has changed or hasn't been processed before\n events_to_process.append(event)\n try:\n run_tb_events[event]['last_modified_at'] = last_modified_at\n except KeyError:\n # Completely new event\n run_tb_events[event] = {\n 'last_modified_at': last_modified_at,\n 'values': {},\n }\n\n if not events_to_process:\n continue\n\n for event_file in tqdm(events_to_process, desc=f'Parsing logs in {path}', total=len(events_to_process)):\n run_tb_log = run_tb_events[event_file]\n event_context = events[event_file]['context']\n try:\n for event in summary_iterator(event_file):\n timestamp = event.wall_time\n step = event.step\n fail_count = 0\n _err_info = None\n\n for value in event.summary.value:\n tag = value.tag\n\n plugin_name = value.metadata.plugin_data.plugin_name\n value_id = f'{tag}_{plugin_name}'\n if value_id in run_tb_log['values']:\n if run_tb_log['values'][value_id]['timestamp'] >= timestamp:\n # prevent previously tracked data from re-tracking upon file update\n continue\n\n if len(plugin_name) > 0 and plugin_name not in supported_plugins:\n if not unsupported_plugin_noticed:\n click.echo(\n 'Found unsupported plugin type in the log file. '\n 'Data for these wont be processed. 
'\n 'Supported plugin types are: {}'.format(', '.join(supported_plugins)),\n err=True\n )\n unsupported_plugin_noticed = True\n continue\n track_val = None\n try:\n if value.HasField('tensor'):\n # TODO: [MV] check the case when audios are passed via tensor\n if plugin_name == 'images':\n tensor = value.tensor.string_val[2:]\n track_val = [\n Image(tf.image.decode_image(t).numpy()) for t in tensor\n ]\n if len(track_val) == 1:\n track_val = track_val[0]\n else:\n track_val = value.tensor.float_val[0]\n elif value.HasField('simple_value'):\n track_val = value.simple_value\n elif value.HasField('image'):\n track_val = Image(tf.image.decode_image(value.image.encoded_image_string).numpy())\n elif value.HasField('audio'):\n tf_audio, sample_rate = tf.audio.decode_wav(value.audio.encoded_audio_string)\n track_val = Audio(tf_audio.numpy(), rate=sample_rate)\n\n except RuntimeError as exc:\n # catch all the nasty failures\n fail_count += 1\n if not _err_info:\n _err_info = str(exc)\n continue\n\n run_tb_log['values'][value_id] = {\n 'step': step,\n 'timestamp': timestamp\n }\n if track_val is not None:\n run._tracker._track(track_val, timestamp, tag, step, context=event_context)\n if fail_count:\n click.echo(f'Failed to process {fail_count} entries. First exception: {_err_info}', err=True)\n\n except RuntimeError as exc:\n click.echo(f'Failed to read log file {event_file} - {exc}', err=True)\n\n # refresh cache\n with open(tb_logs_cache_path, 'w') as FS:\n json.dump(tb_logs_cache, FS)\n\n click.echo('TensorBoard logs conversion complete!')\n", "path": "aim/cli/convert/processors/tensorboard.py"}], "after_files": [{"content": "import json\nimport os\n\nimport click\nfrom tqdm import tqdm\n\nfrom aim import Audio, Image, Run\n\n\ndef parse_tb_logs(tb_logs, repo_inst, flat=False, no_cache=False):\n \"\"\"\n This function scans and collects records from TB log files.\n\n Creates and uses cache file \"tb_logs_cache\" in the repo dir\n to track previously processed files and values\n\n For more info please refer to our integration guides.\n \"\"\"\n\n try:\n # This import statement takes long to complete\n import tensorflow as tf\n from tensorflow.python.summary.summary_iterator import summary_iterator\n from tensorboard.util import tensor_util\n except ImportError:\n click.echo(\n 'Could not process TensorBoard logs - failed to import tensorflow module.', err=True\n )\n return\n\n supported_plugins = ('images', 'scalars')\n unsupported_plugin_noticed = False\n tb_logs_cache_path = os.path.join(repo_inst.path, 'tb_logs_cache')\n\n if no_cache and os.path.exists(tb_logs_cache_path):\n os.remove(tb_logs_cache_path)\n try:\n with open(tb_logs_cache_path) as FS:\n tb_logs_cache = json.load(FS)\n except Exception:\n tb_logs_cache = {}\n\n def get_parent(current_path, level=0):\n # level 0 is the direct parent directory\n if level <= 0:\n return os.path.dirname(current_path)\n elif current_path in ('', '.', '/'):\n return current_path\n return get_parent(os.path.dirname(current_path), level - 1)\n\n tb_logs = os.path.abspath(tb_logs)\n run_dir_candidates = set()\n for root, dirs, files in os.walk(tb_logs):\n for file in files:\n if not file.startswith('events.out.tfevents'):\n continue\n\n file_path = os.path.abspath(os.path.join(root, file))\n run_dir = get_parent(file_path)\n\n if not run_dir.startswith(tb_logs):\n # it's outside tb_logs\n continue\n\n run_dir_candidates.add(run_dir)\n\n def get_level(current_path):\n level = -1\n while current_path.startswith(tb_logs):\n current_path, _ = 
os.path.split(current_path)\n level += 1\n return level\n\n def create_ndarray(tensor):\n res = tensor_util.make_ndarray(tensor)\n if res.dtype == \"object\":\n return None\n else:\n return res\n\n run_dir_candidates = sorted(run_dir_candidates, key=get_level, reverse=True)\n run_dir_candidates_filtered = set()\n run_dir_ignored = set()\n groups = set()\n\n for run_dir in run_dir_candidates:\n if run_dir in run_dir_candidates_filtered:\n # already tagged as a run dir\n continue\n\n if run_dir in groups:\n # run dir which has other run dirs inside, so we skip it\n run_dir_ignored.add(run_dir)\n continue\n\n depth = get_level(run_dir)\n if depth >= 2:\n if flat:\n run_group_dir = get_parent(run_dir, 0)\n new_run_dir = run_dir\n else:\n run_group_dir = get_parent(run_dir, 1)\n new_run_dir = get_parent(run_dir, 0)\n if new_run_dir in groups:\n new_run_dir = run_dir\n groups.add(run_group_dir)\n elif depth == 1:\n new_run_dir = run_dir\n else:\n continue\n run_dir_candidates_filtered.add(new_run_dir)\n\n if run_dir_ignored:\n click.echo('WARN: Found directory entries with unorganized even files!\\n'\n 'Please read the preparation instructions to properly process these files.\\n'\n 'Event files in the following directories will be ignored:', err=True)\n for c, r in enumerate(run_dir_ignored, start=1):\n click.echo(f'{c}: {r}', err=True)\n\n for path in tqdm(run_dir_candidates_filtered,\n desc='Converting TensorBoard logs',\n total=len(run_dir_candidates_filtered)):\n\n events = {}\n for root, dirs, files in os.walk(path):\n for file in files:\n if 'events.out.tfevents' not in file:\n continue\n file_path = os.path.join(root, file)\n if file_path == os.path.join(path, file):\n entry = None\n else:\n entry = os.path.basename(os.path.dirname(file_path))\n events[file_path] = {\n 'context': {\n 'entry': entry\n }\n }\n\n if path not in tb_logs_cache:\n tb_logs_cache[path] = {}\n\n run_cache = tb_logs_cache[path]\n if run_cache:\n run = Run(\n run_hash=run_cache['run_hash'],\n repo=repo_inst,\n system_tracking_interval=None,\n log_system_params=False,\n capture_terminal_logs=False,\n )\n else:\n run = Run(\n repo=repo_inst,\n system_tracking_interval=None,\n log_system_params=False,\n capture_terminal_logs=False,\n )\n run['tensorboard_logdir'] = path\n run_cache.update({\n 'run_hash': run.hash,\n 'events': {},\n })\n run_tb_events = run_cache['events']\n\n events_to_process = []\n for event in events:\n last_modified_at = os.path.getmtime(event)\n try:\n assert last_modified_at == run_tb_events[event]['last_modified_at']\n except (KeyError, AssertionError, RuntimeError):\n # Something has changed or hasn't been processed before\n events_to_process.append(event)\n try:\n run_tb_events[event]['last_modified_at'] = last_modified_at\n except KeyError:\n # Completely new event\n run_tb_events[event] = {\n 'last_modified_at': last_modified_at,\n 'values': {},\n }\n\n if not events_to_process:\n continue\n\n for event_file in tqdm(events_to_process, desc=f'Parsing logs in {path}', total=len(events_to_process)):\n run_tb_log = run_tb_events[event_file]\n event_context = events[event_file]['context']\n try:\n for event in summary_iterator(event_file):\n timestamp = event.wall_time\n step = event.step\n fail_count = 0\n _err_info = None\n\n for value in event.summary.value:\n tag = value.tag\n\n plugin_name = value.metadata.plugin_data.plugin_name\n value_id = f'{tag}_{plugin_name}'\n if value_id in run_tb_log['values']:\n if run_tb_log['values'][value_id]['timestamp'] >= timestamp:\n # prevent previously 
tracked data from re-tracking upon file update\n continue\n\n if len(plugin_name) > 0 and plugin_name not in supported_plugins:\n if not unsupported_plugin_noticed:\n click.echo(\n 'Found unsupported plugin type in the log file. '\n 'Data for these wont be processed. '\n 'Supported plugin types are: {}'.format(', '.join(supported_plugins)),\n err=True\n )\n unsupported_plugin_noticed = True\n continue\n track_val = None\n try:\n if value.HasField('tensor'):\n # TODO: [MV] check the case when audios are passed via tensor\n if plugin_name == 'images':\n tensor = value.tensor.string_val[2:]\n track_val = [\n Image(tf.image.decode_image(t).numpy()) for t in tensor\n ]\n if len(track_val) == 1:\n track_val = track_val[0]\n elif plugin_name == \"scalars\" or plugin_name == \"\":\n track_val = create_ndarray(value.tensor)\n else:\n track_val = value.tensor.float_val[0]\n elif value.HasField('simple_value'):\n track_val = value.simple_value\n elif value.HasField('image'):\n track_val = Image(tf.image.decode_image(value.image.encoded_image_string).numpy())\n elif value.HasField('audio'):\n tf_audio, sample_rate = tf.audio.decode_wav(value.audio.encoded_audio_string)\n track_val = Audio(tf_audio.numpy(), rate=sample_rate)\n\n except RuntimeError as exc:\n # catch all the nasty failures\n fail_count += 1\n if not _err_info:\n _err_info = str(exc)\n continue\n\n run_tb_log['values'][value_id] = {\n 'step': step,\n 'timestamp': timestamp\n }\n if track_val is not None:\n run._tracker._track(track_val, timestamp, tag, step, context=event_context)\n if fail_count:\n click.echo(f'Failed to process {fail_count} entries. First exception: {_err_info}', err=True)\n\n except RuntimeError as exc:\n click.echo(f'Failed to read log file {event_file} - {exc}', err=True)\n\n # refresh cache\n with open(tb_logs_cache_path, 'w') as FS:\n json.dump(tb_logs_cache, FS)\n\n click.echo('TensorBoard logs conversion complete!')\n", "path": "aim/cli/convert/processors/tensorboard.py"}]} | 3,737 | 314 |
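The patch above decodes scalar summaries with `tensorboard.util.tensor_util.make_ndarray` instead of indexing `value.tensor.float_val`, which is empty for TensorBoard 2.x scalar events and triggers the reported `IndexError`. The snippet below is a minimal, illustrative sketch of that fallback in isolation — the helper name is hypothetical, only `tensorboard` is assumed to be installed, and it is not part of Aim's public API.

```python
# Illustrative sketch of the fallback added in the patch (not Aim's API):
# decode a TensorProto via tensorboard's tensor_util rather than float_val,
# which TB 2.x scalar summaries leave empty in favour of tensor_content.
from tensorboard.util import tensor_util


def scalar_from_tensor_proto(tensor_proto):
    """Return the decoded numpy value, or None for non-numeric (object) tensors."""
    arr = tensor_util.make_ndarray(tensor_proto)
    if arr.dtype == "object":
        return None  # e.g. text summaries carry no numeric value to track
    return arr
```

A caller would pass `value.tensor` from each `event.summary.value` entry, mirroring the `create_ndarray` helper introduced in the golden diff.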
gh_patches_debug_2483 | rasdani/github-patches | git_diff | mosaicml__composer-2108 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Less strict numpy version pinning
## 🚀 Feature Request
Allow for newer numpy versions than `<1.23.0`.
## Motivation
Currently, composer fixes numpy to be `'numpy>=1.21.5,<1.23.0'`. This is unfortunate, because other requirements that we use need numpy > 1.23.0, creating an incompatibility.
This was set in #1345 in reaction to a build failure because scipy need(ed) `<1.23.0`. Scipy itself is apparently pulled from `torch-metrics`.
I don't believe this is current anymore:
- composer pulls in `torchmetrics>=0.10.0,<0.11.4`
- torchmetrics 0.11.3 pulls in `scipy >1.0.0, <=1.10.0` [(requirements)](https://github.com/Lightning-AI/metrics/blob/v0.11.3/requirements/image.txt#LL4C23-L4C23)
- scipy 1.10.0 sets `np_maxversion = '1.27.0'` [(setup.py)](https://github.com/scipy/scipy/blob/v1.10.0/setup.py#L453)
Indeed, scipy has https://github.com/scipy/scipy/issues/16964, which was fixed in https://github.com/scipy/scipy/pull/16966 in September of 2022, while #1345 above was noted in August.
Accordingly, it looks like relaxing the numpy maxversion should have a valid requirements path.
The closed PR https://github.com/mosaicml/composer/pull/1835 would already implement this.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2022 MosaicML Composer authors
2 # SPDX-License-Identifier: Apache-2.0
3
4 """Composer package setup."""
5
6 import os
7 import site
8 import sys
9 import textwrap
10
11 import setuptools
12 from setuptools import setup
13 from setuptools.command.develop import develop as develop_orig
14
15 # Read the composer version
16 # Cannot import from `composer.__version__` since that will not be available when building or installing the package
17 with open(os.path.join(os.path.dirname(__file__), 'composer', '_version.py')) as f:
18 version_globals = {}
19 version_locals = {}
20 exec(f.read(), version_globals, version_locals)
21 composer_version = version_locals['__version__']
22
23 _IS_ROOT = os.getuid() == 0
24 _IS_USER = '--user' in sys.argv[1:]
25 _IS_VIRTUALENV = 'VIRTUAL_ENV' in os.environ
26
27
28 # From https://stackoverflow.com/questions/51292333/how-to-tell-from-setup-py-if-the-module-is-being-installed-in-editable-mode
29 class develop(develop_orig):
30 """Override the ``develop`` class to error if attempting an editable install as root."""
31
32 def run(self):
33 if _IS_ROOT and (not _IS_VIRTUALENV) and (not _IS_USER):
34 raise RuntimeError(
35 textwrap.dedent("""\
36 When installing in editable mode as root outside of a virtual environment,
37 please specify `--user`. Editable installs as the root user outside of a virtual environment
38 do not work without the `--user` flag. Please instead run something like: `pip install --user -e .`"""
39 ))
40 super().run()
41
42
43 # From https://github.com/pypa/pip/issues/7953#issuecomment-645133255
44 site.ENABLE_USER_SITE = _IS_USER
45
46
47 def package_files(prefix: str, directory: str, extension: str):
48 """Get all the files to package."""
49 # from https://stackoverflow.com/a/36693250
50 paths = []
51 for (path, _, filenames) in os.walk(os.path.join(prefix, directory)):
52 for filename in filenames:
53 if filename.endswith(extension):
54 paths.append(os.path.relpath(os.path.join(path, filename), prefix))
55 return paths
56
57
58 with open('README.md', 'r', encoding='utf-8') as fh:
59 long_description = fh.read()
60
61 # Hide the content between <!-- SETUPTOOLS_LONG_DESCRIPTION_HIDE_BEGIN --> and
62 # <!-- SETUPTOOLS_LONG_DESCRIPTION_HIDE_END --> tags in the README
63 while True:
64 start_tag = '<!-- SETUPTOOLS_LONG_DESCRIPTION_HIDE_BEGIN -->'
65 end_tag = '<!-- SETUPTOOLS_LONG_DESCRIPTION_HIDE_END -->'
66 start = long_description.find(start_tag)
67 end = long_description.find(end_tag)
68 if start == -1:
69 assert end == -1, 'there should be a balanced number of start and ends'
70 break
71 else:
72 assert end != -1, 'there should be a balanced number of start and ends'
73 long_description = long_description[:start] + long_description[end + len(end_tag):]
74
75 install_requires = [
76 'pyyaml>=6.0,<7',
77 'tqdm>=4.62.3,<5',
78 'torchmetrics>=0.10.0,<0.11.4',
79 'torch_optimizer>=0.3.0,<0.4',
80 'torchvision>=0.11.0,<0.15',
81 'torch>=1.10.0,<1.14',
82 'requests>=2.26.0,<3',
83 'numpy>=1.21.5,<1.23.0',
84 'psutil>=5.8.0,<6',
85 'coolname>=1.1.0,<3',
86 'tabulate==0.9.0', # for auto-generating tables
87 'py-cpuinfo>=8.0.0,<10',
88 'packaging>=21.3.0,<23',
89 'importlib-metadata>=5.0.0,<7',
90 ]
91 extra_deps = {}
92
93 extra_deps['base'] = []
94
95 extra_deps['dev'] = [
96 # Imports for docs builds and running tests
97 # Pinning versions strictly to avoid random test failures.
98 # Should manually update dependency versions occassionally.
99 'custom_inherit==2.4.1',
100 'junitparser==2.8.0',
101 'coverage[toml]==7.2.2',
102 'fasteners==0.18', # object store tests require fasteners
103 'pytest==7.2.2',
104 'toml==0.10.2',
105 'ipython==8.11.0',
106 'ipykernel==6.20.1',
107 'jupyter==1.0.0',
108 'yamllint==1.30.0',
109 'recommonmark==0.7.1',
110 'sphinx==4.4.0',
111 'pre-commit>=2.18.1,<3',
112 # embedding md in rst require docutils>=0.17. See
113 # https://myst-parser.readthedocs.io/en/latest/sphinx/use.html?highlight=parser#include-markdown-files-into-an-rst-file
114 'docutils==0.17.1',
115 'sphinx_markdown_tables==0.0.17',
116 'sphinx-argparse==0.4.0',
117 'sphinxcontrib.katex==0.9.4',
118 'sphinxext.opengraph==0.7.4',
119 'sphinxemoji==0.2.0',
120 'furo==2022.9.29',
121 'sphinx-copybutton==0.5.0',
122 'testbook==0.4.2',
123 'myst-parser==0.16.1',
124 'sphinx_panels==0.6.0',
125 'sphinxcontrib-images==0.9.4',
126 'pytest_codeblocks==0.16.1',
127 'traitlets==5.9.0',
128 'nbsphinx==0.8.12',
129 'pandoc==2.3',
130 'pypandoc==1.11',
131 'GitPython==3.1.31',
132 'moto[s3]>=4.0.1,<5',
133 'mock-ssh-server==0.9.1',
134 'cryptography==38.0.4',
135 'pytest-httpserver>=1.0.4,<1.1',
136 'setuptools<=59.5.0',
137 ]
138
139 extra_deps['health_checker'] = {
140 'pynvml>=11.5.0,<12',
141 'slack_sdk>=3.19.5,<4',
142 }
143
144 extra_deps['deepspeed'] = [
145 'deepspeed==0.7.7',
146 ]
147
148 extra_deps['wandb'] = [
149 'wandb>=0.13.2,<0.14',
150 ]
151
152 extra_deps['comet_ml'] = [
153 'comet_ml>=3.31.12,<4.0.0',
154 ]
155
156 extra_deps['tensorboard'] = [
157 'tensorboard>=2.9.1,<3.0.0',
158 ]
159
160 extra_deps['unet'] = [
161 'monai>=0.9.1,<1.2',
162 'scikit-learn>=1.0.1,<2',
163 ]
164
165 extra_deps['vit'] = [
166 'vit_pytorch==0.35.8',
167 ]
168
169 extra_deps['timm'] = [
170 'timm>=0.5.4,<0.6',
171 ]
172
173 extra_deps['coco'] = [
174 'pycocotools>=2.0.4,<3',
175 ]
176
177 extra_deps['nlp'] = [
178 'transformers>=4.11,<4.27.5',
179 'datasets>=2.4,<3',
180 ]
181
182 extra_deps['sentencepiece'] = ['sentencepiece==0.1.97']
183
184 extra_deps['mlperf'] = [
185 # TODO: use pip when available: https://github.com/mlcommons/logging/issues/218
186 # "mlperf_logging @ git+https://github.com/mlperf/logging.git",
187 'py-cpuinfo>=8.0.0,<10',
188 ]
189
190 extra_deps['streaming'] = [
191 'mosaicml-streaming<0.4',
192 'boto3>=1.21.45,<2',
193 'paramiko>=2.11.0,<3',
194 ]
195
196 extra_deps['libcloud'] = [
197 'apache-libcloud>=3.3.1,<4',
198 ]
199
200 extra_deps['oci'] = [
201 'oci>=2.88.2,<3.0.0',
202 ]
203
204 extra_deps['onnx'] = [
205 'onnx>=1.12.0,<2',
206 'onnxruntime>=1.12.1,<2',
207 ]
208
209 extra_deps['mlflow'] = ['mlflow>=2.0.1,<3.0']
210
211 extra_deps['all'] = set(dep for deps in extra_deps.values() for dep in deps)
212
213 composer_data_files = ['py.typed']
214 composer_data_files += package_files('composer', 'yamls', '.yaml')
215 composer_data_files += package_files('composer', 'algorithms', '.json')
216
217 package_name = os.environ.get('COMPOSER_PACKAGE_NAME', 'mosaicml')
218
219 if package_name != 'mosaicml':
220 print(f'`Building composer as `{package_name}`)', file=sys.stderr)
221
222 setup(name=package_name,
223 version=composer_version,
224 author='MosaicML',
225 author_email='[email protected]',
226 description=('Composer is a PyTorch library that enables you to train ' +
227 'neural networks faster, at lower cost, and to higher accuracy.'),
228 long_description=long_description,
229 long_description_content_type='text/markdown',
230 url='https://github.com/mosaicml/composer',
231 include_package_data=True,
232 package_data={
233 'composer': composer_data_files,
234 },
235 packages=setuptools.find_packages(exclude=['docker*', 'examples*', 'scripts*', 'tests*']),
236 classifiers=[
237 'Programming Language :: Python :: 3',
238 'Programming Language :: Python :: 3.8',
239 'Programming Language :: Python :: 3.9',
240 'Programming Language :: Python :: 3.10',
241 ],
242 install_requires=install_requires,
243 entry_points={
244 'console_scripts': [
245 'composer = composer.cli.launcher:main',
246 'composer_collect_env = composer.utils.collect_env:main',
247 ],
248 },
249 extras_require=extra_deps,
250 dependency_links=['https://developer.download.nvidia.com/compute/redist'],
251 python_requires='>=3.8',
252 ext_package='composer',
253 cmdclass={'develop': develop})
254
255 # only visible if user installs with verbose -v flag
256 # Printing to stdout as not to interfere with setup.py CLI flags (e.g. --version)
257 print('*' * 20, file=sys.stderr)
258 print(textwrap.dedent("""\
259 NOTE: For best performance, we recommend installing Pillow-SIMD
260 for accelerated image processing operations. To install:
261 \t pip uninstall pillow && pip install pillow-simd"""),
262 file=sys.stderr)
263 print('*' * 20, file=sys.stderr)
264
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -80,7 +80,7 @@
'torchvision>=0.11.0,<0.15',
'torch>=1.10.0,<1.14',
'requests>=2.26.0,<3',
- 'numpy>=1.21.5,<1.23.0',
+ 'numpy>=1.21.5,<1.25.0',
'psutil>=5.8.0,<6',
'coolname>=1.1.0,<3',
'tabulate==0.9.0', # for auto-generating tables
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -80,7 +80,7 @@\n 'torchvision>=0.11.0,<0.15',\n 'torch>=1.10.0,<1.14',\n 'requests>=2.26.0,<3',\n- 'numpy>=1.21.5,<1.23.0',\n+ 'numpy>=1.21.5,<1.25.0',\n 'psutil>=5.8.0,<6',\n 'coolname>=1.1.0,<3',\n 'tabulate==0.9.0', # for auto-generating tables\n", "issue": "Less strict numpy version pinning\n## \ud83d\ude80 Feature Request\r\nAllow for newer numpy versions than `<1.23.0`.\r\n\r\n## Motivation\r\n\r\nCurrently, composer fixes numpy to be `'numpy>=1.21.5,<1.23.0'`. This is unfortunate, because other requirements that we use need numpy > 1.23.0, creating an incompatibility.\r\n\r\nThis was set in #1345 in reaction to a build failure because scipy need(ed) `<1.23.0`. Scipy itself is apparently pulled from `torch-metrics`. \r\n\r\nI don't believe this is current anymore:\r\n- composer pulls in `torchmetrics>=0.10.0,<0.11.4`\r\n- torchmetrics 0.11.3 pulls in `scipy >1.0.0, <=1.10.0` [(requirements)](https://github.com/Lightning-AI/metrics/blob/v0.11.3/requirements/image.txt#LL4C23-L4C23)\r\n- scipy 1.10.0 sets `np_maxversion = '1.27.0'` [(setup.py)](https://github.com/scipy/scipy/blob/v1.10.0/setup.py#L453)\r\n\r\nIndeed, scipy has https://github.com/scipy/scipy/issues/16964, which was fixed in https://github.com/scipy/scipy/pull/16966 in September of 2022, while #1345 above was noted in August.\r\n\r\nAccordingly, it looks like relaxing the numpy maxversion should have a valid requirements path. \r\n\r\nThe closed PR https://github.com/mosaicml/composer/pull/1835 would already implement this.\n", "before_files": [{"content": "# Copyright 2022 MosaicML Composer authors\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"Composer package setup.\"\"\"\n\nimport os\nimport site\nimport sys\nimport textwrap\n\nimport setuptools\nfrom setuptools import setup\nfrom setuptools.command.develop import develop as develop_orig\n\n# Read the composer version\n# Cannot import from `composer.__version__` since that will not be available when building or installing the package\nwith open(os.path.join(os.path.dirname(__file__), 'composer', '_version.py')) as f:\n version_globals = {}\n version_locals = {}\n exec(f.read(), version_globals, version_locals)\n composer_version = version_locals['__version__']\n\n_IS_ROOT = os.getuid() == 0\n_IS_USER = '--user' in sys.argv[1:]\n_IS_VIRTUALENV = 'VIRTUAL_ENV' in os.environ\n\n\n# From https://stackoverflow.com/questions/51292333/how-to-tell-from-setup-py-if-the-module-is-being-installed-in-editable-mode\nclass develop(develop_orig):\n \"\"\"Override the ``develop`` class to error if attempting an editable install as root.\"\"\"\n\n def run(self):\n if _IS_ROOT and (not _IS_VIRTUALENV) and (not _IS_USER):\n raise RuntimeError(\n textwrap.dedent(\"\"\"\\\n When installing in editable mode as root outside of a virtual environment,\n please specify `--user`. Editable installs as the root user outside of a virtual environment\n do not work without the `--user` flag. 
Please instead run something like: `pip install --user -e .`\"\"\"\n ))\n super().run()\n\n\n# From https://github.com/pypa/pip/issues/7953#issuecomment-645133255\nsite.ENABLE_USER_SITE = _IS_USER\n\n\ndef package_files(prefix: str, directory: str, extension: str):\n \"\"\"Get all the files to package.\"\"\"\n # from https://stackoverflow.com/a/36693250\n paths = []\n for (path, _, filenames) in os.walk(os.path.join(prefix, directory)):\n for filename in filenames:\n if filename.endswith(extension):\n paths.append(os.path.relpath(os.path.join(path, filename), prefix))\n return paths\n\n\nwith open('README.md', 'r', encoding='utf-8') as fh:\n long_description = fh.read()\n\n# Hide the content between <!-- SETUPTOOLS_LONG_DESCRIPTION_HIDE_BEGIN --> and\n# <!-- SETUPTOOLS_LONG_DESCRIPTION_HIDE_END --> tags in the README\nwhile True:\n start_tag = '<!-- SETUPTOOLS_LONG_DESCRIPTION_HIDE_BEGIN -->'\n end_tag = '<!-- SETUPTOOLS_LONG_DESCRIPTION_HIDE_END -->'\n start = long_description.find(start_tag)\n end = long_description.find(end_tag)\n if start == -1:\n assert end == -1, 'there should be a balanced number of start and ends'\n break\n else:\n assert end != -1, 'there should be a balanced number of start and ends'\n long_description = long_description[:start] + long_description[end + len(end_tag):]\n\ninstall_requires = [\n 'pyyaml>=6.0,<7',\n 'tqdm>=4.62.3,<5',\n 'torchmetrics>=0.10.0,<0.11.4',\n 'torch_optimizer>=0.3.0,<0.4',\n 'torchvision>=0.11.0,<0.15',\n 'torch>=1.10.0,<1.14',\n 'requests>=2.26.0,<3',\n 'numpy>=1.21.5,<1.23.0',\n 'psutil>=5.8.0,<6',\n 'coolname>=1.1.0,<3',\n 'tabulate==0.9.0', # for auto-generating tables\n 'py-cpuinfo>=8.0.0,<10',\n 'packaging>=21.3.0,<23',\n 'importlib-metadata>=5.0.0,<7',\n]\nextra_deps = {}\n\nextra_deps['base'] = []\n\nextra_deps['dev'] = [\n # Imports for docs builds and running tests\n # Pinning versions strictly to avoid random test failures.\n # Should manually update dependency versions occassionally.\n 'custom_inherit==2.4.1',\n 'junitparser==2.8.0',\n 'coverage[toml]==7.2.2',\n 'fasteners==0.18', # object store tests require fasteners\n 'pytest==7.2.2',\n 'toml==0.10.2',\n 'ipython==8.11.0',\n 'ipykernel==6.20.1',\n 'jupyter==1.0.0',\n 'yamllint==1.30.0',\n 'recommonmark==0.7.1',\n 'sphinx==4.4.0',\n 'pre-commit>=2.18.1,<3',\n # embedding md in rst require docutils>=0.17. 
See\n # https://myst-parser.readthedocs.io/en/latest/sphinx/use.html?highlight=parser#include-markdown-files-into-an-rst-file\n 'docutils==0.17.1',\n 'sphinx_markdown_tables==0.0.17',\n 'sphinx-argparse==0.4.0',\n 'sphinxcontrib.katex==0.9.4',\n 'sphinxext.opengraph==0.7.4',\n 'sphinxemoji==0.2.0',\n 'furo==2022.9.29',\n 'sphinx-copybutton==0.5.0',\n 'testbook==0.4.2',\n 'myst-parser==0.16.1',\n 'sphinx_panels==0.6.0',\n 'sphinxcontrib-images==0.9.4',\n 'pytest_codeblocks==0.16.1',\n 'traitlets==5.9.0',\n 'nbsphinx==0.8.12',\n 'pandoc==2.3',\n 'pypandoc==1.11',\n 'GitPython==3.1.31',\n 'moto[s3]>=4.0.1,<5',\n 'mock-ssh-server==0.9.1',\n 'cryptography==38.0.4',\n 'pytest-httpserver>=1.0.4,<1.1',\n 'setuptools<=59.5.0',\n]\n\nextra_deps['health_checker'] = {\n 'pynvml>=11.5.0,<12',\n 'slack_sdk>=3.19.5,<4',\n}\n\nextra_deps['deepspeed'] = [\n 'deepspeed==0.7.7',\n]\n\nextra_deps['wandb'] = [\n 'wandb>=0.13.2,<0.14',\n]\n\nextra_deps['comet_ml'] = [\n 'comet_ml>=3.31.12,<4.0.0',\n]\n\nextra_deps['tensorboard'] = [\n 'tensorboard>=2.9.1,<3.0.0',\n]\n\nextra_deps['unet'] = [\n 'monai>=0.9.1,<1.2',\n 'scikit-learn>=1.0.1,<2',\n]\n\nextra_deps['vit'] = [\n 'vit_pytorch==0.35.8',\n]\n\nextra_deps['timm'] = [\n 'timm>=0.5.4,<0.6',\n]\n\nextra_deps['coco'] = [\n 'pycocotools>=2.0.4,<3',\n]\n\nextra_deps['nlp'] = [\n 'transformers>=4.11,<4.27.5',\n 'datasets>=2.4,<3',\n]\n\nextra_deps['sentencepiece'] = ['sentencepiece==0.1.97']\n\nextra_deps['mlperf'] = [\n # TODO: use pip when available: https://github.com/mlcommons/logging/issues/218\n # \"mlperf_logging @ git+https://github.com/mlperf/logging.git\",\n 'py-cpuinfo>=8.0.0,<10',\n]\n\nextra_deps['streaming'] = [\n 'mosaicml-streaming<0.4',\n 'boto3>=1.21.45,<2',\n 'paramiko>=2.11.0,<3',\n]\n\nextra_deps['libcloud'] = [\n 'apache-libcloud>=3.3.1,<4',\n]\n\nextra_deps['oci'] = [\n 'oci>=2.88.2,<3.0.0',\n]\n\nextra_deps['onnx'] = [\n 'onnx>=1.12.0,<2',\n 'onnxruntime>=1.12.1,<2',\n]\n\nextra_deps['mlflow'] = ['mlflow>=2.0.1,<3.0']\n\nextra_deps['all'] = set(dep for deps in extra_deps.values() for dep in deps)\n\ncomposer_data_files = ['py.typed']\ncomposer_data_files += package_files('composer', 'yamls', '.yaml')\ncomposer_data_files += package_files('composer', 'algorithms', '.json')\n\npackage_name = os.environ.get('COMPOSER_PACKAGE_NAME', 'mosaicml')\n\nif package_name != 'mosaicml':\n print(f'`Building composer as `{package_name}`)', file=sys.stderr)\n\nsetup(name=package_name,\n version=composer_version,\n author='MosaicML',\n author_email='[email protected]',\n description=('Composer is a PyTorch library that enables you to train ' +\n 'neural networks faster, at lower cost, and to higher accuracy.'),\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/mosaicml/composer',\n include_package_data=True,\n package_data={\n 'composer': composer_data_files,\n },\n packages=setuptools.find_packages(exclude=['docker*', 'examples*', 'scripts*', 'tests*']),\n classifiers=[\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n ],\n install_requires=install_requires,\n entry_points={\n 'console_scripts': [\n 'composer = composer.cli.launcher:main',\n 'composer_collect_env = composer.utils.collect_env:main',\n ],\n },\n extras_require=extra_deps,\n dependency_links=['https://developer.download.nvidia.com/compute/redist'],\n python_requires='>=3.8',\n ext_package='composer',\n 
cmdclass={'develop': develop})\n\n# only visible if user installs with verbose -v flag\n# Printing to stdout as not to interfere with setup.py CLI flags (e.g. --version)\nprint('*' * 20, file=sys.stderr)\nprint(textwrap.dedent(\"\"\"\\\n NOTE: For best performance, we recommend installing Pillow-SIMD\n for accelerated image processing operations. To install:\n \\t pip uninstall pillow && pip install pillow-simd\"\"\"),\n file=sys.stderr)\nprint('*' * 20, file=sys.stderr)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2022 MosaicML Composer authors\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"Composer package setup.\"\"\"\n\nimport os\nimport site\nimport sys\nimport textwrap\n\nimport setuptools\nfrom setuptools import setup\nfrom setuptools.command.develop import develop as develop_orig\n\n# Read the composer version\n# Cannot import from `composer.__version__` since that will not be available when building or installing the package\nwith open(os.path.join(os.path.dirname(__file__), 'composer', '_version.py')) as f:\n version_globals = {}\n version_locals = {}\n exec(f.read(), version_globals, version_locals)\n composer_version = version_locals['__version__']\n\n_IS_ROOT = os.getuid() == 0\n_IS_USER = '--user' in sys.argv[1:]\n_IS_VIRTUALENV = 'VIRTUAL_ENV' in os.environ\n\n\n# From https://stackoverflow.com/questions/51292333/how-to-tell-from-setup-py-if-the-module-is-being-installed-in-editable-mode\nclass develop(develop_orig):\n \"\"\"Override the ``develop`` class to error if attempting an editable install as root.\"\"\"\n\n def run(self):\n if _IS_ROOT and (not _IS_VIRTUALENV) and (not _IS_USER):\n raise RuntimeError(\n textwrap.dedent(\"\"\"\\\n When installing in editable mode as root outside of a virtual environment,\n please specify `--user`. Editable installs as the root user outside of a virtual environment\n do not work without the `--user` flag. 
Please instead run something like: `pip install --user -e .`\"\"\"\n ))\n super().run()\n\n\n# From https://github.com/pypa/pip/issues/7953#issuecomment-645133255\nsite.ENABLE_USER_SITE = _IS_USER\n\n\ndef package_files(prefix: str, directory: str, extension: str):\n \"\"\"Get all the files to package.\"\"\"\n # from https://stackoverflow.com/a/36693250\n paths = []\n for (path, _, filenames) in os.walk(os.path.join(prefix, directory)):\n for filename in filenames:\n if filename.endswith(extension):\n paths.append(os.path.relpath(os.path.join(path, filename), prefix))\n return paths\n\n\nwith open('README.md', 'r', encoding='utf-8') as fh:\n long_description = fh.read()\n\n# Hide the content between <!-- SETUPTOOLS_LONG_DESCRIPTION_HIDE_BEGIN --> and\n# <!-- SETUPTOOLS_LONG_DESCRIPTION_HIDE_END --> tags in the README\nwhile True:\n start_tag = '<!-- SETUPTOOLS_LONG_DESCRIPTION_HIDE_BEGIN -->'\n end_tag = '<!-- SETUPTOOLS_LONG_DESCRIPTION_HIDE_END -->'\n start = long_description.find(start_tag)\n end = long_description.find(end_tag)\n if start == -1:\n assert end == -1, 'there should be a balanced number of start and ends'\n break\n else:\n assert end != -1, 'there should be a balanced number of start and ends'\n long_description = long_description[:start] + long_description[end + len(end_tag):]\n\ninstall_requires = [\n 'pyyaml>=6.0,<7',\n 'tqdm>=4.62.3,<5',\n 'torchmetrics>=0.10.0,<0.11.4',\n 'torch_optimizer>=0.3.0,<0.4',\n 'torchvision>=0.11.0,<0.15',\n 'torch>=1.10.0,<1.14',\n 'requests>=2.26.0,<3',\n 'numpy>=1.21.5,<1.25.0',\n 'psutil>=5.8.0,<6',\n 'coolname>=1.1.0,<3',\n 'tabulate==0.9.0', # for auto-generating tables\n 'py-cpuinfo>=8.0.0,<10',\n 'packaging>=21.3.0,<23',\n 'importlib-metadata>=5.0.0,<7',\n]\nextra_deps = {}\n\nextra_deps['base'] = []\n\nextra_deps['dev'] = [\n # Imports for docs builds and running tests\n # Pinning versions strictly to avoid random test failures.\n # Should manually update dependency versions occassionally.\n 'custom_inherit==2.4.1',\n 'junitparser==2.8.0',\n 'coverage[toml]==7.2.2',\n 'fasteners==0.18', # object store tests require fasteners\n 'pytest==7.2.2',\n 'toml==0.10.2',\n 'ipython==8.11.0',\n 'ipykernel==6.20.1',\n 'jupyter==1.0.0',\n 'yamllint==1.30.0',\n 'recommonmark==0.7.1',\n 'sphinx==4.4.0',\n 'pre-commit>=2.18.1,<3',\n # embedding md in rst require docutils>=0.17. 
See\n # https://myst-parser.readthedocs.io/en/latest/sphinx/use.html?highlight=parser#include-markdown-files-into-an-rst-file\n 'docutils==0.17.1',\n 'sphinx_markdown_tables==0.0.17',\n 'sphinx-argparse==0.4.0',\n 'sphinxcontrib.katex==0.9.4',\n 'sphinxext.opengraph==0.7.4',\n 'sphinxemoji==0.2.0',\n 'furo==2022.9.29',\n 'sphinx-copybutton==0.5.0',\n 'testbook==0.4.2',\n 'myst-parser==0.16.1',\n 'sphinx_panels==0.6.0',\n 'sphinxcontrib-images==0.9.4',\n 'pytest_codeblocks==0.16.1',\n 'traitlets==5.9.0',\n 'nbsphinx==0.8.12',\n 'pandoc==2.3',\n 'pypandoc==1.11',\n 'GitPython==3.1.31',\n 'moto[s3]>=4.0.1,<5',\n 'mock-ssh-server==0.9.1',\n 'cryptography==38.0.4',\n 'pytest-httpserver>=1.0.4,<1.1',\n 'setuptools<=59.5.0',\n]\n\nextra_deps['health_checker'] = {\n 'pynvml>=11.5.0,<12',\n 'slack_sdk>=3.19.5,<4',\n}\n\nextra_deps['deepspeed'] = [\n 'deepspeed==0.7.7',\n]\n\nextra_deps['wandb'] = [\n 'wandb>=0.13.2,<0.14',\n]\n\nextra_deps['comet_ml'] = [\n 'comet_ml>=3.31.12,<4.0.0',\n]\n\nextra_deps['tensorboard'] = [\n 'tensorboard>=2.9.1,<3.0.0',\n]\n\nextra_deps['unet'] = [\n 'monai>=0.9.1,<1.2',\n 'scikit-learn>=1.0.1,<2',\n]\n\nextra_deps['vit'] = [\n 'vit_pytorch==0.35.8',\n]\n\nextra_deps['timm'] = [\n 'timm>=0.5.4,<0.6',\n]\n\nextra_deps['coco'] = [\n 'pycocotools>=2.0.4,<3',\n]\n\nextra_deps['nlp'] = [\n 'transformers>=4.11,<4.27.5',\n 'datasets>=2.4,<3',\n]\n\nextra_deps['sentencepiece'] = ['sentencepiece==0.1.97']\n\nextra_deps['mlperf'] = [\n # TODO: use pip when available: https://github.com/mlcommons/logging/issues/218\n # \"mlperf_logging @ git+https://github.com/mlperf/logging.git\",\n 'py-cpuinfo>=8.0.0,<10',\n]\n\nextra_deps['streaming'] = [\n 'mosaicml-streaming<0.4',\n 'boto3>=1.21.45,<2',\n 'paramiko>=2.11.0,<3',\n]\n\nextra_deps['libcloud'] = [\n 'apache-libcloud>=3.3.1,<4',\n]\n\nextra_deps['oci'] = [\n 'oci>=2.88.2,<3.0.0',\n]\n\nextra_deps['onnx'] = [\n 'onnx>=1.12.0,<2',\n 'onnxruntime>=1.12.1,<2',\n]\n\nextra_deps['mlflow'] = ['mlflow>=2.0.1,<3.0']\n\nextra_deps['all'] = set(dep for deps in extra_deps.values() for dep in deps)\n\ncomposer_data_files = ['py.typed']\ncomposer_data_files += package_files('composer', 'yamls', '.yaml')\ncomposer_data_files += package_files('composer', 'algorithms', '.json')\n\npackage_name = os.environ.get('COMPOSER_PACKAGE_NAME', 'mosaicml')\n\nif package_name != 'mosaicml':\n print(f'`Building composer as `{package_name}`)', file=sys.stderr)\n\nsetup(name=package_name,\n version=composer_version,\n author='MosaicML',\n author_email='[email protected]',\n description=('Composer is a PyTorch library that enables you to train ' +\n 'neural networks faster, at lower cost, and to higher accuracy.'),\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/mosaicml/composer',\n include_package_data=True,\n package_data={\n 'composer': composer_data_files,\n },\n packages=setuptools.find_packages(exclude=['docker*', 'examples*', 'scripts*', 'tests*']),\n classifiers=[\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n ],\n install_requires=install_requires,\n entry_points={\n 'console_scripts': [\n 'composer = composer.cli.launcher:main',\n 'composer_collect_env = composer.utils.collect_env:main',\n ],\n },\n extras_require=extra_deps,\n dependency_links=['https://developer.download.nvidia.com/compute/redist'],\n python_requires='>=3.8',\n ext_package='composer',\n 
cmdclass={'develop': develop})\n\n# only visible if user installs with verbose -v flag\n# Printing to stdout as not to interfere with setup.py CLI flags (e.g. --version)\nprint('*' * 20, file=sys.stderr)\nprint(textwrap.dedent(\"\"\"\\\n NOTE: For best performance, we recommend installing Pillow-SIMD\n for accelerated image processing operations. To install:\n \\t pip uninstall pillow && pip install pillow-simd\"\"\"),\n file=sys.stderr)\nprint('*' * 20, file=sys.stderr)\n", "path": "setup.py"}]} | 3,842 | 161 |
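Because the patch above only widens the `numpy` upper bound in `setup.py`, the requirements path argued in the issue (torchmetrics → scipy `<=1.10.0` → numpy `<1.27`) can be spot-checked against an installed environment. The check below is a hedged, illustrative sketch — the bounds are copied from the issue and the patched `setup.py`, and it is not part of composer's test suite.

```python
# Illustrative sanity check of the dependency chain discussed in the issue;
# adjust the quoted bounds if the upstream pins drift.
from importlib.metadata import version

from packaging.specifiers import SpecifierSet
from packaging.version import Version

installed = {name: Version(version(name)) for name in ("numpy", "scipy", "torchmetrics")}
print(installed)

assert installed["numpy"] in SpecifierSet(">=1.21.5,<1.25.0")   # patched composer pin
assert installed["scipy"] in SpecifierSet(">1.0.0,<=1.10.0")    # torchmetrics 0.11.3 pin
```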
gh_patches_debug_12219 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-1124 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Zipkin exporter must map status
As per spec: https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/sdk_exporters/zipkin.md#status
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 This library allows to export tracing data to `Zipkin <https://zipkin.io/>`_.
17
18 Usage
19 -----
20
21 The **OpenTelemetry Zipkin Exporter** allows to export `OpenTelemetry`_ traces to `Zipkin`_.
22 This exporter always send traces to the configured Zipkin collector using HTTP.
23
24
25 .. _Zipkin: https://zipkin.io/
26 .. _OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/
27 .. _Specification: https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/sdk-environment-variables.md#zipkin-exporter
28
29 .. code:: python
30
31 from opentelemetry import trace
32 from opentelemetry.exporter import zipkin
33 from opentelemetry.sdk.trace import TracerProvider
34 from opentelemetry.sdk.trace.export import BatchExportSpanProcessor
35
36 trace.set_tracer_provider(TracerProvider())
37 tracer = trace.get_tracer(__name__)
38
39 # create a ZipkinSpanExporter
40 zipkin_exporter = zipkin.ZipkinSpanExporter(
41 service_name="my-helloworld-service",
42 # optional:
43 # url="http://localhost:9411/api/v2/spans",
44 # ipv4="",
45 # ipv6="",
46 # retry=False,
47 )
48
49 # Create a BatchExportSpanProcessor and add the exporter to it
50 span_processor = BatchExportSpanProcessor(zipkin_exporter)
51
52 # add to the tracer
53 trace.get_tracer_provider().add_span_processor(span_processor)
54
55 with tracer.start_as_current_span("foo"):
56 print("Hello world!")
57
58 The exporter supports endpoint configuration via the OTEL_EXPORTER_ZIPKIN_ENDPOINT environment variables as defined in the `Specification`_
59
60 API
61 ---
62 """
63
64 import json
65 import logging
66 import os
67 from typing import Optional, Sequence
68 from urllib.parse import urlparse
69
70 import requests
71
72 from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
73 from opentelemetry.trace import Span, SpanContext, SpanKind
74
75 DEFAULT_RETRY = False
76 DEFAULT_URL = "http://localhost:9411/api/v2/spans"
77 ZIPKIN_HEADERS = {"Content-Type": "application/json"}
78
79 SPAN_KIND_MAP = {
80 SpanKind.INTERNAL: None,
81 SpanKind.SERVER: "SERVER",
82 SpanKind.CLIENT: "CLIENT",
83 SpanKind.PRODUCER: "PRODUCER",
84 SpanKind.CONSUMER: "CONSUMER",
85 }
86
87 SUCCESS_STATUS_CODES = (200, 202)
88
89 logger = logging.getLogger(__name__)
90
91
92 class ZipkinSpanExporter(SpanExporter):
93 """Zipkin span exporter for OpenTelemetry.
94
95 Args:
96 service_name: Service that logged an annotation in a trace.Classifier
97 when query for spans.
98 url: The Zipkin endpoint URL
99 ipv4: Primary IPv4 address associated with this connection.
100 ipv6: Primary IPv6 address associated with this connection.
101 retry: Set to True to configure the exporter to retry on failure.
102 """
103
104 def __init__(
105 self,
106 service_name: str,
107 url: str = None,
108 ipv4: Optional[str] = None,
109 ipv6: Optional[str] = None,
110 retry: Optional[str] = DEFAULT_RETRY,
111 ):
112 self.service_name = service_name
113 if url is None:
114 self.url = os.environ.get(
115 "OTEL_EXPORTER_ZIPKIN_ENDPOINT", DEFAULT_URL
116 )
117 else:
118 self.url = url
119
120 self.port = urlparse(self.url).port
121
122 self.ipv4 = ipv4
123 self.ipv6 = ipv6
124 self.retry = retry
125
126 def export(self, spans: Sequence[Span]) -> SpanExportResult:
127 zipkin_spans = self._translate_to_zipkin(spans)
128 result = requests.post(
129 url=self.url, data=json.dumps(zipkin_spans), headers=ZIPKIN_HEADERS
130 )
131
132 if result.status_code not in SUCCESS_STATUS_CODES:
133 logger.error(
134 "Traces cannot be uploaded; status code: %s, message %s",
135 result.status_code,
136 result.text,
137 )
138
139 if self.retry:
140 return SpanExportResult.FAILURE
141 return SpanExportResult.FAILURE
142 return SpanExportResult.SUCCESS
143
144 def _translate_to_zipkin(self, spans: Sequence[Span]):
145
146 local_endpoint = {"serviceName": self.service_name, "port": self.port}
147
148 if self.ipv4 is not None:
149 local_endpoint["ipv4"] = self.ipv4
150
151 if self.ipv6 is not None:
152 local_endpoint["ipv6"] = self.ipv6
153
154 zipkin_spans = []
155 for span in spans:
156 context = span.get_context()
157 trace_id = context.trace_id
158 span_id = context.span_id
159
160 # Timestamp in zipkin spans is int of microseconds.
161 # see: https://zipkin.io/pages/instrumenting.html
162 start_timestamp_mus = _nsec_to_usec_round(span.start_time)
163 duration_mus = _nsec_to_usec_round(span.end_time - span.start_time)
164
165 zipkin_span = {
166 # Ensure left-zero-padding of traceId, spanId, parentId
167 "traceId": format(trace_id, "032x"),
168 "id": format(span_id, "016x"),
169 "name": span.name,
170 "timestamp": start_timestamp_mus,
171 "duration": duration_mus,
172 "localEndpoint": local_endpoint,
173 "kind": SPAN_KIND_MAP[span.kind],
174 "tags": _extract_tags_from_span(span),
175 "annotations": _extract_annotations_from_events(span.events),
176 }
177
178 if span.instrumentation_info is not None:
179 zipkin_span["tags"][
180 "otel.instrumentation_library.name"
181 ] = span.instrumentation_info.name
182 zipkin_span["tags"][
183 "otel.instrumentation_library.version"
184 ] = span.instrumentation_info.version
185
186 if context.trace_flags.sampled:
187 zipkin_span["debug"] = True
188
189 if isinstance(span.parent, Span):
190 zipkin_span["parentId"] = format(
191 span.parent.get_context().span_id, "016x"
192 )
193 elif isinstance(span.parent, SpanContext):
194 zipkin_span["parentId"] = format(span.parent.span_id, "016x")
195
196 zipkin_spans.append(zipkin_span)
197 return zipkin_spans
198
199 def shutdown(self) -> None:
200 pass
201
202
203 def _extract_tags_from_dict(tags_dict):
204 tags = {}
205 if not tags_dict:
206 return tags
207 for attribute_key, attribute_value in tags_dict.items():
208 if isinstance(attribute_value, (int, bool, float)):
209 value = str(attribute_value)
210 elif isinstance(attribute_value, str):
211 value = attribute_value[:128]
212 else:
213 logger.warning("Could not serialize tag %s", attribute_key)
214 continue
215 tags[attribute_key] = value
216 return tags
217
218
219 def _extract_tags_from_span(span: Span):
220 tags = _extract_tags_from_dict(getattr(span, "attributes", None))
221 if span.resource:
222 tags.update(_extract_tags_from_dict(span.resource.attributes))
223 return tags
224
225
226 def _extract_annotations_from_events(events):
227 return (
228 [
229 {"timestamp": _nsec_to_usec_round(e.timestamp), "value": e.name}
230 for e in events
231 ]
232 if events
233 else None
234 )
235
236
237 def _nsec_to_usec_round(nsec):
238 """Round nanoseconds to microseconds"""
239 return (nsec + 500) // 10 ** 3
240
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin/__init__.py b/exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin/__init__.py
--- a/exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin/__init__.py
+++ b/exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin/__init__.py
@@ -183,6 +183,15 @@
"otel.instrumentation_library.version"
] = span.instrumentation_info.version
+ if span.status is not None:
+ zipkin_span["tags"][
+ "otel.status_code"
+ ] = span.status.canonical_code.value
+ if span.status.description is not None:
+ zipkin_span["tags"][
+ "otel.status_description"
+ ] = span.status.description
+
if context.trace_flags.sampled:
zipkin_span["debug"] = True
| {"golden_diff": "diff --git a/exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin/__init__.py b/exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin/__init__.py\n--- a/exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin/__init__.py\n+++ b/exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin/__init__.py\n@@ -183,6 +183,15 @@\n \"otel.instrumentation_library.version\"\n ] = span.instrumentation_info.version\n \n+ if span.status is not None:\n+ zipkin_span[\"tags\"][\n+ \"otel.status_code\"\n+ ] = span.status.canonical_code.value\n+ if span.status.description is not None:\n+ zipkin_span[\"tags\"][\n+ \"otel.status_description\"\n+ ] = span.status.description\n+\n if context.trace_flags.sampled:\n zipkin_span[\"debug\"] = True\n", "issue": "Zipkin exporter must map status\nAs per spec: https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/sdk_exporters/zipkin.md#status\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis library allows to export tracing data to `Zipkin <https://zipkin.io/>`_.\n\nUsage\n-----\n\nThe **OpenTelemetry Zipkin Exporter** allows to export `OpenTelemetry`_ traces to `Zipkin`_.\nThis exporter always send traces to the configured Zipkin collector using HTTP.\n\n\n.. _Zipkin: https://zipkin.io/\n.. _OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/\n.. _Specification: https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/sdk-environment-variables.md#zipkin-exporter\n\n.. 
code:: python\n\n from opentelemetry import trace\n from opentelemetry.exporter import zipkin\n from opentelemetry.sdk.trace import TracerProvider\n from opentelemetry.sdk.trace.export import BatchExportSpanProcessor\n\n trace.set_tracer_provider(TracerProvider())\n tracer = trace.get_tracer(__name__)\n\n # create a ZipkinSpanExporter\n zipkin_exporter = zipkin.ZipkinSpanExporter(\n service_name=\"my-helloworld-service\",\n # optional:\n # url=\"http://localhost:9411/api/v2/spans\",\n # ipv4=\"\",\n # ipv6=\"\",\n # retry=False,\n )\n\n # Create a BatchExportSpanProcessor and add the exporter to it\n span_processor = BatchExportSpanProcessor(zipkin_exporter)\n\n # add to the tracer\n trace.get_tracer_provider().add_span_processor(span_processor)\n\n with tracer.start_as_current_span(\"foo\"):\n print(\"Hello world!\")\n\nThe exporter supports endpoint configuration via the OTEL_EXPORTER_ZIPKIN_ENDPOINT environment variables as defined in the `Specification`_\n\nAPI\n---\n\"\"\"\n\nimport json\nimport logging\nimport os\nfrom typing import Optional, Sequence\nfrom urllib.parse import urlparse\n\nimport requests\n\nfrom opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult\nfrom opentelemetry.trace import Span, SpanContext, SpanKind\n\nDEFAULT_RETRY = False\nDEFAULT_URL = \"http://localhost:9411/api/v2/spans\"\nZIPKIN_HEADERS = {\"Content-Type\": \"application/json\"}\n\nSPAN_KIND_MAP = {\n SpanKind.INTERNAL: None,\n SpanKind.SERVER: \"SERVER\",\n SpanKind.CLIENT: \"CLIENT\",\n SpanKind.PRODUCER: \"PRODUCER\",\n SpanKind.CONSUMER: \"CONSUMER\",\n}\n\nSUCCESS_STATUS_CODES = (200, 202)\n\nlogger = logging.getLogger(__name__)\n\n\nclass ZipkinSpanExporter(SpanExporter):\n \"\"\"Zipkin span exporter for OpenTelemetry.\n\n Args:\n service_name: Service that logged an annotation in a trace.Classifier\n when query for spans.\n url: The Zipkin endpoint URL\n ipv4: Primary IPv4 address associated with this connection.\n ipv6: Primary IPv6 address associated with this connection.\n retry: Set to True to configure the exporter to retry on failure.\n \"\"\"\n\n def __init__(\n self,\n service_name: str,\n url: str = None,\n ipv4: Optional[str] = None,\n ipv6: Optional[str] = None,\n retry: Optional[str] = DEFAULT_RETRY,\n ):\n self.service_name = service_name\n if url is None:\n self.url = os.environ.get(\n \"OTEL_EXPORTER_ZIPKIN_ENDPOINT\", DEFAULT_URL\n )\n else:\n self.url = url\n\n self.port = urlparse(self.url).port\n\n self.ipv4 = ipv4\n self.ipv6 = ipv6\n self.retry = retry\n\n def export(self, spans: Sequence[Span]) -> SpanExportResult:\n zipkin_spans = self._translate_to_zipkin(spans)\n result = requests.post(\n url=self.url, data=json.dumps(zipkin_spans), headers=ZIPKIN_HEADERS\n )\n\n if result.status_code not in SUCCESS_STATUS_CODES:\n logger.error(\n \"Traces cannot be uploaded; status code: %s, message %s\",\n result.status_code,\n result.text,\n )\n\n if self.retry:\n return SpanExportResult.FAILURE\n return SpanExportResult.FAILURE\n return SpanExportResult.SUCCESS\n\n def _translate_to_zipkin(self, spans: Sequence[Span]):\n\n local_endpoint = {\"serviceName\": self.service_name, \"port\": self.port}\n\n if self.ipv4 is not None:\n local_endpoint[\"ipv4\"] = self.ipv4\n\n if self.ipv6 is not None:\n local_endpoint[\"ipv6\"] = self.ipv6\n\n zipkin_spans = []\n for span in spans:\n context = span.get_context()\n trace_id = context.trace_id\n span_id = context.span_id\n\n # Timestamp in zipkin spans is int of microseconds.\n # see: https://zipkin.io/pages/instrumenting.html\n 
start_timestamp_mus = _nsec_to_usec_round(span.start_time)\n duration_mus = _nsec_to_usec_round(span.end_time - span.start_time)\n\n zipkin_span = {\n # Ensure left-zero-padding of traceId, spanId, parentId\n \"traceId\": format(trace_id, \"032x\"),\n \"id\": format(span_id, \"016x\"),\n \"name\": span.name,\n \"timestamp\": start_timestamp_mus,\n \"duration\": duration_mus,\n \"localEndpoint\": local_endpoint,\n \"kind\": SPAN_KIND_MAP[span.kind],\n \"tags\": _extract_tags_from_span(span),\n \"annotations\": _extract_annotations_from_events(span.events),\n }\n\n if span.instrumentation_info is not None:\n zipkin_span[\"tags\"][\n \"otel.instrumentation_library.name\"\n ] = span.instrumentation_info.name\n zipkin_span[\"tags\"][\n \"otel.instrumentation_library.version\"\n ] = span.instrumentation_info.version\n\n if context.trace_flags.sampled:\n zipkin_span[\"debug\"] = True\n\n if isinstance(span.parent, Span):\n zipkin_span[\"parentId\"] = format(\n span.parent.get_context().span_id, \"016x\"\n )\n elif isinstance(span.parent, SpanContext):\n zipkin_span[\"parentId\"] = format(span.parent.span_id, \"016x\")\n\n zipkin_spans.append(zipkin_span)\n return zipkin_spans\n\n def shutdown(self) -> None:\n pass\n\n\ndef _extract_tags_from_dict(tags_dict):\n tags = {}\n if not tags_dict:\n return tags\n for attribute_key, attribute_value in tags_dict.items():\n if isinstance(attribute_value, (int, bool, float)):\n value = str(attribute_value)\n elif isinstance(attribute_value, str):\n value = attribute_value[:128]\n else:\n logger.warning(\"Could not serialize tag %s\", attribute_key)\n continue\n tags[attribute_key] = value\n return tags\n\n\ndef _extract_tags_from_span(span: Span):\n tags = _extract_tags_from_dict(getattr(span, \"attributes\", None))\n if span.resource:\n tags.update(_extract_tags_from_dict(span.resource.attributes))\n return tags\n\n\ndef _extract_annotations_from_events(events):\n return (\n [\n {\"timestamp\": _nsec_to_usec_round(e.timestamp), \"value\": e.name}\n for e in events\n ]\n if events\n else None\n )\n\n\ndef _nsec_to_usec_round(nsec):\n \"\"\"Round nanoseconds to microseconds\"\"\"\n return (nsec + 500) // 10 ** 3\n", "path": "exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis library allows to export tracing data to `Zipkin <https://zipkin.io/>`_.\n\nUsage\n-----\n\nThe **OpenTelemetry Zipkin Exporter** allows to export `OpenTelemetry`_ traces to `Zipkin`_.\nThis exporter always send traces to the configured Zipkin collector using HTTP.\n\n\n.. _Zipkin: https://zipkin.io/\n.. _OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/\n.. _Specification: https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/sdk-environment-variables.md#zipkin-exporter\n\n.. 
code:: python\n\n from opentelemetry import trace\n from opentelemetry.exporter import zipkin\n from opentelemetry.sdk.trace import TracerProvider\n from opentelemetry.sdk.trace.export import BatchExportSpanProcessor\n\n trace.set_tracer_provider(TracerProvider())\n tracer = trace.get_tracer(__name__)\n\n # create a ZipkinSpanExporter\n zipkin_exporter = zipkin.ZipkinSpanExporter(\n service_name=\"my-helloworld-service\",\n # optional:\n # url=\"http://localhost:9411/api/v2/spans\",\n # ipv4=\"\",\n # ipv6=\"\",\n # retry=False,\n )\n\n # Create a BatchExportSpanProcessor and add the exporter to it\n span_processor = BatchExportSpanProcessor(zipkin_exporter)\n\n # add to the tracer\n trace.get_tracer_provider().add_span_processor(span_processor)\n\n with tracer.start_as_current_span(\"foo\"):\n print(\"Hello world!\")\n\nThe exporter supports endpoint configuration via the OTEL_EXPORTER_ZIPKIN_ENDPOINT environment variables as defined in the `Specification`_\n\nAPI\n---\n\"\"\"\n\nimport json\nimport logging\nimport os\nfrom typing import Optional, Sequence\nfrom urllib.parse import urlparse\n\nimport requests\n\nfrom opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult\nfrom opentelemetry.trace import Span, SpanContext, SpanKind\n\nDEFAULT_RETRY = False\nDEFAULT_URL = \"http://localhost:9411/api/v2/spans\"\nZIPKIN_HEADERS = {\"Content-Type\": \"application/json\"}\n\nSPAN_KIND_MAP = {\n SpanKind.INTERNAL: None,\n SpanKind.SERVER: \"SERVER\",\n SpanKind.CLIENT: \"CLIENT\",\n SpanKind.PRODUCER: \"PRODUCER\",\n SpanKind.CONSUMER: \"CONSUMER\",\n}\n\nSUCCESS_STATUS_CODES = (200, 202)\n\nlogger = logging.getLogger(__name__)\n\n\nclass ZipkinSpanExporter(SpanExporter):\n \"\"\"Zipkin span exporter for OpenTelemetry.\n\n Args:\n service_name: Service that logged an annotation in a trace.Classifier\n when query for spans.\n url: The Zipkin endpoint URL\n ipv4: Primary IPv4 address associated with this connection.\n ipv6: Primary IPv6 address associated with this connection.\n retry: Set to True to configure the exporter to retry on failure.\n \"\"\"\n\n def __init__(\n self,\n service_name: str,\n url: str = None,\n ipv4: Optional[str] = None,\n ipv6: Optional[str] = None,\n retry: Optional[str] = DEFAULT_RETRY,\n ):\n self.service_name = service_name\n if url is None:\n self.url = os.environ.get(\n \"OTEL_EXPORTER_ZIPKIN_ENDPOINT\", DEFAULT_URL\n )\n else:\n self.url = url\n\n self.port = urlparse(self.url).port\n\n self.ipv4 = ipv4\n self.ipv6 = ipv6\n self.retry = retry\n\n def export(self, spans: Sequence[Span]) -> SpanExportResult:\n zipkin_spans = self._translate_to_zipkin(spans)\n result = requests.post(\n url=self.url, data=json.dumps(zipkin_spans), headers=ZIPKIN_HEADERS\n )\n\n if result.status_code not in SUCCESS_STATUS_CODES:\n logger.error(\n \"Traces cannot be uploaded; status code: %s, message %s\",\n result.status_code,\n result.text,\n )\n\n if self.retry:\n return SpanExportResult.FAILURE\n return SpanExportResult.FAILURE\n return SpanExportResult.SUCCESS\n\n def _translate_to_zipkin(self, spans: Sequence[Span]):\n\n local_endpoint = {\"serviceName\": self.service_name, \"port\": self.port}\n\n if self.ipv4 is not None:\n local_endpoint[\"ipv4\"] = self.ipv4\n\n if self.ipv6 is not None:\n local_endpoint[\"ipv6\"] = self.ipv6\n\n zipkin_spans = []\n for span in spans:\n context = span.get_context()\n trace_id = context.trace_id\n span_id = context.span_id\n\n # Timestamp in zipkin spans is int of microseconds.\n # see: https://zipkin.io/pages/instrumenting.html\n 
start_timestamp_mus = _nsec_to_usec_round(span.start_time)\n duration_mus = _nsec_to_usec_round(span.end_time - span.start_time)\n\n zipkin_span = {\n # Ensure left-zero-padding of traceId, spanId, parentId\n \"traceId\": format(trace_id, \"032x\"),\n \"id\": format(span_id, \"016x\"),\n \"name\": span.name,\n \"timestamp\": start_timestamp_mus,\n \"duration\": duration_mus,\n \"localEndpoint\": local_endpoint,\n \"kind\": SPAN_KIND_MAP[span.kind],\n \"tags\": _extract_tags_from_span(span),\n \"annotations\": _extract_annotations_from_events(span.events),\n }\n\n if span.instrumentation_info is not None:\n zipkin_span[\"tags\"][\n \"otel.instrumentation_library.name\"\n ] = span.instrumentation_info.name\n zipkin_span[\"tags\"][\n \"otel.instrumentation_library.version\"\n ] = span.instrumentation_info.version\n\n if span.status is not None:\n zipkin_span[\"tags\"][\n \"otel.status_code\"\n ] = span.status.canonical_code.value\n if span.status.description is not None:\n zipkin_span[\"tags\"][\n \"otel.status_description\"\n ] = span.status.description\n\n if context.trace_flags.sampled:\n zipkin_span[\"debug\"] = True\n\n if isinstance(span.parent, Span):\n zipkin_span[\"parentId\"] = format(\n span.parent.get_context().span_id, \"016x\"\n )\n elif isinstance(span.parent, SpanContext):\n zipkin_span[\"parentId\"] = format(span.parent.span_id, \"016x\")\n\n zipkin_spans.append(zipkin_span)\n return zipkin_spans\n\n def shutdown(self) -> None:\n pass\n\n\ndef _extract_tags_from_dict(tags_dict):\n tags = {}\n if not tags_dict:\n return tags\n for attribute_key, attribute_value in tags_dict.items():\n if isinstance(attribute_value, (int, bool, float)):\n value = str(attribute_value)\n elif isinstance(attribute_value, str):\n value = attribute_value[:128]\n else:\n logger.warning(\"Could not serialize tag %s\", attribute_key)\n continue\n tags[attribute_key] = value\n return tags\n\n\ndef _extract_tags_from_span(span: Span):\n tags = _extract_tags_from_dict(getattr(span, \"attributes\", None))\n if span.resource:\n tags.update(_extract_tags_from_dict(span.resource.attributes))\n return tags\n\n\ndef _extract_annotations_from_events(events):\n return (\n [\n {\"timestamp\": _nsec_to_usec_round(e.timestamp), \"value\": e.name}\n for e in events\n ]\n if events\n else None\n )\n\n\ndef _nsec_to_usec_round(nsec):\n \"\"\"Round nanoseconds to microseconds\"\"\"\n return (nsec + 500) // 10 ** 3\n", "path": "exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin/__init__.py"}]} | 2,697 | 225 |
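The patch above attaches the span's status to the exported Zipkin span as `otel.status_code` and `otel.status_description` tags, which is how the linked spec carries status information since the Zipkin model has no dedicated status field. Below is a minimal, self-contained sketch of that tag-building logic. The `Status` and `StatusCanonicalCode` classes here are simplified stand-ins introduced for illustration (the real ones come from `opentelemetry.trace.status`), but the conditional mapping mirrors the diff.

```python
# Sketch of the status -> tags mapping added by the patch (simplified stand-ins;
# the real Status/StatusCanonicalCode live in opentelemetry.trace.status).
from enum import Enum


class StatusCanonicalCode(Enum):  # assumed minimal subset of the real enum
    OK = 0
    UNKNOWN = 2


class Status:  # assumed minimal stand-in for the SDK's Status object
    def __init__(self, canonical_code, description=None):
        self.canonical_code = canonical_code
        self.description = description


def status_tags(status):
    """Build the extra Zipkin tags the patched exporter emits for a span status."""
    tags = {}
    if status is not None:
        tags["otel.status_code"] = status.canonical_code.value
        if status.description is not None:
            tags["otel.status_description"] = status.description
    return tags


print(status_tags(Status(StatusCanonicalCode.UNKNOWN, "something went wrong")))
# -> {'otel.status_code': 2, 'otel.status_description': 'something went wrong'}
```

In the exporter itself these tags are merged into the `zipkin_span["tags"]` dictionary right after the instrumentation-library tags, as the golden diff shows.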
gh_patches_debug_4050 | rasdani/github-patches | git_diff | ocf__ocfweb-553 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
fix histogram on printing page
The user histogram is broken on ocf.io/stats/printing. The culprit is https://github.com/ocf/ocfweb/blob/ed143b8f1c59e58157780007fe5fd104ee18d944/ocfweb/stats/printing.py#L58
We should use `SEMESTERLY_QUOTA + 5` instead of 105.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ocfweb/stats/printing.py`
Content:
```
1 import time
2 from collections import defaultdict
3 from datetime import date
4 from datetime import timedelta
5 from functools import partial
6
7 from django.http import HttpResponse
8 from django.shortcuts import render
9 from matplotlib.figure import Figure
10 from ocflib.lab import stats
11 from ocflib.printing.printers import PRINTERS
12 from ocflib.printing.quota import get_connection
13 from ocflib.printing.quota import SEMESTERLY_QUOTA
14
15 from ocfweb.caching import periodic
16 from ocfweb.component.graph import plot_to_image_bytes
17
18
19 ALL_PRINTERS = ('papercut', 'pagefault', 'logjam', 'logjam-old', 'deforestation')
20 ACTIVE_PRINTERS = ('papercut', 'pagefault', 'logjam')
21
22
23 def stats_printing(request):
24 return render(
25 request,
26 'stats/printing.html',
27 {
28 'title': 'Printing Statistics',
29 'current_printers': PRINTERS,
30 'toner_changes': _toner_changes(),
31 'last_month': [
32 date.today() - timedelta(days=i)
33 for i in range(30)
34 ],
35 'pages_per_day': _pages_per_day(),
36 },
37 )
38
39
40 def semester_histogram(request):
41 return HttpResponse(
42 plot_to_image_bytes(_semester_histogram(), format='svg'),
43 content_type='image/svg+xml',
44 )
45
46
47 @periodic(300)
48 def _semester_histogram():
49 with get_connection() as c:
50 c.execute(
51 'SELECT `user`, `semester` FROM `printed` WHERE `semester` > 0',
52 )
53 users = [SEMESTERLY_QUOTA - int(r['semester']) for r in c]
54
55 fig = Figure(figsize=(10, 5))
56 ax = fig.add_subplot(1, 1, 1)
57 ax.locator_params(nbins=20)
58 ax.hist(users, bins=list(range(0, 105, 5)))
59 ax.grid(True)
60 ax.set_xlim(SEMESTERLY_QUOTA, 0)
61 ax.set_ylabel('Number of users')
62 ax.set_xlabel('Remaining balance')
63 ax.set_title('Remaining balances this semester')
64
65 return fig
66
67
68 @periodic(3600)
69 def _toner_changes():
70 return [
71 (
72 printer,
73 _toner_used_by_printer(printer),
74 )
75 for printer in ACTIVE_PRINTERS
76 ]
77
78
79 def _toner_used_by_printer(printer, cutoff=.05, since=None):
80 """Returns toner used for a printer since a given date (by default it
81 returns toner used for this semester).
82
83 Toner numbers can be significantly noisy, including significant diffs
84 whenever toner gets taken out and put back in whenever there is a jam.
85 Because of this it's hard to determine if a new toner is inserted into a
86 printer or if it was the same toner again. To reduce this noise we only
87 count diffs that are smaller than a cutoff which empirically seems to be
88 more accurate.
89 """
90 if not since:
91 since = stats.current_semester_start()
92
93 with stats.get_connection() as cursor:
94 cursor.execute(
95 '''
96 CREATE TEMPORARY TABLE ordered1
97 (PRIMARY KEY (position))
98 AS (
99 SELECT * FROM (
100 SELECT
101 T.*,
102 @rownum := @rownum + 1 AS position
103 FROM (
104 (
105 SELECT * FROM printer_toner_public
106 WHERE printer = %s AND
107 date > %s
108 ORDER BY date
109 ) AS T,
110 (SELECT @rownum := 0) AS r
111 )
112 ) AS x
113 )
114 ''', (printer, since.strftime('%Y-%m-%d')),
115 )
116 cursor.execute('''
117 CREATE TEMPORARY TABLE ordered2
118 (PRIMARY KEY (position))
119 AS (SELECT * FROM ordered1)
120 ''')
121 cursor.execute('''
122 CREATE TEMPORARY TABLE diffs
123 AS (SELECT
124 B.date AS date,
125 A.value/A.max - B.value/B.max as pct_diff
126 FROM
127 ordered1 as A,
128 ordered2 as B
129 WHERE
130 B.position = A.position + 1)
131 ''')
132 cursor.execute(
133 '''
134 SELECT SUM(pct_diff) as toner_used
135 FROM
136 diffs
137 WHERE
138 ABS(pct_diff)<%s
139 ''', (cutoff,),
140 )
141 result = cursor.fetchone()['toner_used']
142 return float(result or 0.0)
143
144
145 @periodic(120)
146 def _pages_per_day():
147 with stats.get_connection() as cursor:
148 cursor.execute('''
149 SELECT max(value) as value, cast(date as date) as date, printer
150 FROM printer_pages_public
151 GROUP BY cast(date as date), printer
152 ORDER BY date ASC, printer ASC
153 ''')
154
155 # Resolves the issue of possible missing dates.
156 # defaultdict(lambda: defaultdict(int)) doesn't work due to inability to pickle local objects like lambdas;
157 # this effectively does the same thing as that.
158 pages_printed = defaultdict(partial(defaultdict, int))
159 last_seen = {}
160
161 for row in cursor:
162 if row['printer'] in last_seen:
163 pages_printed.setdefault(row['date'], defaultdict(int))
164 pages_printed[row['date']][row['printer']] = (
165 row['value'] - last_seen[row['printer']]
166 )
167 last_seen[row['printer']] = row['value']
168
169 return pages_printed
170
171
172 def _pages_printed_for_printer(printer, resolution=100):
173 with stats.get_connection() as cursor:
174 cursor.execute(
175 '''
176 SELECT Z.date, Z.value FROM (
177 SELECT
178 T.*,
179 @rownum := @rownum + 1 AS position
180 FROM (
181 (
182 SELECT * FROM printer_pages_public
183 WHERE printer = %s
184 ORDER BY date
185 ) AS T,
186 (SELECT @rownum := 0) AS r
187 )
188 ) as Z
189 WHERE Z.position mod %s = 0
190 ''', (printer, resolution),
191 )
192 return [
193 (time.mktime(row['date'].timetuple()) * 1000, row['value'])
194 for row in cursor
195 ]
196
197
198 @periodic(3600)
199 def _pages_printed_data():
200 return [
201 {
202 'name': printer,
203 'animation': False,
204 'data': _pages_printed_for_printer(printer),
205 }
206 for printer in ALL_PRINTERS
207 ]
208
209
210 def pages_printed(request):
211 return render(
212 request,
213 'stats/printing/pages-printed.html',
214 {
215 'title': 'Pages Printed',
216 'data': _pages_printed_data(),
217 },
218 )
219
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ocfweb/stats/printing.py b/ocfweb/stats/printing.py
--- a/ocfweb/stats/printing.py
+++ b/ocfweb/stats/printing.py
@@ -55,7 +55,7 @@
fig = Figure(figsize=(10, 5))
ax = fig.add_subplot(1, 1, 1)
ax.locator_params(nbins=20)
- ax.hist(users, bins=list(range(0, 105, 5)))
+ ax.hist(users, bins=list(range(0, SEMESTERLY_QUOTA + 5, 5)))
ax.grid(True)
ax.set_xlim(SEMESTERLY_QUOTA, 0)
ax.set_ylabel('Number of users')
| {"golden_diff": "diff --git a/ocfweb/stats/printing.py b/ocfweb/stats/printing.py\n--- a/ocfweb/stats/printing.py\n+++ b/ocfweb/stats/printing.py\n@@ -55,7 +55,7 @@\n fig = Figure(figsize=(10, 5))\n ax = fig.add_subplot(1, 1, 1)\n ax.locator_params(nbins=20)\n- ax.hist(users, bins=list(range(0, 105, 5)))\n+ ax.hist(users, bins=list(range(0, SEMESTERLY_QUOTA + 5, 5)))\n ax.grid(True)\n ax.set_xlim(SEMESTERLY_QUOTA, 0)\n ax.set_ylabel('Number of users')\n", "issue": "fix histogram on printing page\nThe user histogram is broken on ocf.io/stats/printing. The culprit is https://github.com/ocf/ocfweb/blob/ed143b8f1c59e58157780007fe5fd104ee18d944/ocfweb/stats/printing.py#L58\r\nWe should use `SEMESTERLY_QUOTA + 5` instead of 105.\n", "before_files": [{"content": "import time\nfrom collections import defaultdict\nfrom datetime import date\nfrom datetime import timedelta\nfrom functools import partial\n\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom matplotlib.figure import Figure\nfrom ocflib.lab import stats\nfrom ocflib.printing.printers import PRINTERS\nfrom ocflib.printing.quota import get_connection\nfrom ocflib.printing.quota import SEMESTERLY_QUOTA\n\nfrom ocfweb.caching import periodic\nfrom ocfweb.component.graph import plot_to_image_bytes\n\n\nALL_PRINTERS = ('papercut', 'pagefault', 'logjam', 'logjam-old', 'deforestation')\nACTIVE_PRINTERS = ('papercut', 'pagefault', 'logjam')\n\n\ndef stats_printing(request):\n return render(\n request,\n 'stats/printing.html',\n {\n 'title': 'Printing Statistics',\n 'current_printers': PRINTERS,\n 'toner_changes': _toner_changes(),\n 'last_month': [\n date.today() - timedelta(days=i)\n for i in range(30)\n ],\n 'pages_per_day': _pages_per_day(),\n },\n )\n\n\ndef semester_histogram(request):\n return HttpResponse(\n plot_to_image_bytes(_semester_histogram(), format='svg'),\n content_type='image/svg+xml',\n )\n\n\n@periodic(300)\ndef _semester_histogram():\n with get_connection() as c:\n c.execute(\n 'SELECT `user`, `semester` FROM `printed` WHERE `semester` > 0',\n )\n users = [SEMESTERLY_QUOTA - int(r['semester']) for r in c]\n\n fig = Figure(figsize=(10, 5))\n ax = fig.add_subplot(1, 1, 1)\n ax.locator_params(nbins=20)\n ax.hist(users, bins=list(range(0, 105, 5)))\n ax.grid(True)\n ax.set_xlim(SEMESTERLY_QUOTA, 0)\n ax.set_ylabel('Number of users')\n ax.set_xlabel('Remaining balance')\n ax.set_title('Remaining balances this semester')\n\n return fig\n\n\n@periodic(3600)\ndef _toner_changes():\n return [\n (\n printer,\n _toner_used_by_printer(printer),\n )\n for printer in ACTIVE_PRINTERS\n ]\n\n\ndef _toner_used_by_printer(printer, cutoff=.05, since=None):\n \"\"\"Returns toner used for a printer since a given date (by default it\n returns toner used for this semester).\n\n Toner numbers can be significantly noisy, including significant diffs\n whenever toner gets taken out and put back in whenever there is a jam.\n Because of this it's hard to determine if a new toner is inserted into a\n printer or if it was the same toner again. 
To reduce this noise we only\n count diffs that are smaller than a cutoff which empirically seems to be\n more accurate.\n \"\"\"\n if not since:\n since = stats.current_semester_start()\n\n with stats.get_connection() as cursor:\n cursor.execute(\n '''\n CREATE TEMPORARY TABLE ordered1\n (PRIMARY KEY (position))\n AS (\n SELECT * FROM (\n SELECT\n T.*,\n @rownum := @rownum + 1 AS position\n FROM (\n (\n SELECT * FROM printer_toner_public\n WHERE printer = %s AND\n date > %s\n ORDER BY date\n ) AS T,\n (SELECT @rownum := 0) AS r\n )\n ) AS x\n )\n ''', (printer, since.strftime('%Y-%m-%d')),\n )\n cursor.execute('''\n CREATE TEMPORARY TABLE ordered2\n (PRIMARY KEY (position))\n AS (SELECT * FROM ordered1)\n ''')\n cursor.execute('''\n CREATE TEMPORARY TABLE diffs\n AS (SELECT\n B.date AS date,\n A.value/A.max - B.value/B.max as pct_diff\n FROM\n ordered1 as A,\n ordered2 as B\n WHERE\n B.position = A.position + 1)\n ''')\n cursor.execute(\n '''\n SELECT SUM(pct_diff) as toner_used\n FROM\n diffs\n WHERE\n ABS(pct_diff)<%s\n ''', (cutoff,),\n )\n result = cursor.fetchone()['toner_used']\n return float(result or 0.0)\n\n\n@periodic(120)\ndef _pages_per_day():\n with stats.get_connection() as cursor:\n cursor.execute('''\n SELECT max(value) as value, cast(date as date) as date, printer\n FROM printer_pages_public\n GROUP BY cast(date as date), printer\n ORDER BY date ASC, printer ASC\n ''')\n\n # Resolves the issue of possible missing dates.\n # defaultdict(lambda: defaultdict(int)) doesn't work due to inability to pickle local objects like lambdas;\n # this effectively does the same thing as that.\n pages_printed = defaultdict(partial(defaultdict, int))\n last_seen = {}\n\n for row in cursor:\n if row['printer'] in last_seen:\n pages_printed.setdefault(row['date'], defaultdict(int))\n pages_printed[row['date']][row['printer']] = (\n row['value'] - last_seen[row['printer']]\n )\n last_seen[row['printer']] = row['value']\n\n return pages_printed\n\n\ndef _pages_printed_for_printer(printer, resolution=100):\n with stats.get_connection() as cursor:\n cursor.execute(\n '''\n SELECT Z.date, Z.value FROM (\n SELECT\n T.*,\n @rownum := @rownum + 1 AS position\n FROM (\n (\n SELECT * FROM printer_pages_public\n WHERE printer = %s\n ORDER BY date\n ) AS T,\n (SELECT @rownum := 0) AS r\n )\n ) as Z\n WHERE Z.position mod %s = 0\n ''', (printer, resolution),\n )\n return [\n (time.mktime(row['date'].timetuple()) * 1000, row['value'])\n for row in cursor\n ]\n\n\n@periodic(3600)\ndef _pages_printed_data():\n return [\n {\n 'name': printer,\n 'animation': False,\n 'data': _pages_printed_for_printer(printer),\n }\n for printer in ALL_PRINTERS\n ]\n\n\ndef pages_printed(request):\n return render(\n request,\n 'stats/printing/pages-printed.html',\n {\n 'title': 'Pages Printed',\n 'data': _pages_printed_data(),\n },\n )\n", "path": "ocfweb/stats/printing.py"}], "after_files": [{"content": "import time\nfrom collections import defaultdict\nfrom datetime import date\nfrom datetime import timedelta\nfrom functools import partial\n\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom matplotlib.figure import Figure\nfrom ocflib.lab import stats\nfrom ocflib.printing.printers import PRINTERS\nfrom ocflib.printing.quota import get_connection\nfrom ocflib.printing.quota import SEMESTERLY_QUOTA\n\nfrom ocfweb.caching import periodic\nfrom ocfweb.component.graph import plot_to_image_bytes\n\n\nALL_PRINTERS = ('papercut', 'pagefault', 'logjam', 'logjam-old', 'deforestation')\nACTIVE_PRINTERS 
= ('papercut', 'pagefault', 'logjam')\n\n\ndef stats_printing(request):\n return render(\n request,\n 'stats/printing.html',\n {\n 'title': 'Printing Statistics',\n 'current_printers': PRINTERS,\n 'toner_changes': _toner_changes(),\n 'last_month': [\n date.today() - timedelta(days=i)\n for i in range(30)\n ],\n 'pages_per_day': _pages_per_day(),\n },\n )\n\n\ndef semester_histogram(request):\n return HttpResponse(\n plot_to_image_bytes(_semester_histogram(), format='svg'),\n content_type='image/svg+xml',\n )\n\n\n@periodic(300)\ndef _semester_histogram():\n with get_connection() as c:\n c.execute(\n 'SELECT `user`, `semester` FROM `printed` WHERE `semester` > 0',\n )\n users = [SEMESTERLY_QUOTA - int(r['semester']) for r in c]\n\n fig = Figure(figsize=(10, 5))\n ax = fig.add_subplot(1, 1, 1)\n ax.locator_params(nbins=20)\n ax.hist(users, bins=list(range(0, SEMESTERLY_QUOTA + 5, 5)))\n ax.grid(True)\n ax.set_xlim(SEMESTERLY_QUOTA, 0)\n ax.set_ylabel('Number of users')\n ax.set_xlabel('Remaining balance')\n ax.set_title('Remaining balances this semester')\n\n return fig\n\n\n@periodic(3600)\ndef _toner_changes():\n return [\n (\n printer,\n _toner_used_by_printer(printer),\n )\n for printer in ACTIVE_PRINTERS\n ]\n\n\ndef _toner_used_by_printer(printer, cutoff=.05, since=None):\n \"\"\"Returns toner used for a printer since a given date (by default it\n returns toner used for this semester).\n\n Toner numbers can be significantly noisy, including significant diffs\n whenever toner gets taken out and put back in whenever there is a jam.\n Because of this it's hard to determine if a new toner is inserted into a\n printer or if it was the same toner again. To reduce this noise we only\n count diffs that are smaller than a cutoff which empirically seems to be\n more accurate.\n \"\"\"\n if not since:\n since = stats.current_semester_start()\n\n with stats.get_connection() as cursor:\n cursor.execute(\n '''\n CREATE TEMPORARY TABLE ordered1\n (PRIMARY KEY (position))\n AS (\n SELECT * FROM (\n SELECT\n T.*,\n @rownum := @rownum + 1 AS position\n FROM (\n (\n SELECT * FROM printer_toner_public\n WHERE printer = %s AND\n date > %s\n ORDER BY date\n ) AS T,\n (SELECT @rownum := 0) AS r\n )\n ) AS x\n )\n ''', (printer, since.strftime('%Y-%m-%d')),\n )\n cursor.execute('''\n CREATE TEMPORARY TABLE ordered2\n (PRIMARY KEY (position))\n AS (SELECT * FROM ordered1)\n ''')\n cursor.execute('''\n CREATE TEMPORARY TABLE diffs\n AS (SELECT\n B.date AS date,\n A.value/A.max - B.value/B.max as pct_diff\n FROM\n ordered1 as A,\n ordered2 as B\n WHERE\n B.position = A.position + 1)\n ''')\n cursor.execute(\n '''\n SELECT SUM(pct_diff) as toner_used\n FROM\n diffs\n WHERE\n ABS(pct_diff)<%s\n ''', (cutoff,),\n )\n result = cursor.fetchone()['toner_used']\n return float(result or 0.0)\n\n\n@periodic(120)\ndef _pages_per_day():\n with stats.get_connection() as cursor:\n cursor.execute('''\n SELECT max(value) as value, cast(date as date) as date, printer\n FROM printer_pages_public\n GROUP BY cast(date as date), printer\n ORDER BY date ASC, printer ASC\n ''')\n\n # Resolves the issue of possible missing dates.\n # defaultdict(lambda: defaultdict(int)) doesn't work due to inability to pickle local objects like lambdas;\n # this effectively does the same thing as that.\n pages_printed = defaultdict(partial(defaultdict, int))\n last_seen = {}\n\n for row in cursor:\n if row['printer'] in last_seen:\n pages_printed.setdefault(row['date'], defaultdict(int))\n pages_printed[row['date']][row['printer']] = (\n row['value'] 
- last_seen[row['printer']]\n )\n last_seen[row['printer']] = row['value']\n\n return pages_printed\n\n\ndef _pages_printed_for_printer(printer, resolution=100):\n with stats.get_connection() as cursor:\n cursor.execute(\n '''\n SELECT Z.date, Z.value FROM (\n SELECT\n T.*,\n @rownum := @rownum + 1 AS position\n FROM (\n (\n SELECT * FROM printer_pages_public\n WHERE printer = %s\n ORDER BY date\n ) AS T,\n (SELECT @rownum := 0) AS r\n )\n ) as Z\n WHERE Z.position mod %s = 0\n ''', (printer, resolution),\n )\n return [\n (time.mktime(row['date'].timetuple()) * 1000, row['value'])\n for row in cursor\n ]\n\n\n@periodic(3600)\ndef _pages_printed_data():\n return [\n {\n 'name': printer,\n 'animation': False,\n 'data': _pages_printed_for_printer(printer),\n }\n for printer in ALL_PRINTERS\n ]\n\n\ndef pages_printed(request):\n return render(\n request,\n 'stats/printing/pages-printed.html',\n {\n 'title': 'Pages Printed',\n 'data': _pages_printed_data(),\n },\n )\n", "path": "ocfweb/stats/printing.py"}]} | 2,380 | 168 |
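The patch above replaces the hard-coded `105` upper bin edge with `SEMESTERLY_QUOTA + 5`, so the histogram bins keep covering the full balance range even when the quota constant changes. A small sketch of the difference follows, using an assumed demonstration value for `SEMESTERLY_QUOTA` (the real constant is imported from `ocflib.printing.quota`):

```python
# Sketch of the bin-edge fix. SEMESTERLY_QUOTA is given an assumed value here;
# the real constant is imported from ocflib.printing.quota.
SEMESTERLY_QUOTA = 100

old_bins = list(range(0, 105, 5))                   # hard-coded upper edge
new_bins = list(range(0, SEMESTERLY_QUOTA + 5, 5))  # tracks the quota

assert old_bins == new_bins  # identical only while the quota happens to be 100

# If the quota changes, only the patched expression still spans all balances:
SEMESTERLY_QUOTA = 150
patched = list(range(0, SEMESTERLY_QUOTA + 5, 5))
assert patched[-1] == 150   # covers users holding the full new quota
assert old_bins[-1] == 100  # the stale bins would cut the histogram off early
```

The asserts only make the comparison concrete; in `_semester_histogram` the expression feeds straight into `ax.hist(users, bins=...)`.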
gh_patches_debug_4017 | rasdani/github-patches | git_diff | coala__coala-bears-970 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
GitCommitBear: Change ``returns`` to ``return``
[line](https://github.com/coala-analyzer/coala-bears/blob/master/bears/vcs/git/GitCommitBear.py#L147)
difficulty/newcomer
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bears/vcs/git/GitCommitBear.py`
Content:
```
1 import nltk
2 import re
3 import shutil
4 import os
5
6 from coalib.bears.GlobalBear import GlobalBear
7 from coalib.bears.requirements.PipRequirement import PipRequirement
8 from coalib.misc.ContextManagers import change_directory
9 from coalib.misc.Shell import run_shell_command
10 from coalib.results.Result import Result
11 from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
12 from coalib.settings.FunctionMetadata import FunctionMetadata
13 from coalib.settings.Setting import typed_list
14
15
16 class GitCommitBear(GlobalBear):
17 LANGUAGES = {"Git"}
18 REQUIREMENTS = {PipRequirement('nltk', '3.1.*')}
19 AUTHORS = {'The coala developers'}
20 AUTHORS_EMAILS = {'[email protected]'}
21 LICENSE = 'AGPL-3.0'
22 ASCIINEMA_URL = 'https://asciinema.org/a/e146c9739ojhr8396wedsvf0d'
23 CAN_DETECT = {'Formatting'}
24
25 @classmethod
26 def check_prerequisites(cls):
27 if shutil.which("git") is None:
28 return "git is not installed."
29 else:
30 return True
31
32 @classmethod
33 def get_shortlog_checks_metadata(cls):
34 return FunctionMetadata.from_function(
35 cls.check_shortlog,
36 omit={"self", "shortlog"})
37
38 @classmethod
39 def get_body_checks_metadata(cls):
40 return FunctionMetadata.from_function(
41 cls.check_body,
42 omit={"self", "body"})
43
44 @classmethod
45 def get_metadata(cls):
46 return FunctionMetadata.merge(
47 FunctionMetadata.from_function(
48 cls.run,
49 omit={"self", "dependency_results"}),
50 cls.get_shortlog_checks_metadata(),
51 cls.get_body_checks_metadata())
52
53 def run(self, allow_empty_commit_message: bool = False, **kwargs):
54 """
55 Check the current git commit message at HEAD.
56
57 This bear ensures automatically that the shortlog and body do not
58 exceed a given line-length and that a newline lies between them.
59
60 :param allow_empty_commit_message: Whether empty commit messages are
61 allowed or not.
62 """
63 with change_directory(self.get_config_dir() or os.getcwd()):
64 stdout, stderr = run_shell_command("git log -1 --pretty=%B")
65
66 if stderr:
67 self.err("git:", repr(stderr))
68 return
69
70 stdout = stdout.rstrip("\n").splitlines()
71
72 if len(stdout) == 0:
73 if not allow_empty_commit_message:
74 yield Result(self, "HEAD commit has no message.")
75 return
76
77 yield from self.check_shortlog(
78 stdout[0],
79 **self.get_shortlog_checks_metadata().filter_parameters(kwargs))
80 yield from self.check_body(
81 stdout[1:],
82 **self.get_body_checks_metadata().filter_parameters(kwargs))
83
84 def check_shortlog(self, shortlog,
85 shortlog_length: int=50,
86 shortlog_regex: str="",
87 shortlog_trailing_period: bool=None,
88 shortlog_imperative_check: bool=True,
89 shortlog_wip_check: bool=True):
90 """
91 Checks the given shortlog.
92
93 :param shortlog: The shortlog message string.
94 :param shortlog_length: The maximum length of the shortlog.
95 The newline character at end does not
96 count to the length.
97 :param regex: A regex to check the shortlog with.
98 :param shortlog_trailing_period: Whether a dot shall be enforced at end
99 end or not (or ``None`` for "don't
100 care").
101 :param shortlog_wip_check: Whether a "WIP" in the shortlog text
102 should yield a result or not.
103 """
104 diff = len(shortlog) - shortlog_length
105 if diff > 0:
106 yield Result(self,
107 "Shortlog of the HEAD commit contains {} "
108 "character(s). This is {} character(s) longer than "
109 "the limit ({} > {}).".format(
110 len(shortlog), diff,
111 len(shortlog), shortlog_length))
112
113 if (shortlog[-1] != ".") == shortlog_trailing_period:
114 yield Result(self,
115 "Shortlog of HEAD commit contains no period at end."
116 if shortlog_trailing_period else
117 "Shortlog of HEAD commit contains a period at end.")
118
119 if shortlog_regex:
120 match = re.fullmatch(shortlog_regex, shortlog)
121 if not match:
122 yield Result(
123 self,
124 "Shortlog of HEAD commit does not match given regex:"
125 " {regex}".format(regex=shortlog_regex))
126
127 if shortlog_imperative_check:
128 colon_pos = shortlog.find(':')
129 shortlog = (shortlog[colon_pos + 1:]
130 if colon_pos != -1
131 else shortlog)
132 has_flaws = self.check_imperative(shortlog)
133 if has_flaws:
134 bad_word = has_flaws[0]
135 yield Result(self,
136 "Shortlog of HEAD commit isn't in imperative "
137 "mood! Bad words are '{}'".format(bad_word))
138 if shortlog_wip_check:
139 if "wip" in shortlog.lower()[:4]:
140 yield Result(
141 self,
142 "This commit seems to be marked as work in progress and "
143 "should not be used in production. Treat carefully.")
144
145 def check_imperative(self, paragraph):
146 """
147 Check the given sentence/s for Imperatives.
148
149 :param paragraph:
150 The input paragraph to be tested.
151 :returns:
152 A list of tuples having 2 elements (invalid word, parts of speech)
153 or an empty list if no invalid words are found.
154 """
155 try:
156 words = nltk.word_tokenize(nltk.sent_tokenize(paragraph)[0])
157 # VBZ : Verb, 3rd person singular present, like 'adds', 'writes'
158 # etc
159 # VBD : Verb, Past tense , like 'added', 'wrote' etc
160 # VBG : Verb, Present participle, like 'adding', 'writing'
161 word, tag = nltk.pos_tag(['I'] + words)[1:2][0]
162 if(tag.startswith('VBZ') or
163 tag.startswith('VBD') or
164 tag.startswith('VBG') or
165 word.endswith('ing')): # Handle special case for VBG
166 return (word, tag)
167 else:
168 return None
169 except LookupError as error: # pragma: no cover
170 self.err("NLTK data missing, install by running following "
171 "commands `python3 -m nltk.downloader punkt"
172 " maxent_treebank_pos_tagger averaged_perceptron_tagger`")
173 return
174
175 def check_body(self, body,
176 body_line_length: int=72,
177 force_body: bool=False,
178 ignore_length_regex: typed_list(str)=()):
179 """
180 Checks the given commit body.
181
182 :param body: The commit body splitted by lines.
183 :param body_line_length: The maximum line-length of the body. The
184 newline character at each line end does not
185 count to the length.
186 :param force_body: Whether a body shall exist or not.
187 :param ignore_length_regex: Lines matching each of the regular
188 expressions in this list will be ignored.
189 """
190 if len(body) == 0:
191 if force_body:
192 yield Result(self, "No commit message body at HEAD.")
193 return
194
195 if body[0] != "":
196 yield Result(self, "No newline found between shortlog and body at "
197 "HEAD commit. Please add one.")
198 return
199
200 ignore_regexes = [re.compile(regex) for regex in ignore_length_regex]
201 if any((len(line) > body_line_length and
202 not any(regex.search(line) for regex in ignore_regexes))
203 for line in body[1:]):
204 yield Result(self, "Body of HEAD commit contains too long lines. "
205 "Commit body lines should not exceed {} "
206 "characters.".format(body_line_length))
207
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bears/vcs/git/GitCommitBear.py b/bears/vcs/git/GitCommitBear.py
--- a/bears/vcs/git/GitCommitBear.py
+++ b/bears/vcs/git/GitCommitBear.py
@@ -148,7 +148,7 @@
:param paragraph:
The input paragraph to be tested.
- :returns:
+ :return:
A list of tuples having 2 elements (invalid word, parts of speech)
or an empty list if no invalid words are found.
"""
| {"golden_diff": "diff --git a/bears/vcs/git/GitCommitBear.py b/bears/vcs/git/GitCommitBear.py\n--- a/bears/vcs/git/GitCommitBear.py\n+++ b/bears/vcs/git/GitCommitBear.py\n@@ -148,7 +148,7 @@\n \n :param paragraph:\n The input paragraph to be tested.\n- :returns:\n+ :return:\n A list of tuples having 2 elements (invalid word, parts of speech)\n or an empty list if no invalid words are found.\n \"\"\"\n", "issue": "GitCommitBear: Change ``returns`` to ``return``\n[line](https://github.com/coala-analyzer/coala-bears/blob/master/bears/vcs/git/GitCommitBear.py#L147)\ndifficulty/newcomer\n\n", "before_files": [{"content": "import nltk\nimport re\nimport shutil\nimport os\n\nfrom coalib.bears.GlobalBear import GlobalBear\nfrom coalib.bears.requirements.PipRequirement import PipRequirement\nfrom coalib.misc.ContextManagers import change_directory\nfrom coalib.misc.Shell import run_shell_command\nfrom coalib.results.Result import Result\nfrom coalib.results.RESULT_SEVERITY import RESULT_SEVERITY\nfrom coalib.settings.FunctionMetadata import FunctionMetadata\nfrom coalib.settings.Setting import typed_list\n\n\nclass GitCommitBear(GlobalBear):\n LANGUAGES = {\"Git\"}\n REQUIREMENTS = {PipRequirement('nltk', '3.1.*')}\n AUTHORS = {'The coala developers'}\n AUTHORS_EMAILS = {'[email protected]'}\n LICENSE = 'AGPL-3.0'\n ASCIINEMA_URL = 'https://asciinema.org/a/e146c9739ojhr8396wedsvf0d'\n CAN_DETECT = {'Formatting'}\n\n @classmethod\n def check_prerequisites(cls):\n if shutil.which(\"git\") is None:\n return \"git is not installed.\"\n else:\n return True\n\n @classmethod\n def get_shortlog_checks_metadata(cls):\n return FunctionMetadata.from_function(\n cls.check_shortlog,\n omit={\"self\", \"shortlog\"})\n\n @classmethod\n def get_body_checks_metadata(cls):\n return FunctionMetadata.from_function(\n cls.check_body,\n omit={\"self\", \"body\"})\n\n @classmethod\n def get_metadata(cls):\n return FunctionMetadata.merge(\n FunctionMetadata.from_function(\n cls.run,\n omit={\"self\", \"dependency_results\"}),\n cls.get_shortlog_checks_metadata(),\n cls.get_body_checks_metadata())\n\n def run(self, allow_empty_commit_message: bool = False, **kwargs):\n \"\"\"\n Check the current git commit message at HEAD.\n\n This bear ensures automatically that the shortlog and body do not\n exceed a given line-length and that a newline lies between them.\n\n :param allow_empty_commit_message: Whether empty commit messages are\n allowed or not.\n \"\"\"\n with change_directory(self.get_config_dir() or os.getcwd()):\n stdout, stderr = run_shell_command(\"git log -1 --pretty=%B\")\n\n if stderr:\n self.err(\"git:\", repr(stderr))\n return\n\n stdout = stdout.rstrip(\"\\n\").splitlines()\n\n if len(stdout) == 0:\n if not allow_empty_commit_message:\n yield Result(self, \"HEAD commit has no message.\")\n return\n\n yield from self.check_shortlog(\n stdout[0],\n **self.get_shortlog_checks_metadata().filter_parameters(kwargs))\n yield from self.check_body(\n stdout[1:],\n **self.get_body_checks_metadata().filter_parameters(kwargs))\n\n def check_shortlog(self, shortlog,\n shortlog_length: int=50,\n shortlog_regex: str=\"\",\n shortlog_trailing_period: bool=None,\n shortlog_imperative_check: bool=True,\n shortlog_wip_check: bool=True):\n \"\"\"\n Checks the given shortlog.\n\n :param shortlog: The shortlog message string.\n :param shortlog_length: The maximum length of the shortlog.\n The newline character at end does not\n count to the length.\n :param regex: A regex to check the shortlog with.\n :param 
shortlog_trailing_period: Whether a dot shall be enforced at end\n end or not (or ``None`` for \"don't\n care\").\n :param shortlog_wip_check: Whether a \"WIP\" in the shortlog text\n should yield a result or not.\n \"\"\"\n diff = len(shortlog) - shortlog_length\n if diff > 0:\n yield Result(self,\n \"Shortlog of the HEAD commit contains {} \"\n \"character(s). This is {} character(s) longer than \"\n \"the limit ({} > {}).\".format(\n len(shortlog), diff,\n len(shortlog), shortlog_length))\n\n if (shortlog[-1] != \".\") == shortlog_trailing_period:\n yield Result(self,\n \"Shortlog of HEAD commit contains no period at end.\"\n if shortlog_trailing_period else\n \"Shortlog of HEAD commit contains a period at end.\")\n\n if shortlog_regex:\n match = re.fullmatch(shortlog_regex, shortlog)\n if not match:\n yield Result(\n self,\n \"Shortlog of HEAD commit does not match given regex:\"\n \" {regex}\".format(regex=shortlog_regex))\n\n if shortlog_imperative_check:\n colon_pos = shortlog.find(':')\n shortlog = (shortlog[colon_pos + 1:]\n if colon_pos != -1\n else shortlog)\n has_flaws = self.check_imperative(shortlog)\n if has_flaws:\n bad_word = has_flaws[0]\n yield Result(self,\n \"Shortlog of HEAD commit isn't in imperative \"\n \"mood! Bad words are '{}'\".format(bad_word))\n if shortlog_wip_check:\n if \"wip\" in shortlog.lower()[:4]:\n yield Result(\n self,\n \"This commit seems to be marked as work in progress and \"\n \"should not be used in production. Treat carefully.\")\n\n def check_imperative(self, paragraph):\n \"\"\"\n Check the given sentence/s for Imperatives.\n\n :param paragraph:\n The input paragraph to be tested.\n :returns:\n A list of tuples having 2 elements (invalid word, parts of speech)\n or an empty list if no invalid words are found.\n \"\"\"\n try:\n words = nltk.word_tokenize(nltk.sent_tokenize(paragraph)[0])\n # VBZ : Verb, 3rd person singular present, like 'adds', 'writes'\n # etc\n # VBD : Verb, Past tense , like 'added', 'wrote' etc\n # VBG : Verb, Present participle, like 'adding', 'writing'\n word, tag = nltk.pos_tag(['I'] + words)[1:2][0]\n if(tag.startswith('VBZ') or\n tag.startswith('VBD') or\n tag.startswith('VBG') or\n word.endswith('ing')): # Handle special case for VBG\n return (word, tag)\n else:\n return None\n except LookupError as error: # pragma: no cover\n self.err(\"NLTK data missing, install by running following \"\n \"commands `python3 -m nltk.downloader punkt\"\n \" maxent_treebank_pos_tagger averaged_perceptron_tagger`\")\n return\n\n def check_body(self, body,\n body_line_length: int=72,\n force_body: bool=False,\n ignore_length_regex: typed_list(str)=()):\n \"\"\"\n Checks the given commit body.\n\n :param body: The commit body splitted by lines.\n :param body_line_length: The maximum line-length of the body. The\n newline character at each line end does not\n count to the length.\n :param force_body: Whether a body shall exist or not.\n :param ignore_length_regex: Lines matching each of the regular\n expressions in this list will be ignored.\n \"\"\"\n if len(body) == 0:\n if force_body:\n yield Result(self, \"No commit message body at HEAD.\")\n return\n\n if body[0] != \"\":\n yield Result(self, \"No newline found between shortlog and body at \"\n \"HEAD commit. 
Please add one.\")\n return\n\n ignore_regexes = [re.compile(regex) for regex in ignore_length_regex]\n if any((len(line) > body_line_length and\n not any(regex.search(line) for regex in ignore_regexes))\n for line in body[1:]):\n yield Result(self, \"Body of HEAD commit contains too long lines. \"\n \"Commit body lines should not exceed {} \"\n \"characters.\".format(body_line_length))\n", "path": "bears/vcs/git/GitCommitBear.py"}], "after_files": [{"content": "import nltk\nimport re\nimport shutil\nimport os\n\nfrom coalib.bears.GlobalBear import GlobalBear\nfrom coalib.bears.requirements.PipRequirement import PipRequirement\nfrom coalib.misc.ContextManagers import change_directory\nfrom coalib.misc.Shell import run_shell_command\nfrom coalib.results.Result import Result\nfrom coalib.results.RESULT_SEVERITY import RESULT_SEVERITY\nfrom coalib.settings.FunctionMetadata import FunctionMetadata\nfrom coalib.settings.Setting import typed_list\n\n\nclass GitCommitBear(GlobalBear):\n LANGUAGES = {\"Git\"}\n REQUIREMENTS = {PipRequirement('nltk', '3.1.*')}\n AUTHORS = {'The coala developers'}\n AUTHORS_EMAILS = {'[email protected]'}\n LICENSE = 'AGPL-3.0'\n ASCIINEMA_URL = 'https://asciinema.org/a/e146c9739ojhr8396wedsvf0d'\n CAN_DETECT = {'Formatting'}\n\n @classmethod\n def check_prerequisites(cls):\n if shutil.which(\"git\") is None:\n return \"git is not installed.\"\n else:\n return True\n\n @classmethod\n def get_shortlog_checks_metadata(cls):\n return FunctionMetadata.from_function(\n cls.check_shortlog,\n omit={\"self\", \"shortlog\"})\n\n @classmethod\n def get_body_checks_metadata(cls):\n return FunctionMetadata.from_function(\n cls.check_body,\n omit={\"self\", \"body\"})\n\n @classmethod\n def get_metadata(cls):\n return FunctionMetadata.merge(\n FunctionMetadata.from_function(\n cls.run,\n omit={\"self\", \"dependency_results\"}),\n cls.get_shortlog_checks_metadata(),\n cls.get_body_checks_metadata())\n\n def run(self, allow_empty_commit_message: bool = False, **kwargs):\n \"\"\"\n Check the current git commit message at HEAD.\n\n This bear ensures automatically that the shortlog and body do not\n exceed a given line-length and that a newline lies between them.\n\n :param allow_empty_commit_message: Whether empty commit messages are\n allowed or not.\n \"\"\"\n with change_directory(self.get_config_dir() or os.getcwd()):\n stdout, stderr = run_shell_command(\"git log -1 --pretty=%B\")\n\n if stderr:\n self.err(\"git:\", repr(stderr))\n return\n\n stdout = stdout.rstrip(\"\\n\").splitlines()\n\n if len(stdout) == 0:\n if not allow_empty_commit_message:\n yield Result(self, \"HEAD commit has no message.\")\n return\n\n yield from self.check_shortlog(\n stdout[0],\n **self.get_shortlog_checks_metadata().filter_parameters(kwargs))\n yield from self.check_body(\n stdout[1:],\n **self.get_body_checks_metadata().filter_parameters(kwargs))\n\n def check_shortlog(self, shortlog,\n shortlog_length: int=50,\n shortlog_regex: str=\"\",\n shortlog_trailing_period: bool=None,\n shortlog_imperative_check: bool=True,\n shortlog_wip_check: bool=True):\n \"\"\"\n Checks the given shortlog.\n\n :param shortlog: The shortlog message string.\n :param shortlog_length: The maximum length of the shortlog.\n The newline character at end does not\n count to the length.\n :param regex: A regex to check the shortlog with.\n :param shortlog_trailing_period: Whether a dot shall be enforced at end\n end or not (or ``None`` for \"don't\n care\").\n :param shortlog_wip_check: Whether a \"WIP\" in the shortlog 
text\n should yield a result or not.\n \"\"\"\n diff = len(shortlog) - shortlog_length\n if diff > 0:\n yield Result(self,\n \"Shortlog of the HEAD commit contains {} \"\n \"character(s). This is {} character(s) longer than \"\n \"the limit ({} > {}).\".format(\n len(shortlog), diff,\n len(shortlog), shortlog_length))\n\n if (shortlog[-1] != \".\") == shortlog_trailing_period:\n yield Result(self,\n \"Shortlog of HEAD commit contains no period at end.\"\n if shortlog_trailing_period else\n \"Shortlog of HEAD commit contains a period at end.\")\n\n if shortlog_regex:\n match = re.fullmatch(shortlog_regex, shortlog)\n if not match:\n yield Result(\n self,\n \"Shortlog of HEAD commit does not match given regex:\"\n \" {regex}\".format(regex=shortlog_regex))\n\n if shortlog_imperative_check:\n colon_pos = shortlog.find(':')\n shortlog = (shortlog[colon_pos + 1:]\n if colon_pos != -1\n else shortlog)\n has_flaws = self.check_imperative(shortlog)\n if has_flaws:\n bad_word = has_flaws[0]\n yield Result(self,\n \"Shortlog of HEAD commit isn't in imperative \"\n \"mood! Bad words are '{}'\".format(bad_word))\n if shortlog_wip_check:\n if \"wip\" in shortlog.lower()[:4]:\n yield Result(\n self,\n \"This commit seems to be marked as work in progress and \"\n \"should not be used in production. Treat carefully.\")\n\n def check_imperative(self, paragraph):\n \"\"\"\n Check the given sentence/s for Imperatives.\n\n :param paragraph:\n The input paragraph to be tested.\n :return:\n A list of tuples having 2 elements (invalid word, parts of speech)\n or an empty list if no invalid words are found.\n \"\"\"\n try:\n words = nltk.word_tokenize(nltk.sent_tokenize(paragraph)[0])\n # VBZ : Verb, 3rd person singular present, like 'adds', 'writes'\n # etc\n # VBD : Verb, Past tense , like 'added', 'wrote' etc\n # VBG : Verb, Present participle, like 'adding', 'writing'\n word, tag = nltk.pos_tag(['I'] + words)[1:2][0]\n if(tag.startswith('VBZ') or\n tag.startswith('VBD') or\n tag.startswith('VBG') or\n word.endswith('ing')): # Handle special case for VBG\n return (word, tag)\n else:\n return None\n except LookupError as error: # pragma: no cover\n self.err(\"NLTK data missing, install by running following \"\n \"commands `python3 -m nltk.downloader punkt\"\n \" maxent_treebank_pos_tagger averaged_perceptron_tagger`\")\n return\n\n def check_body(self, body,\n body_line_length: int=72,\n force_body: bool=False,\n ignore_length_regex: typed_list(str)=()):\n \"\"\"\n Checks the given commit body.\n\n :param body: The commit body splitted by lines.\n :param body_line_length: The maximum line-length of the body. The\n newline character at each line end does not\n count to the length.\n :param force_body: Whether a body shall exist or not.\n :param ignore_length_regex: Lines matching each of the regular\n expressions in this list will be ignored.\n \"\"\"\n if len(body) == 0:\n if force_body:\n yield Result(self, \"No commit message body at HEAD.\")\n return\n\n if body[0] != \"\":\n yield Result(self, \"No newline found between shortlog and body at \"\n \"HEAD commit. Please add one.\")\n return\n\n ignore_regexes = [re.compile(regex) for regex in ignore_length_regex]\n if any((len(line) > body_line_length and\n not any(regex.search(line) for regex in ignore_regexes))\n for line in body[1:]):\n yield Result(self, \"Body of HEAD commit contains too long lines. \"\n \"Commit body lines should not exceed {} \"\n \"characters.\".format(body_line_length))\n", "path": "bears/vcs/git/GitCommitBear.py"}]} | 2,557 | 121 |
gh_patches_debug_4709 | rasdani/github-patches | git_diff | pyca__cryptography-1549 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Don't install enum34 on Python3.4+
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import os
8 import platform
9 import subprocess
10 import sys
11 from distutils.command.build import build
12
13 import pkg_resources
14
15 from setuptools import find_packages, setup
16 from setuptools.command.install import install
17 from setuptools.command.test import test
18
19
20 base_dir = os.path.dirname(__file__)
21 src_dir = os.path.join(base_dir, "src")
22
23 # When executing the setup.py, we need to be able to import ourselves, this
24 # means that we need to add the src/ directory to the sys.path.
25 sys.path.insert(0, src_dir)
26
27 about = {}
28 with open(os.path.join(src_dir, "cryptography", "__about__.py")) as f:
29 exec(f.read(), about)
30
31
32 SETUPTOOLS_DEPENDENCY = "setuptools"
33 CFFI_DEPENDENCY = "cffi>=0.8"
34 SIX_DEPENDENCY = "six>=1.4.1"
35 VECTORS_DEPENDENCY = "cryptography_vectors=={0}".format(about['__version__'])
36
37 requirements = [
38 CFFI_DEPENDENCY,
39 "enum34",
40 "pyasn1",
41 SIX_DEPENDENCY,
42 SETUPTOOLS_DEPENDENCY
43 ]
44
45 # If you add a new dep here you probably need to add it in the tox.ini as well
46 test_requirements = [
47 "pytest",
48 "pretend",
49 "iso8601",
50 ]
51
52 # If there's no vectors locally that probably means we are in a tarball and
53 # need to go and get the matching vectors package from PyPi
54 if not os.path.exists(os.path.join(base_dir, "vectors/setup.py")):
55 test_requirements.append(VECTORS_DEPENDENCY)
56
57
58 def cc_is_available():
59 return sys.platform == "darwin" and list(map(
60 int, platform.mac_ver()[0].split("."))) >= [10, 8, 0]
61
62
63 backends = [
64 "openssl = cryptography.hazmat.backends.openssl:backend"
65 ]
66
67 if cc_is_available():
68 backends.append(
69 "commoncrypto = cryptography.hazmat.backends.commoncrypto:backend",
70 )
71
72
73 def get_ext_modules():
74 from cryptography.hazmat.bindings.commoncrypto.binding import (
75 Binding as CommonCryptoBinding
76 )
77 from cryptography.hazmat.bindings.openssl.binding import (
78 Binding as OpenSSLBinding
79 )
80 from cryptography.hazmat.primitives import constant_time, padding
81
82 ext_modules = [
83 OpenSSLBinding.ffi.verifier.get_extension(),
84 constant_time._ffi.verifier.get_extension(),
85 padding._ffi.verifier.get_extension()
86 ]
87 if cc_is_available():
88 ext_modules.append(CommonCryptoBinding.ffi.verifier.get_extension())
89 return ext_modules
90
91
92 class CFFIBuild(build):
93 """
94 This class exists, instead of just providing ``ext_modules=[...]`` directly
95 in ``setup()`` because importing cryptography requires we have several
96 packages installed first.
97
98 By doing the imports here we ensure that packages listed in
99 ``setup_requires`` are already installed.
100 """
101
102 def finalize_options(self):
103 self.distribution.ext_modules = get_ext_modules()
104 build.finalize_options(self)
105
106
107 class CFFIInstall(install):
108 """
109 As a consequence of CFFIBuild and it's late addition of ext_modules, we
110 need the equivalent for the ``install`` command to install into platlib
111 install-dir rather than purelib.
112 """
113
114 def finalize_options(self):
115 self.distribution.ext_modules = get_ext_modules()
116 install.finalize_options(self)
117
118
119 class PyTest(test):
120 def finalize_options(self):
121 test.finalize_options(self)
122 self.test_args = []
123 self.test_suite = True
124
125 # This means there's a vectors/ folder with the package in here.
126 # cd into it, install the vectors package and then refresh sys.path
127 if VECTORS_DEPENDENCY not in test_requirements:
128 subprocess.check_call(
129 [sys.executable, "setup.py", "install"], cwd="vectors"
130 )
131 pkg_resources.get_distribution("cryptography_vectors").activate()
132
133 def run_tests(self):
134 # Import here because in module scope the eggs are not loaded.
135 import pytest
136 errno = pytest.main(self.test_args)
137 sys.exit(errno)
138
139
140 def keywords_with_side_effects(argv):
141 """
142 Get a dictionary with setup keywords that (can) have side effects.
143
144 :param argv: A list of strings with command line arguments.
145 :returns: A dictionary with keyword arguments for the ``setup()`` function.
146
147 This setup.py script uses the setuptools 'setup_requires' feature because
148 this is required by the cffi package to compile extension modules. The
149 purpose of ``keywords_with_side_effects()`` is to avoid triggering the cffi
150 build process as a result of setup.py invocations that don't need the cffi
151 module to be built (setup.py serves the dual purpose of exposing package
152 metadata).
153
154 All of the options listed by ``python setup.py --help`` that print
155 information should be recognized here. The commands ``clean``,
156 ``egg_info``, ``register``, ``sdist`` and ``upload`` are also recognized.
157 Any combination of these options and commands is also supported.
158
159 This function was originally based on the `setup.py script`_ of SciPy (see
160 also the discussion in `pip issue #25`_).
161
162 .. _pip issue #25: https://github.com/pypa/pip/issues/25
163 .. _setup.py script: https://github.com/scipy/scipy/blob/master/setup.py
164 """
165 no_setup_requires_arguments = (
166 '-h', '--help',
167 '-n', '--dry-run',
168 '-q', '--quiet',
169 '-v', '--verbose',
170 '-V', '--version',
171 '--author',
172 '--author-email',
173 '--classifiers',
174 '--contact',
175 '--contact-email',
176 '--description',
177 '--egg-base',
178 '--fullname',
179 '--help-commands',
180 '--keywords',
181 '--licence',
182 '--license',
183 '--long-description',
184 '--maintainer',
185 '--maintainer-email',
186 '--name',
187 '--no-user-cfg',
188 '--obsoletes',
189 '--platforms',
190 '--provides',
191 '--requires',
192 '--url',
193 'clean',
194 'egg_info',
195 'register',
196 'sdist',
197 'upload',
198 )
199
200 def is_short_option(argument):
201 """Check whether a command line argument is a short option."""
202 return len(argument) >= 2 and argument[0] == '-' and argument[1] != '-'
203
204 def expand_short_options(argument):
205 """Expand combined short options into canonical short options."""
206 return ('-' + char for char in argument[1:])
207
208 def argument_without_setup_requirements(argv, i):
209 """Check whether a command line argument needs setup requirements."""
210 if argv[i] in no_setup_requires_arguments:
211 # Simple case: An argument which is either an option or a command
212 # which doesn't need setup requirements.
213 return True
214 elif (is_short_option(argv[i]) and
215 all(option in no_setup_requires_arguments
216 for option in expand_short_options(argv[i]))):
217 # Not so simple case: Combined short options none of which need
218 # setup requirements.
219 return True
220 elif argv[i - 1:i] == ['--egg-base']:
221 # Tricky case: --egg-info takes an argument which should not make
222 # us use setup_requires (defeating the purpose of this code).
223 return True
224 else:
225 return False
226
227 if all(argument_without_setup_requirements(argv, i)
228 for i in range(1, len(argv))):
229 return {
230 "cmdclass": {
231 "build": DummyCFFIBuild,
232 "install": DummyCFFIInstall,
233 "test": DummyPyTest,
234 }
235 }
236 else:
237 return {
238 "setup_requires": requirements,
239 "cmdclass": {
240 "build": CFFIBuild,
241 "install": CFFIInstall,
242 "test": PyTest,
243 }
244 }
245
246
247 setup_requires_error = ("Requested setup command that needs 'setup_requires' "
248 "while command line arguments implied a side effect "
249 "free command or option.")
250
251
252 class DummyCFFIBuild(build):
253 """
254 This class makes it very obvious when ``keywords_with_side_effects()`` has
255 incorrectly interpreted the command line arguments to ``setup.py build`` as
256 one of the 'side effect free' commands or options.
257 """
258
259 def run(self):
260 raise RuntimeError(setup_requires_error)
261
262
263 class DummyCFFIInstall(install):
264 """
265 This class makes it very obvious when ``keywords_with_side_effects()`` has
266 incorrectly interpreted the command line arguments to ``setup.py install``
267 as one of the 'side effect free' commands or options.
268 """
269
270 def run(self):
271 raise RuntimeError(setup_requires_error)
272
273
274 class DummyPyTest(test):
275 """
276 This class makes it very obvious when ``keywords_with_side_effects()`` has
277 incorrectly interpreted the command line arguments to ``setup.py test`` as
278 one of the 'side effect free' commands or options.
279 """
280
281 def run_tests(self):
282 raise RuntimeError(setup_requires_error)
283
284
285 with open(os.path.join(base_dir, "README.rst")) as f:
286 long_description = f.read()
287
288
289 setup(
290 name=about["__title__"],
291 version=about["__version__"],
292
293 description=about["__summary__"],
294 long_description=long_description,
295 license=about["__license__"],
296 url=about["__uri__"],
297
298 author=about["__author__"],
299 author_email=about["__email__"],
300
301 classifiers=[
302 "Intended Audience :: Developers",
303 "License :: OSI Approved :: Apache Software License",
304 "License :: OSI Approved :: BSD License",
305 "Natural Language :: English",
306 "Operating System :: MacOS :: MacOS X",
307 "Operating System :: POSIX",
308 "Operating System :: POSIX :: BSD",
309 "Operating System :: POSIX :: Linux",
310 "Operating System :: Microsoft :: Windows",
311 "Programming Language :: Python",
312 "Programming Language :: Python :: 2",
313 "Programming Language :: Python :: 2.6",
314 "Programming Language :: Python :: 2.7",
315 "Programming Language :: Python :: 3",
316 "Programming Language :: Python :: 3.2",
317 "Programming Language :: Python :: 3.3",
318 "Programming Language :: Python :: 3.4",
319 "Programming Language :: Python :: Implementation :: CPython",
320 "Programming Language :: Python :: Implementation :: PyPy",
321 "Topic :: Security :: Cryptography",
322 ],
323
324 package_dir={"": "src"},
325 packages=find_packages(where="src", exclude=["tests", "tests.*"]),
326 include_package_data=True,
327
328 install_requires=requirements,
329 tests_require=test_requirements,
330
331 # for cffi
332 zip_safe=False,
333 ext_package="cryptography",
334 entry_points={
335 "cryptography.backends": backends,
336 },
337 **keywords_with_side_effects(sys.argv)
338 )
339
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
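The golden patch that follows makes the enum34 requirement conditional on the running interpreter. As a rough, hypothetical sketch of that pattern outside the full setup.py (the dependency names mirror the file above; everything else is illustrative only):

```python
import sys

# Dependencies needed on every supported Python version.
requirements = [
    "cffi>=0.8",
    "pyasn1",
    "six>=1.4.1",
    "setuptools",
]

# enum34 only backports the stdlib `enum` module added in Python 3.4,
# so append it solely for older interpreters.
if sys.version_info < (3, 4):
    requirements.append("enum34")
```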
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -36,12 +36,14 @@
requirements = [
CFFI_DEPENDENCY,
- "enum34",
"pyasn1",
SIX_DEPENDENCY,
SETUPTOOLS_DEPENDENCY
]
+if sys.version_info < (3, 4):
+ requirements.append("enum34")
+
# If you add a new dep here you probably need to add it in the tox.ini as well
test_requirements = [
"pytest",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -36,12 +36,14 @@\n \n requirements = [\n CFFI_DEPENDENCY,\n- \"enum34\",\n \"pyasn1\",\n SIX_DEPENDENCY,\n SETUPTOOLS_DEPENDENCY\n ]\n \n+if sys.version_info < (3, 4):\n+ requirements.append(\"enum34\")\n+\n # If you add a new dep here you probably need to add it in the tox.ini as well\n test_requirements = [\n \"pytest\",\n", "issue": "Don't install enum34 on Python3.4+\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport platform\nimport subprocess\nimport sys\nfrom distutils.command.build import build\n\nimport pkg_resources\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.install import install\nfrom setuptools.command.test import test\n\n\nbase_dir = os.path.dirname(__file__)\nsrc_dir = os.path.join(base_dir, \"src\")\n\n# When executing the setup.py, we need to be able to import ourselves, this\n# means that we need to add the src/ directory to the sys.path.\nsys.path.insert(0, src_dir)\n\nabout = {}\nwith open(os.path.join(src_dir, \"cryptography\", \"__about__.py\")) as f:\n exec(f.read(), about)\n\n\nSETUPTOOLS_DEPENDENCY = \"setuptools\"\nCFFI_DEPENDENCY = \"cffi>=0.8\"\nSIX_DEPENDENCY = \"six>=1.4.1\"\nVECTORS_DEPENDENCY = \"cryptography_vectors=={0}\".format(about['__version__'])\n\nrequirements = [\n CFFI_DEPENDENCY,\n \"enum34\",\n \"pyasn1\",\n SIX_DEPENDENCY,\n SETUPTOOLS_DEPENDENCY\n]\n\n# If you add a new dep here you probably need to add it in the tox.ini as well\ntest_requirements = [\n \"pytest\",\n \"pretend\",\n \"iso8601\",\n]\n\n# If there's no vectors locally that probably means we are in a tarball and\n# need to go and get the matching vectors package from PyPi\nif not os.path.exists(os.path.join(base_dir, \"vectors/setup.py\")):\n test_requirements.append(VECTORS_DEPENDENCY)\n\n\ndef cc_is_available():\n return sys.platform == \"darwin\" and list(map(\n int, platform.mac_ver()[0].split(\".\"))) >= [10, 8, 0]\n\n\nbackends = [\n \"openssl = cryptography.hazmat.backends.openssl:backend\"\n]\n\nif cc_is_available():\n backends.append(\n \"commoncrypto = cryptography.hazmat.backends.commoncrypto:backend\",\n )\n\n\ndef get_ext_modules():\n from cryptography.hazmat.bindings.commoncrypto.binding import (\n Binding as CommonCryptoBinding\n )\n from cryptography.hazmat.bindings.openssl.binding import (\n Binding as OpenSSLBinding\n )\n from cryptography.hazmat.primitives import constant_time, padding\n\n ext_modules = [\n OpenSSLBinding.ffi.verifier.get_extension(),\n constant_time._ffi.verifier.get_extension(),\n padding._ffi.verifier.get_extension()\n ]\n if cc_is_available():\n ext_modules.append(CommonCryptoBinding.ffi.verifier.get_extension())\n return ext_modules\n\n\nclass CFFIBuild(build):\n \"\"\"\n This class exists, instead of just providing ``ext_modules=[...]`` directly\n in ``setup()`` because importing cryptography requires we have several\n packages installed first.\n\n By doing the imports here we ensure that packages listed in\n ``setup_requires`` are already installed.\n \"\"\"\n\n def finalize_options(self):\n self.distribution.ext_modules = get_ext_modules()\n build.finalize_options(self)\n\n\nclass CFFIInstall(install):\n \"\"\"\n As a consequence of CFFIBuild and it's late addition of 
ext_modules, we\n need the equivalent for the ``install`` command to install into platlib\n install-dir rather than purelib.\n \"\"\"\n\n def finalize_options(self):\n self.distribution.ext_modules = get_ext_modules()\n install.finalize_options(self)\n\n\nclass PyTest(test):\n def finalize_options(self):\n test.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n # This means there's a vectors/ folder with the package in here.\n # cd into it, install the vectors package and then refresh sys.path\n if VECTORS_DEPENDENCY not in test_requirements:\n subprocess.check_call(\n [sys.executable, \"setup.py\", \"install\"], cwd=\"vectors\"\n )\n pkg_resources.get_distribution(\"cryptography_vectors\").activate()\n\n def run_tests(self):\n # Import here because in module scope the eggs are not loaded.\n import pytest\n errno = pytest.main(self.test_args)\n sys.exit(errno)\n\n\ndef keywords_with_side_effects(argv):\n \"\"\"\n Get a dictionary with setup keywords that (can) have side effects.\n\n :param argv: A list of strings with command line arguments.\n :returns: A dictionary with keyword arguments for the ``setup()`` function.\n\n This setup.py script uses the setuptools 'setup_requires' feature because\n this is required by the cffi package to compile extension modules. The\n purpose of ``keywords_with_side_effects()`` is to avoid triggering the cffi\n build process as a result of setup.py invocations that don't need the cffi\n module to be built (setup.py serves the dual purpose of exposing package\n metadata).\n\n All of the options listed by ``python setup.py --help`` that print\n information should be recognized here. The commands ``clean``,\n ``egg_info``, ``register``, ``sdist`` and ``upload`` are also recognized.\n Any combination of these options and commands is also supported.\n\n This function was originally based on the `setup.py script`_ of SciPy (see\n also the discussion in `pip issue #25`_).\n\n .. _pip issue #25: https://github.com/pypa/pip/issues/25\n .. 
_setup.py script: https://github.com/scipy/scipy/blob/master/setup.py\n \"\"\"\n no_setup_requires_arguments = (\n '-h', '--help',\n '-n', '--dry-run',\n '-q', '--quiet',\n '-v', '--verbose',\n '-V', '--version',\n '--author',\n '--author-email',\n '--classifiers',\n '--contact',\n '--contact-email',\n '--description',\n '--egg-base',\n '--fullname',\n '--help-commands',\n '--keywords',\n '--licence',\n '--license',\n '--long-description',\n '--maintainer',\n '--maintainer-email',\n '--name',\n '--no-user-cfg',\n '--obsoletes',\n '--platforms',\n '--provides',\n '--requires',\n '--url',\n 'clean',\n 'egg_info',\n 'register',\n 'sdist',\n 'upload',\n )\n\n def is_short_option(argument):\n \"\"\"Check whether a command line argument is a short option.\"\"\"\n return len(argument) >= 2 and argument[0] == '-' and argument[1] != '-'\n\n def expand_short_options(argument):\n \"\"\"Expand combined short options into canonical short options.\"\"\"\n return ('-' + char for char in argument[1:])\n\n def argument_without_setup_requirements(argv, i):\n \"\"\"Check whether a command line argument needs setup requirements.\"\"\"\n if argv[i] in no_setup_requires_arguments:\n # Simple case: An argument which is either an option or a command\n # which doesn't need setup requirements.\n return True\n elif (is_short_option(argv[i]) and\n all(option in no_setup_requires_arguments\n for option in expand_short_options(argv[i]))):\n # Not so simple case: Combined short options none of which need\n # setup requirements.\n return True\n elif argv[i - 1:i] == ['--egg-base']:\n # Tricky case: --egg-info takes an argument which should not make\n # us use setup_requires (defeating the purpose of this code).\n return True\n else:\n return False\n\n if all(argument_without_setup_requirements(argv, i)\n for i in range(1, len(argv))):\n return {\n \"cmdclass\": {\n \"build\": DummyCFFIBuild,\n \"install\": DummyCFFIInstall,\n \"test\": DummyPyTest,\n }\n }\n else:\n return {\n \"setup_requires\": requirements,\n \"cmdclass\": {\n \"build\": CFFIBuild,\n \"install\": CFFIInstall,\n \"test\": PyTest,\n }\n }\n\n\nsetup_requires_error = (\"Requested setup command that needs 'setup_requires' \"\n \"while command line arguments implied a side effect \"\n \"free command or option.\")\n\n\nclass DummyCFFIBuild(build):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py build`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise RuntimeError(setup_requires_error)\n\n\nclass DummyCFFIInstall(install):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py install``\n as one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise RuntimeError(setup_requires_error)\n\n\nclass DummyPyTest(test):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py test`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run_tests(self):\n raise RuntimeError(setup_requires_error)\n\n\nwith open(os.path.join(base_dir, \"README.rst\")) as f:\n long_description = f.read()\n\n\nsetup(\n name=about[\"__title__\"],\n version=about[\"__version__\"],\n\n description=about[\"__summary__\"],\n long_description=long_description,\n license=about[\"__license__\"],\n 
url=about[\"__uri__\"],\n\n author=about[\"__author__\"],\n author_email=about[\"__email__\"],\n\n classifiers=[\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.2\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Security :: Cryptography\",\n ],\n\n package_dir={\"\": \"src\"},\n packages=find_packages(where=\"src\", exclude=[\"tests\", \"tests.*\"]),\n include_package_data=True,\n\n install_requires=requirements,\n tests_require=test_requirements,\n\n # for cffi\n zip_safe=False,\n ext_package=\"cryptography\",\n entry_points={\n \"cryptography.backends\": backends,\n },\n **keywords_with_side_effects(sys.argv)\n)\n", "path": "setup.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport platform\nimport subprocess\nimport sys\nfrom distutils.command.build import build\n\nimport pkg_resources\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.install import install\nfrom setuptools.command.test import test\n\n\nbase_dir = os.path.dirname(__file__)\nsrc_dir = os.path.join(base_dir, \"src\")\n\n# When executing the setup.py, we need to be able to import ourselves, this\n# means that we need to add the src/ directory to the sys.path.\nsys.path.insert(0, src_dir)\n\nabout = {}\nwith open(os.path.join(src_dir, \"cryptography\", \"__about__.py\")) as f:\n exec(f.read(), about)\n\n\nSETUPTOOLS_DEPENDENCY = \"setuptools\"\nCFFI_DEPENDENCY = \"cffi>=0.8\"\nSIX_DEPENDENCY = \"six>=1.4.1\"\nVECTORS_DEPENDENCY = \"cryptography_vectors=={0}\".format(about['__version__'])\n\nrequirements = [\n CFFI_DEPENDENCY,\n \"pyasn1\",\n SIX_DEPENDENCY,\n SETUPTOOLS_DEPENDENCY\n]\n\nif sys.version_info < (3, 4):\n requirements.append(\"enum34\")\n\n# If you add a new dep here you probably need to add it in the tox.ini as well\ntest_requirements = [\n \"pytest\",\n \"pretend\",\n \"iso8601\",\n]\n\n# If there's no vectors locally that probably means we are in a tarball and\n# need to go and get the matching vectors package from PyPi\nif not os.path.exists(os.path.join(base_dir, \"vectors/setup.py\")):\n test_requirements.append(VECTORS_DEPENDENCY)\n\n\ndef cc_is_available():\n return sys.platform == \"darwin\" and list(map(\n int, platform.mac_ver()[0].split(\".\"))) >= [10, 8, 0]\n\n\nbackends = [\n \"openssl = cryptography.hazmat.backends.openssl:backend\"\n]\n\nif cc_is_available():\n backends.append(\n \"commoncrypto = cryptography.hazmat.backends.commoncrypto:backend\",\n )\n\n\ndef get_ext_modules():\n from cryptography.hazmat.bindings.commoncrypto.binding import (\n Binding as 
CommonCryptoBinding\n )\n from cryptography.hazmat.bindings.openssl.binding import (\n Binding as OpenSSLBinding\n )\n from cryptography.hazmat.primitives import constant_time, padding\n\n ext_modules = [\n OpenSSLBinding.ffi.verifier.get_extension(),\n constant_time._ffi.verifier.get_extension(),\n padding._ffi.verifier.get_extension()\n ]\n if cc_is_available():\n ext_modules.append(CommonCryptoBinding.ffi.verifier.get_extension())\n return ext_modules\n\n\nclass CFFIBuild(build):\n \"\"\"\n This class exists, instead of just providing ``ext_modules=[...]`` directly\n in ``setup()`` because importing cryptography requires we have several\n packages installed first.\n\n By doing the imports here we ensure that packages listed in\n ``setup_requires`` are already installed.\n \"\"\"\n\n def finalize_options(self):\n self.distribution.ext_modules = get_ext_modules()\n build.finalize_options(self)\n\n\nclass CFFIInstall(install):\n \"\"\"\n As a consequence of CFFIBuild and it's late addition of ext_modules, we\n need the equivalent for the ``install`` command to install into platlib\n install-dir rather than purelib.\n \"\"\"\n\n def finalize_options(self):\n self.distribution.ext_modules = get_ext_modules()\n install.finalize_options(self)\n\n\nclass PyTest(test):\n def finalize_options(self):\n test.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n # This means there's a vectors/ folder with the package in here.\n # cd into it, install the vectors package and then refresh sys.path\n if VECTORS_DEPENDENCY not in test_requirements:\n subprocess.check_call(\n [sys.executable, \"setup.py\", \"install\"], cwd=\"vectors\"\n )\n pkg_resources.get_distribution(\"cryptography_vectors\").activate()\n\n def run_tests(self):\n # Import here because in module scope the eggs are not loaded.\n import pytest\n errno = pytest.main(self.test_args)\n sys.exit(errno)\n\n\ndef keywords_with_side_effects(argv):\n \"\"\"\n Get a dictionary with setup keywords that (can) have side effects.\n\n :param argv: A list of strings with command line arguments.\n :returns: A dictionary with keyword arguments for the ``setup()`` function.\n\n This setup.py script uses the setuptools 'setup_requires' feature because\n this is required by the cffi package to compile extension modules. The\n purpose of ``keywords_with_side_effects()`` is to avoid triggering the cffi\n build process as a result of setup.py invocations that don't need the cffi\n module to be built (setup.py serves the dual purpose of exposing package\n metadata).\n\n All of the options listed by ``python setup.py --help`` that print\n information should be recognized here. The commands ``clean``,\n ``egg_info``, ``register``, ``sdist`` and ``upload`` are also recognized.\n Any combination of these options and commands is also supported.\n\n This function was originally based on the `setup.py script`_ of SciPy (see\n also the discussion in `pip issue #25`_).\n\n .. _pip issue #25: https://github.com/pypa/pip/issues/25\n .. 
_setup.py script: https://github.com/scipy/scipy/blob/master/setup.py\n \"\"\"\n no_setup_requires_arguments = (\n '-h', '--help',\n '-n', '--dry-run',\n '-q', '--quiet',\n '-v', '--verbose',\n '-V', '--version',\n '--author',\n '--author-email',\n '--classifiers',\n '--contact',\n '--contact-email',\n '--description',\n '--egg-base',\n '--fullname',\n '--help-commands',\n '--keywords',\n '--licence',\n '--license',\n '--long-description',\n '--maintainer',\n '--maintainer-email',\n '--name',\n '--no-user-cfg',\n '--obsoletes',\n '--platforms',\n '--provides',\n '--requires',\n '--url',\n 'clean',\n 'egg_info',\n 'register',\n 'sdist',\n 'upload',\n )\n\n def is_short_option(argument):\n \"\"\"Check whether a command line argument is a short option.\"\"\"\n return len(argument) >= 2 and argument[0] == '-' and argument[1] != '-'\n\n def expand_short_options(argument):\n \"\"\"Expand combined short options into canonical short options.\"\"\"\n return ('-' + char for char in argument[1:])\n\n def argument_without_setup_requirements(argv, i):\n \"\"\"Check whether a command line argument needs setup requirements.\"\"\"\n if argv[i] in no_setup_requires_arguments:\n # Simple case: An argument which is either an option or a command\n # which doesn't need setup requirements.\n return True\n elif (is_short_option(argv[i]) and\n all(option in no_setup_requires_arguments\n for option in expand_short_options(argv[i]))):\n # Not so simple case: Combined short options none of which need\n # setup requirements.\n return True\n elif argv[i - 1:i] == ['--egg-base']:\n # Tricky case: --egg-info takes an argument which should not make\n # us use setup_requires (defeating the purpose of this code).\n return True\n else:\n return False\n\n if all(argument_without_setup_requirements(argv, i)\n for i in range(1, len(argv))):\n return {\n \"cmdclass\": {\n \"build\": DummyCFFIBuild,\n \"install\": DummyCFFIInstall,\n \"test\": DummyPyTest,\n }\n }\n else:\n return {\n \"setup_requires\": requirements,\n \"cmdclass\": {\n \"build\": CFFIBuild,\n \"install\": CFFIInstall,\n \"test\": PyTest,\n }\n }\n\n\nsetup_requires_error = (\"Requested setup command that needs 'setup_requires' \"\n \"while command line arguments implied a side effect \"\n \"free command or option.\")\n\n\nclass DummyCFFIBuild(build):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py build`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise RuntimeError(setup_requires_error)\n\n\nclass DummyCFFIInstall(install):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py install``\n as one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise RuntimeError(setup_requires_error)\n\n\nclass DummyPyTest(test):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py test`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run_tests(self):\n raise RuntimeError(setup_requires_error)\n\n\nwith open(os.path.join(base_dir, \"README.rst\")) as f:\n long_description = f.read()\n\n\nsetup(\n name=about[\"__title__\"],\n version=about[\"__version__\"],\n\n description=about[\"__summary__\"],\n long_description=long_description,\n license=about[\"__license__\"],\n 
url=about[\"__uri__\"],\n\n author=about[\"__author__\"],\n author_email=about[\"__email__\"],\n\n classifiers=[\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.2\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Security :: Cryptography\",\n ],\n\n package_dir={\"\": \"src\"},\n packages=find_packages(where=\"src\", exclude=[\"tests\", \"tests.*\"]),\n include_package_data=True,\n\n install_requires=requirements,\n tests_require=test_requirements,\n\n # for cffi\n zip_safe=False,\n ext_package=\"cryptography\",\n entry_points={\n \"cryptography.backends\": backends,\n },\n **keywords_with_side_effects(sys.argv)\n)\n", "path": "setup.py"}]} | 3,614 | 125 |
gh_patches_debug_40745 | rasdani/github-patches | git_diff | svthalia__concrexit-3652 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The same names with different capitalisation are seen as different
### Describe the bug
When claiming promo requests in the admin site, if the claimant name is entered twice, first without a capital letter and then with one, it is counted as two different persons.
### Expected behaviour
The same name with different capitalisation should still count as the same name.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/promotion/admin.py`
Content:
```
1 """Registers admin interfaces for the models defined in this module."""
2 from django.contrib import admin
3 from django.contrib.admin import ModelAdmin
4
5 from events.services import is_organiser
6 from promotion.forms import PromotionRequestForm
7
8 from .models import PromotionChannel, PromotionRequest
9
10
11 @admin.register(PromotionRequest)
12 class PromotionRequestAdmin(admin.ModelAdmin):
13 """This manages the admin interface for the model items."""
14
15 list_display = ("event", "publish_date", "channel", "assigned_to", "status")
16 list_filter = (
17 "publish_date",
18 "assigned_to",
19 "status",
20 )
21 date_hierarchy = "publish_date"
22 form = PromotionRequestForm
23 actions = ["mark_not_started", "mark_started", "mark_finished", "mark_published"]
24
25 def has_change_permission(self, request, obj=None):
26 if obj is not None and obj.event and is_organiser(request.member, obj.event):
27 return True
28 return super().has_change_permission(request, obj)
29
30 def mark_not_started(self, request, queryset):
31 """Change the status of the event to published."""
32 self._change_published(queryset, PromotionRequest.NOT_STARTED)
33
34 mark_not_started.short_description = "Mark requests as not started"
35
36 def mark_started(self, request, queryset):
37 """Change the status of the event to published."""
38 self._change_published(queryset, PromotionRequest.STARTED)
39
40 mark_started.short_description = "Mark requests as started"
41
42 def mark_finished(self, request, queryset):
43 """Change the status of the event to published."""
44 self._change_published(queryset, PromotionRequest.FINISHED)
45
46 mark_finished.short_description = "Mark requests as finished"
47
48 def mark_published(self, request, queryset):
49 """Change the status of the event to published."""
50 self._change_published(queryset, PromotionRequest.PUBLISHED)
51
52 mark_published.short_description = "Mark requests as published"
53
54 @staticmethod
55 def _change_published(queryset, status):
56 queryset.update(status=status)
57
58
59 @admin.register(PromotionChannel)
60 class PromotionChannelAdmin(ModelAdmin):
61 list_display = ("name", "publisher_reminder_email")
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/promotion/admin.py b/website/promotion/admin.py
--- a/website/promotion/admin.py
+++ b/website/promotion/admin.py
@@ -1,6 +1,9 @@
"""Registers admin interfaces for the models defined in this module."""
+
from django.contrib import admin
from django.contrib.admin import ModelAdmin
+from django.db import models
+from django.db.models.functions import Lower
from events.services import is_organiser
from promotion.forms import PromotionRequestForm
@@ -8,6 +11,75 @@
from .models import PromotionChannel, PromotionRequest
+class CaseInsensitiveFilter(admin.FieldListFilter):
+ def __init__(self, field, request, params, model, model_admin, field_path):
+ self.lookup_kwarg = f"{field_path}__iexact"
+ self.lookup_kwarg2 = f"{field_path}__isnull"
+ self.lookup_val = params.get(self.lookup_kwarg)
+ self.lookup_val2 = params.get(self.lookup_kwarg2)
+ super().__init__(field, request, params, model, model_admin, field_path)
+ self.empty_value_display = model_admin.get_empty_value_display()
+ queryset = model_admin.get_queryset(request)
+ lookup_choices = (
+ queryset.annotate(lowered=Lower(field.name))
+ .order_by(field.name)
+ .distinct()
+ .values_list(field.name, flat=True)
+ )
+ self.lookup_choices = set(
+ map(lambda x: x.lower() if x is not None else x, lookup_choices)
+ )
+
+ def get_facet_counts(self, pk_attname, filtered_qs):
+ return {
+ f"{i}__c": models.Count(
+ pk_attname,
+ filter=models.Q(
+ (self.lookup_kwarg, value)
+ if value is not None
+ else (self.lookup_kwarg2, True)
+ ),
+ )
+ for i, value in enumerate(self.lookup_choices)
+ }
+
+ def choices(self, changelist):
+ add_facets = changelist.add_facets
+ facet_counts = self.get_facet_queryset(changelist)
+ yield {
+ "selected": self.lookup_val is None,
+ "query_string": changelist.get_query_string(
+ remove=[self.lookup_kwarg, self.lookup_kwarg2]
+ ),
+ "display": "All",
+ }
+ include_none = False
+ empty_title = self.empty_value_display
+ for key, val in enumerate(self.lookup_choices):
+ if add_facets:
+ count = facet_counts[f"{key}__c"]
+ if val is None:
+ include_none = True
+ empty_title = f"{empty_title} ({count})" if add_facets else empty_title
+ continue
+ yield {
+ "selected": self.lookup_val is not None and val in self.lookup_val,
+ "query_string": changelist.get_query_string({self.lookup_kwarg: val}),
+ "display": f"{val} ({count})" if add_facets else val,
+ }
+ if include_none:
+ yield {
+ "selected": self.lookup_val2 is True,
+ "query_string": changelist.get_query_string(
+ {self.lookup_kwarg2: "True"}, remove=[self.lookup_kwarg]
+ ),
+ "display": empty_title,
+ }
+
+ def expected_parameters(self):
+ return [self.lookup_kwarg, self.lookup_kwarg2]
+
+
@admin.register(PromotionRequest)
class PromotionRequestAdmin(admin.ModelAdmin):
"""This manages the admin interface for the model items."""
@@ -15,7 +87,7 @@
list_display = ("event", "publish_date", "channel", "assigned_to", "status")
list_filter = (
"publish_date",
- "assigned_to",
+ ("assigned_to", CaseInsensitiveFilter),
"status",
)
date_hierarchy = "publish_date"
| {"golden_diff": "diff --git a/website/promotion/admin.py b/website/promotion/admin.py\n--- a/website/promotion/admin.py\n+++ b/website/promotion/admin.py\n@@ -1,6 +1,9 @@\n \"\"\"Registers admin interfaces for the models defined in this module.\"\"\"\n+\n from django.contrib import admin\n from django.contrib.admin import ModelAdmin\n+from django.db import models\n+from django.db.models.functions import Lower\n \n from events.services import is_organiser\n from promotion.forms import PromotionRequestForm\n@@ -8,6 +11,75 @@\n from .models import PromotionChannel, PromotionRequest\n \n \n+class CaseInsensitiveFilter(admin.FieldListFilter):\n+ def __init__(self, field, request, params, model, model_admin, field_path):\n+ self.lookup_kwarg = f\"{field_path}__iexact\"\n+ self.lookup_kwarg2 = f\"{field_path}__isnull\"\n+ self.lookup_val = params.get(self.lookup_kwarg)\n+ self.lookup_val2 = params.get(self.lookup_kwarg2)\n+ super().__init__(field, request, params, model, model_admin, field_path)\n+ self.empty_value_display = model_admin.get_empty_value_display()\n+ queryset = model_admin.get_queryset(request)\n+ lookup_choices = (\n+ queryset.annotate(lowered=Lower(field.name))\n+ .order_by(field.name)\n+ .distinct()\n+ .values_list(field.name, flat=True)\n+ )\n+ self.lookup_choices = set(\n+ map(lambda x: x.lower() if x is not None else x, lookup_choices)\n+ )\n+\n+ def get_facet_counts(self, pk_attname, filtered_qs):\n+ return {\n+ f\"{i}__c\": models.Count(\n+ pk_attname,\n+ filter=models.Q(\n+ (self.lookup_kwarg, value)\n+ if value is not None\n+ else (self.lookup_kwarg2, True)\n+ ),\n+ )\n+ for i, value in enumerate(self.lookup_choices)\n+ }\n+\n+ def choices(self, changelist):\n+ add_facets = changelist.add_facets\n+ facet_counts = self.get_facet_queryset(changelist)\n+ yield {\n+ \"selected\": self.lookup_val is None,\n+ \"query_string\": changelist.get_query_string(\n+ remove=[self.lookup_kwarg, self.lookup_kwarg2]\n+ ),\n+ \"display\": \"All\",\n+ }\n+ include_none = False\n+ empty_title = self.empty_value_display\n+ for key, val in enumerate(self.lookup_choices):\n+ if add_facets:\n+ count = facet_counts[f\"{key}__c\"]\n+ if val is None:\n+ include_none = True\n+ empty_title = f\"{empty_title} ({count})\" if add_facets else empty_title\n+ continue\n+ yield {\n+ \"selected\": self.lookup_val is not None and val in self.lookup_val,\n+ \"query_string\": changelist.get_query_string({self.lookup_kwarg: val}),\n+ \"display\": f\"{val} ({count})\" if add_facets else val,\n+ }\n+ if include_none:\n+ yield {\n+ \"selected\": self.lookup_val2 is True,\n+ \"query_string\": changelist.get_query_string(\n+ {self.lookup_kwarg2: \"True\"}, remove=[self.lookup_kwarg]\n+ ),\n+ \"display\": empty_title,\n+ }\n+\n+ def expected_parameters(self):\n+ return [self.lookup_kwarg, self.lookup_kwarg2]\n+\n+\n @admin.register(PromotionRequest)\n class PromotionRequestAdmin(admin.ModelAdmin):\n \"\"\"This manages the admin interface for the model items.\"\"\"\n@@ -15,7 +87,7 @@\n list_display = (\"event\", \"publish_date\", \"channel\", \"assigned_to\", \"status\")\n list_filter = (\n \"publish_date\",\n- \"assigned_to\",\n+ (\"assigned_to\", CaseInsensitiveFilter),\n \"status\",\n )\n date_hierarchy = \"publish_date\"\n", "issue": "The same names with different capitalisation are seen as different\n### Describe the bug\r\nWhen claiming promo requests in the admin site, if the claimant name is entered twice, first without capital and then with one. 
It is counted as two different persons.\r\n\r\n### Expected behaviour\r\nThe same name with different capitalisation should still count as the same name.\r\n\n", "before_files": [{"content": "\"\"\"Registers admin interfaces for the models defined in this module.\"\"\"\nfrom django.contrib import admin\nfrom django.contrib.admin import ModelAdmin\n\nfrom events.services import is_organiser\nfrom promotion.forms import PromotionRequestForm\n\nfrom .models import PromotionChannel, PromotionRequest\n\n\[email protected](PromotionRequest)\nclass PromotionRequestAdmin(admin.ModelAdmin):\n \"\"\"This manages the admin interface for the model items.\"\"\"\n\n list_display = (\"event\", \"publish_date\", \"channel\", \"assigned_to\", \"status\")\n list_filter = (\n \"publish_date\",\n \"assigned_to\",\n \"status\",\n )\n date_hierarchy = \"publish_date\"\n form = PromotionRequestForm\n actions = [\"mark_not_started\", \"mark_started\", \"mark_finished\", \"mark_published\"]\n\n def has_change_permission(self, request, obj=None):\n if obj is not None and obj.event and is_organiser(request.member, obj.event):\n return True\n return super().has_change_permission(request, obj)\n\n def mark_not_started(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.NOT_STARTED)\n\n mark_not_started.short_description = \"Mark requests as not started\"\n\n def mark_started(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.STARTED)\n\n mark_started.short_description = \"Mark requests as started\"\n\n def mark_finished(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.FINISHED)\n\n mark_finished.short_description = \"Mark requests as finished\"\n\n def mark_published(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.PUBLISHED)\n\n mark_published.short_description = \"Mark requests as published\"\n\n @staticmethod\n def _change_published(queryset, status):\n queryset.update(status=status)\n\n\[email protected](PromotionChannel)\nclass PromotionChannelAdmin(ModelAdmin):\n list_display = (\"name\", \"publisher_reminder_email\")\n", "path": "website/promotion/admin.py"}], "after_files": [{"content": "\"\"\"Registers admin interfaces for the models defined in this module.\"\"\"\n\nfrom django.contrib import admin\nfrom django.contrib.admin import ModelAdmin\nfrom django.db import models\nfrom django.db.models.functions import Lower\n\nfrom events.services import is_organiser\nfrom promotion.forms import PromotionRequestForm\n\nfrom .models import PromotionChannel, PromotionRequest\n\n\nclass CaseInsensitiveFilter(admin.FieldListFilter):\n def __init__(self, field, request, params, model, model_admin, field_path):\n self.lookup_kwarg = f\"{field_path}__iexact\"\n self.lookup_kwarg2 = f\"{field_path}__isnull\"\n self.lookup_val = params.get(self.lookup_kwarg)\n self.lookup_val2 = params.get(self.lookup_kwarg2)\n super().__init__(field, request, params, model, model_admin, field_path)\n self.empty_value_display = model_admin.get_empty_value_display()\n queryset = model_admin.get_queryset(request)\n lookup_choices = (\n queryset.annotate(lowered=Lower(field.name))\n .order_by(field.name)\n .distinct()\n .values_list(field.name, flat=True)\n )\n self.lookup_choices = set(\n map(lambda x: 
x.lower() if x is not None else x, lookup_choices)\n )\n\n def get_facet_counts(self, pk_attname, filtered_qs):\n return {\n f\"{i}__c\": models.Count(\n pk_attname,\n filter=models.Q(\n (self.lookup_kwarg, value)\n if value is not None\n else (self.lookup_kwarg2, True)\n ),\n )\n for i, value in enumerate(self.lookup_choices)\n }\n\n def choices(self, changelist):\n add_facets = changelist.add_facets\n facet_counts = self.get_facet_queryset(changelist)\n yield {\n \"selected\": self.lookup_val is None,\n \"query_string\": changelist.get_query_string(\n remove=[self.lookup_kwarg, self.lookup_kwarg2]\n ),\n \"display\": \"All\",\n }\n include_none = False\n empty_title = self.empty_value_display\n for key, val in enumerate(self.lookup_choices):\n if add_facets:\n count = facet_counts[f\"{key}__c\"]\n if val is None:\n include_none = True\n empty_title = f\"{empty_title} ({count})\" if add_facets else empty_title\n continue\n yield {\n \"selected\": self.lookup_val is not None and val in self.lookup_val,\n \"query_string\": changelist.get_query_string({self.lookup_kwarg: val}),\n \"display\": f\"{val} ({count})\" if add_facets else val,\n }\n if include_none:\n yield {\n \"selected\": self.lookup_val2 is True,\n \"query_string\": changelist.get_query_string(\n {self.lookup_kwarg2: \"True\"}, remove=[self.lookup_kwarg]\n ),\n \"display\": empty_title,\n }\n\n def expected_parameters(self):\n return [self.lookup_kwarg, self.lookup_kwarg2]\n\n\[email protected](PromotionRequest)\nclass PromotionRequestAdmin(admin.ModelAdmin):\n \"\"\"This manages the admin interface for the model items.\"\"\"\n\n list_display = (\"event\", \"publish_date\", \"channel\", \"assigned_to\", \"status\")\n list_filter = (\n \"publish_date\",\n (\"assigned_to\", CaseInsensitiveFilter),\n \"status\",\n )\n date_hierarchy = \"publish_date\"\n form = PromotionRequestForm\n actions = [\"mark_not_started\", \"mark_started\", \"mark_finished\", \"mark_published\"]\n\n def has_change_permission(self, request, obj=None):\n if obj is not None and obj.event and is_organiser(request.member, obj.event):\n return True\n return super().has_change_permission(request, obj)\n\n def mark_not_started(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.NOT_STARTED)\n\n mark_not_started.short_description = \"Mark requests as not started\"\n\n def mark_started(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.STARTED)\n\n mark_started.short_description = \"Mark requests as started\"\n\n def mark_finished(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.FINISHED)\n\n mark_finished.short_description = \"Mark requests as finished\"\n\n def mark_published(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.PUBLISHED)\n\n mark_published.short_description = \"Mark requests as published\"\n\n @staticmethod\n def _change_published(queryset, status):\n queryset.update(status=status)\n\n\[email protected](PromotionChannel)\nclass PromotionChannelAdmin(ModelAdmin):\n list_display = (\"name\", \"publisher_reminder_email\")\n", "path": "website/promotion/admin.py"}]} | 904 | 874 |
gh_patches_debug_5275 | rasdani/github-patches | git_diff | Netflix__lemur-267 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Create new roles for unknown owners
Currently, when you create an authority with an unknown owner, we get an error because we assumed that the owner is creating the authority.
This is not always the case, as sometimes teams will create authorities on behalf of other teams. We should just go ahead and create an owner_role if one does not exist.
```
2016-03-31 16:21:39,507 ERROR: 'NoneType' object has no attribute 'authority' [in /apps/lemur/lemur/common/utils.py:60]
Traceback (most recent call last):
File "/apps/lemur/lemur/common/utils.py", line 46, in wrapper
resp = f(*args, **kwargs)
File "/apps/lemur/lemur/authorities/views.py", line 201, in post
return service.create(args)
File "/apps/lemur/lemur/authorities/service.py", line 106, in create
owner_role.authority = authority
AttributeError: 'NoneType' object has no attribute 'authority'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lemur/authorities/service.py`
Content:
```
1 """
2 .. module: lemur.authorities.service
3 :platform: Unix
4 :synopsis: This module contains all of the services level functions used to
5 administer authorities in Lemur
6 :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more
7 :license: Apache, see LICENSE for more details.
8 .. moduleauthor:: Kevin Glisson <[email protected]>
9
10 """
11 from flask import g
12 from flask import current_app
13
14 from lemur import database
15 from lemur.authorities.models import Authority
16 from lemur.roles import service as role_service
17 from lemur.notifications import service as notification_service
18
19 from lemur.roles.models import Role
20 from lemur.certificates.models import Certificate
21
22 from lemur.plugins.base import plugins
23
24
25 def update(authority_id, description=None, owner=None, active=None, roles=None):
26 """
27 Update a an authority with new values.
28
29 :param authority_id:
30 :param roles: roles that are allowed to use this authority
31 :return:
32 """
33 authority = get(authority_id)
34 if roles:
35 authority = database.update_list(authority, 'roles', Role, roles)
36
37 if active:
38 authority.active = active
39
40 authority.description = description
41 authority.owner = owner
42 return database.update(authority)
43
44
45 def create(kwargs):
46 """
47 Create a new authority.
48
49 :return:
50 """
51
52 issuer = plugins.get(kwargs.get('pluginName'))
53
54 kwargs['creator'] = g.current_user.email
55 cert_body, intermediate, issuer_roles = issuer.create_authority(kwargs)
56
57 cert = Certificate(cert_body, chain=intermediate)
58 cert.owner = kwargs['ownerEmail']
59
60 if kwargs['caType'] == 'subca':
61 cert.description = "This is the ROOT certificate for the {0} sub certificate authority the parent \
62 authority is {1}.".format(kwargs.get('caName'), kwargs.get('caParent'))
63 else:
64 cert.description = "This is the ROOT certificate for the {0} certificate authority.".format(
65 kwargs.get('caName')
66 )
67
68 cert.user = g.current_user
69
70 cert.notifications = notification_service.create_default_expiration_notifications(
71 'DEFAULT_SECURITY',
72 current_app.config.get('LEMUR_SECURITY_TEAM_EMAIL')
73 )
74
75 # we create and attach any roles that the issuer gives us
76 role_objs = []
77 for r in issuer_roles:
78
79 role = role_service.create(
80 r['name'],
81 password=r['password'],
82 description="{0} auto generated role".format(kwargs.get('pluginName')),
83 username=r['username'])
84
85 # the user creating the authority should be able to administer it
86 if role.username == 'admin':
87 g.current_user.roles.append(role)
88
89 role_objs.append(role)
90
91 authority = Authority(
92 kwargs.get('caName'),
93 kwargs['ownerEmail'],
94 kwargs['pluginName'],
95 cert_body,
96 description=kwargs['caDescription'],
97 chain=intermediate,
98 roles=role_objs
99 )
100
101 database.update(cert)
102 authority = database.create(authority)
103
104 # the owning dl or role should have this authority associated with it
105 owner_role = role_service.get_by_name(kwargs['ownerEmail'])
106 owner_role.authority = authority
107
108 g.current_user.authorities.append(authority)
109
110 return authority
111
112
113 def get_all():
114 """
115 Get all authorities that are currently in Lemur.
116
117 :rtype : List
118 :return:
119 """
120 query = database.session_query(Authority)
121 return database.find_all(query, Authority, {}).all()
122
123
124 def get(authority_id):
125 """
126 Retrieves an authority given it's ID
127
128 :param authority_id:
129 :return:
130 """
131 return database.get(Authority, authority_id)
132
133
134 def get_by_name(authority_name):
135 """
136 Retrieves an authority given it's name.
137
138 :param authority_name:
139 :return:
140 """
141 return database.get(Authority, authority_name, field='name')
142
143
144 def get_authority_role(ca_name):
145 """
146 Attempts to get the authority role for a given ca uses current_user
147 as a basis for accomplishing that.
148
149 :param ca_name:
150 """
151 if g.current_user.is_admin:
152 authority = get_by_name(ca_name)
153 # TODO we should pick admin ca roles for admin
154 return authority.roles[0]
155 else:
156 for role in g.current_user.roles:
157 if role.authority:
158 if role.authority.name == ca_name:
159 return role
160
161
162 def render(args):
163 """
164 Helper that helps us render the REST Api responses.
165 :param args:
166 :return:
167 """
168 query = database.session_query(Authority)
169 sort_by = args.pop('sort_by')
170 sort_dir = args.pop('sort_dir')
171 page = args.pop('page')
172 count = args.pop('count')
173 filt = args.pop('filter')
174
175 if filt:
176 terms = filt.split(';')
177 if 'active' in filt: # this is really weird but strcmp seems to not work here??
178 query = query.filter(Authority.active == terms[1])
179 else:
180 query = database.filter(query, Authority, terms)
181
182 # we make sure that a user can only use an authority they either own are are a member of - admins can see all
183 if not g.current_user.is_admin:
184 authority_ids = []
185 for role in g.current_user.roles:
186 if role.authority:
187 authority_ids.append(role.authority.id)
188 query = query.filter(Authority.id.in_(authority_ids))
189
190 query = database.find_all(query, Authority, args)
191
192 if sort_by and sort_dir:
193 query = database.sort(query, Authority, sort_by, sort_dir)
194
195 return database.paginate(query, page, count)
196
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lemur/authorities/service.py b/lemur/authorities/service.py
--- a/lemur/authorities/service.py
+++ b/lemur/authorities/service.py
@@ -103,6 +103,10 @@
# the owning dl or role should have this authority associated with it
owner_role = role_service.get_by_name(kwargs['ownerEmail'])
+
+ if not owner_role:
+ owner_role = role_service.create(kwargs['ownerEmail'])
+
owner_role.authority = authority
g.current_user.authorities.append(authority)
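
For illustration, a minimal self-contained sketch of the behaviour the patch introduces: fall back to creating a role when the owner email is unknown instead of dereferencing `None`. The `Role` class and role-service stand-ins below are hypothetical, not Lemur's real models.

```python
# Hypothetical stand-ins for lemur.roles.service, for illustration only.
_roles = {}


class Role:
    def __init__(self, name):
        self.name = name
        self.authority = None


def get_by_name(name):
    return _roles.get(name)


def create(name):
    return _roles.setdefault(name, Role(name))


def attach_owner_role(owner_email, authority):
    # Mirrors the patched create(): if the owner is unknown, create the
    # role first instead of calling .authority on None.
    owner_role = get_by_name(owner_email)
    if not owner_role:
        owner_role = create(owner_email)
    owner_role.authority = authority
    return owner_role


if __name__ == "__main__":
    role = attach_owner_role("[email protected]", authority="example-ca")
    print(role.name, role.authority)  # [email protected] example-ca
```

Running it prints the newly created role and its attached authority, which is exactly the path the original code never handled.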
| {"golden_diff": "diff --git a/lemur/authorities/service.py b/lemur/authorities/service.py\n--- a/lemur/authorities/service.py\n+++ b/lemur/authorities/service.py\n@@ -103,6 +103,10 @@\n \n # the owning dl or role should have this authority associated with it\n owner_role = role_service.get_by_name(kwargs['ownerEmail'])\n+\n+ if not owner_role:\n+ owner_role = role_service.create(kwargs['ownerEmail'])\n+\n owner_role.authority = authority\n \n g.current_user.authorities.append(authority)\n", "issue": "Create new roles for unknown owners\nCurrently when you create an authority with an unknown owner we get an error because we assumed that the owner is creating the authority.\n\nThis is not always the case as sometimes teams will create authorities on the behalf of other teams. We should just go ahead an create an owner_role if one does not exist.\n\n```\n2016-03-31 16:21:39,507 ERROR: 'NoneType' object has no attribute 'authority' [in /apps/lemur/lemur/common/utils.py:60]\nTraceback (most recent call last):\n File \"/apps/lemur/lemur/common/utils.py\", line 46, in wrapper\n resp = f(*args, **kwargs)\n File \"/apps/lemur/lemur/authorities/views.py\", line 201, in post\n return service.create(args)\n File \"/apps/lemur/lemur/authorities/service.py\", line 106, in create\n owner_role.authority = authority\nAttributeError: 'NoneType' object has no attribute 'authority'\n```\n\n", "before_files": [{"content": "\"\"\"\n.. module: lemur.authorities.service\n :platform: Unix\n :synopsis: This module contains all of the services level functions used to\n administer authorities in Lemur\n :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n.. moduleauthor:: Kevin Glisson <[email protected]>\n\n\"\"\"\nfrom flask import g\nfrom flask import current_app\n\nfrom lemur import database\nfrom lemur.authorities.models import Authority\nfrom lemur.roles import service as role_service\nfrom lemur.notifications import service as notification_service\n\nfrom lemur.roles.models import Role\nfrom lemur.certificates.models import Certificate\n\nfrom lemur.plugins.base import plugins\n\n\ndef update(authority_id, description=None, owner=None, active=None, roles=None):\n \"\"\"\n Update a an authority with new values.\n\n :param authority_id:\n :param roles: roles that are allowed to use this authority\n :return:\n \"\"\"\n authority = get(authority_id)\n if roles:\n authority = database.update_list(authority, 'roles', Role, roles)\n\n if active:\n authority.active = active\n\n authority.description = description\n authority.owner = owner\n return database.update(authority)\n\n\ndef create(kwargs):\n \"\"\"\n Create a new authority.\n\n :return:\n \"\"\"\n\n issuer = plugins.get(kwargs.get('pluginName'))\n\n kwargs['creator'] = g.current_user.email\n cert_body, intermediate, issuer_roles = issuer.create_authority(kwargs)\n\n cert = Certificate(cert_body, chain=intermediate)\n cert.owner = kwargs['ownerEmail']\n\n if kwargs['caType'] == 'subca':\n cert.description = \"This is the ROOT certificate for the {0} sub certificate authority the parent \\\n authority is {1}.\".format(kwargs.get('caName'), kwargs.get('caParent'))\n else:\n cert.description = \"This is the ROOT certificate for the {0} certificate authority.\".format(\n kwargs.get('caName')\n )\n\n cert.user = g.current_user\n\n cert.notifications = notification_service.create_default_expiration_notifications(\n 'DEFAULT_SECURITY',\n current_app.config.get('LEMUR_SECURITY_TEAM_EMAIL')\n )\n\n # we create 
and attach any roles that the issuer gives us\n role_objs = []\n for r in issuer_roles:\n\n role = role_service.create(\n r['name'],\n password=r['password'],\n description=\"{0} auto generated role\".format(kwargs.get('pluginName')),\n username=r['username'])\n\n # the user creating the authority should be able to administer it\n if role.username == 'admin':\n g.current_user.roles.append(role)\n\n role_objs.append(role)\n\n authority = Authority(\n kwargs.get('caName'),\n kwargs['ownerEmail'],\n kwargs['pluginName'],\n cert_body,\n description=kwargs['caDescription'],\n chain=intermediate,\n roles=role_objs\n )\n\n database.update(cert)\n authority = database.create(authority)\n\n # the owning dl or role should have this authority associated with it\n owner_role = role_service.get_by_name(kwargs['ownerEmail'])\n owner_role.authority = authority\n\n g.current_user.authorities.append(authority)\n\n return authority\n\n\ndef get_all():\n \"\"\"\n Get all authorities that are currently in Lemur.\n\n :rtype : List\n :return:\n \"\"\"\n query = database.session_query(Authority)\n return database.find_all(query, Authority, {}).all()\n\n\ndef get(authority_id):\n \"\"\"\n Retrieves an authority given it's ID\n\n :param authority_id:\n :return:\n \"\"\"\n return database.get(Authority, authority_id)\n\n\ndef get_by_name(authority_name):\n \"\"\"\n Retrieves an authority given it's name.\n\n :param authority_name:\n :return:\n \"\"\"\n return database.get(Authority, authority_name, field='name')\n\n\ndef get_authority_role(ca_name):\n \"\"\"\n Attempts to get the authority role for a given ca uses current_user\n as a basis for accomplishing that.\n\n :param ca_name:\n \"\"\"\n if g.current_user.is_admin:\n authority = get_by_name(ca_name)\n # TODO we should pick admin ca roles for admin\n return authority.roles[0]\n else:\n for role in g.current_user.roles:\n if role.authority:\n if role.authority.name == ca_name:\n return role\n\n\ndef render(args):\n \"\"\"\n Helper that helps us render the REST Api responses.\n :param args:\n :return:\n \"\"\"\n query = database.session_query(Authority)\n sort_by = args.pop('sort_by')\n sort_dir = args.pop('sort_dir')\n page = args.pop('page')\n count = args.pop('count')\n filt = args.pop('filter')\n\n if filt:\n terms = filt.split(';')\n if 'active' in filt: # this is really weird but strcmp seems to not work here??\n query = query.filter(Authority.active == terms[1])\n else:\n query = database.filter(query, Authority, terms)\n\n # we make sure that a user can only use an authority they either own are are a member of - admins can see all\n if not g.current_user.is_admin:\n authority_ids = []\n for role in g.current_user.roles:\n if role.authority:\n authority_ids.append(role.authority.id)\n query = query.filter(Authority.id.in_(authority_ids))\n\n query = database.find_all(query, Authority, args)\n\n if sort_by and sort_dir:\n query = database.sort(query, Authority, sort_by, sort_dir)\n\n return database.paginate(query, page, count)\n", "path": "lemur/authorities/service.py"}], "after_files": [{"content": "\"\"\"\n.. module: lemur.authorities.service\n :platform: Unix\n :synopsis: This module contains all of the services level functions used to\n administer authorities in Lemur\n :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n.. 
moduleauthor:: Kevin Glisson <[email protected]>\n\n\"\"\"\nfrom flask import g\nfrom flask import current_app\n\nfrom lemur import database\nfrom lemur.authorities.models import Authority\nfrom lemur.roles import service as role_service\nfrom lemur.notifications import service as notification_service\n\nfrom lemur.roles.models import Role\nfrom lemur.certificates.models import Certificate\n\nfrom lemur.plugins.base import plugins\n\n\ndef update(authority_id, description=None, owner=None, active=None, roles=None):\n \"\"\"\n Update a an authority with new values.\n\n :param authority_id:\n :param roles: roles that are allowed to use this authority\n :return:\n \"\"\"\n authority = get(authority_id)\n if roles:\n authority = database.update_list(authority, 'roles', Role, roles)\n\n if active:\n authority.active = active\n\n authority.description = description\n authority.owner = owner\n return database.update(authority)\n\n\ndef create(kwargs):\n \"\"\"\n Create a new authority.\n\n :return:\n \"\"\"\n\n issuer = plugins.get(kwargs.get('pluginName'))\n\n kwargs['creator'] = g.current_user.email\n cert_body, intermediate, issuer_roles = issuer.create_authority(kwargs)\n\n cert = Certificate(cert_body, chain=intermediate)\n cert.owner = kwargs['ownerEmail']\n\n if kwargs['caType'] == 'subca':\n cert.description = \"This is the ROOT certificate for the {0} sub certificate authority the parent \\\n authority is {1}.\".format(kwargs.get('caName'), kwargs.get('caParent'))\n else:\n cert.description = \"This is the ROOT certificate for the {0} certificate authority.\".format(\n kwargs.get('caName')\n )\n\n cert.user = g.current_user\n\n cert.notifications = notification_service.create_default_expiration_notifications(\n 'DEFAULT_SECURITY',\n current_app.config.get('LEMUR_SECURITY_TEAM_EMAIL')\n )\n\n # we create and attach any roles that the issuer gives us\n role_objs = []\n for r in issuer_roles:\n\n role = role_service.create(\n r['name'],\n password=r['password'],\n description=\"{0} auto generated role\".format(kwargs.get('pluginName')),\n username=r['username'])\n\n # the user creating the authority should be able to administer it\n if role.username == 'admin':\n g.current_user.roles.append(role)\n\n role_objs.append(role)\n\n authority = Authority(\n kwargs.get('caName'),\n kwargs['ownerEmail'],\n kwargs['pluginName'],\n cert_body,\n description=kwargs['caDescription'],\n chain=intermediate,\n roles=role_objs\n )\n\n database.update(cert)\n authority = database.create(authority)\n\n # the owning dl or role should have this authority associated with it\n owner_role = role_service.get_by_name(kwargs['ownerEmail'])\n\n if not owner_role:\n owner_role = role_service.create(kwargs['ownerEmail'])\n\n owner_role.authority = authority\n\n g.current_user.authorities.append(authority)\n\n return authority\n\n\ndef get_all():\n \"\"\"\n Get all authorities that are currently in Lemur.\n\n :rtype : List\n :return:\n \"\"\"\n query = database.session_query(Authority)\n return database.find_all(query, Authority, {}).all()\n\n\ndef get(authority_id):\n \"\"\"\n Retrieves an authority given it's ID\n\n :param authority_id:\n :return:\n \"\"\"\n return database.get(Authority, authority_id)\n\n\ndef get_by_name(authority_name):\n \"\"\"\n Retrieves an authority given it's name.\n\n :param authority_name:\n :return:\n \"\"\"\n return database.get(Authority, authority_name, field='name')\n\n\ndef get_authority_role(ca_name):\n \"\"\"\n Attempts to get the authority role for a given ca uses current_user\n as a 
basis for accomplishing that.\n\n :param ca_name:\n \"\"\"\n if g.current_user.is_admin:\n authority = get_by_name(ca_name)\n # TODO we should pick admin ca roles for admin\n return authority.roles[0]\n else:\n for role in g.current_user.roles:\n if role.authority:\n if role.authority.name == ca_name:\n return role\n\n\ndef render(args):\n \"\"\"\n Helper that helps us render the REST Api responses.\n :param args:\n :return:\n \"\"\"\n query = database.session_query(Authority)\n sort_by = args.pop('sort_by')\n sort_dir = args.pop('sort_dir')\n page = args.pop('page')\n count = args.pop('count')\n filt = args.pop('filter')\n\n if filt:\n terms = filt.split(';')\n if 'active' in filt: # this is really weird but strcmp seems to not work here??\n query = query.filter(Authority.active == terms[1])\n else:\n query = database.filter(query, Authority, terms)\n\n # we make sure that a user can only use an authority they either own are are a member of - admins can see all\n if not g.current_user.is_admin:\n authority_ids = []\n for role in g.current_user.roles:\n if role.authority:\n authority_ids.append(role.authority.id)\n query = query.filter(Authority.id.in_(authority_ids))\n\n query = database.find_all(query, Authority, args)\n\n if sort_by and sort_dir:\n query = database.sort(query, Authority, sort_by, sort_dir)\n\n return database.paginate(query, page, count)\n", "path": "lemur/authorities/service.py"}]} | 2,234 | 129 |
gh_patches_debug_9988 | rasdani/github-patches | git_diff | certbot__certbot-8310 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Reduce "During handling of the above exception, another exception occurred" CLI output
Sometimes when encountering exceptions, Certbot outputs multiple stack traces on the CLI which are almost identical.
These take up a lot of room, and make any other important output hard to read.
As part of the terminal output revamp, we should find a way to improve this so it is more compact and not so duplicative.
I am not sure if it is possible to tackle this in a generic way, or whether we would need to tackle specific instances where this has been a problem.
e.g:
$ sudo certbot install --nginx --cert-name random27579.example.org
Saving debug log to /var/log/letsencrypt/letsencrypt.log
Plugins selected: Authenticator None, Installer nginx
Deploying Certificate to VirtualHost /etc/nginx/sites-enabled/default
Deploying Certificate to VirtualHost /etc/nginx/sites-enabled/default
Rolling back to previous server configuration...
Encountered exception during recovery:
Traceback (most recent call last):
File "/home/alex/devel/certbot/certbot/certbot/_internal/client.py", line 530, in deploy_certificate
self.installer.restart()
File "/home/alex/devel/certbot/certbot-nginx/certbot_nginx/_internal/configurator.py", line 918, in restart
nginx_restart(self.conf('ctl'), self.nginx_conf, self.conf('sleep-seconds'))
File "/home/alex/devel/certbot/certbot-nginx/certbot_nginx/_internal/configurator.py", line 1199, in nginx_restart
raise errors.MisconfigurationError(
certbot.errors.MisconfigurationError: nginx restart failed:
nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)
nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)
nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)
nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)
nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)
nginx: [emerg] still could not bind()
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/alex/devel/certbot/certbot/certbot/_internal/error_handler.py", line 125, in _call_registered
self.funcs[-1]()
File "/home/alex/devel/certbot/certbot/certbot/_internal/client.py", line 630, in _rollback_and_restart
self.installer.restart()
File "/home/alex/devel/certbot/certbot-nginx/certbot_nginx/_internal/configurator.py", line 918, in restart
nginx_restart(self.conf('ctl'), self.nginx_conf, self.conf('sleep-seconds'))
File "/home/alex/devel/certbot/certbot-nginx/certbot_nginx/_internal/configurator.py", line 1199, in nginx_restart
raise errors.MisconfigurationError(
certbot.errors.MisconfigurationError: nginx restart failed:
nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)
nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)
nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)
nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)
nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)
nginx: [emerg] still could not bind()
nginx restart failed:
nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)
nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)
nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)
nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)
nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)
nginx: [emerg] still could not bind()
IMPORTANT NOTES:
- An error occurred and we failed to restore your config and restart
your server. Please post to
https://community.letsencrypt.org/c/help with details about your
configuration and this error you received.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `certbot/certbot/_internal/error_handler.py`
Content:
```
1 """Registers functions to be called if an exception or signal occurs."""
2 import functools
3 import logging
4 import signal
5 import traceback
6
7 from acme.magic_typing import Any
8 from acme.magic_typing import Callable
9 from acme.magic_typing import Dict
10 from acme.magic_typing import List
11 from acme.magic_typing import Union
12 from certbot import errors
13 from certbot.compat import os
14
15 logger = logging.getLogger(__name__)
16
17
18 # _SIGNALS stores the signals that will be handled by the ErrorHandler. These
19 # signals were chosen as their default handler terminates the process and could
20 # potentially occur from inside Python. Signals such as SIGILL were not
21 # included as they could be a sign of something devious and we should terminate
22 # immediately.
23 if os.name != "nt":
24 _SIGNALS = [signal.SIGTERM]
25 for signal_code in [signal.SIGHUP, signal.SIGQUIT,
26 signal.SIGXCPU, signal.SIGXFSZ]:
27 # Adding only those signals that their default action is not Ignore.
28 # This is platform-dependent, so we check it dynamically.
29 if signal.getsignal(signal_code) != signal.SIG_IGN:
30 _SIGNALS.append(signal_code)
31 else:
32 # POSIX signals are not implemented natively in Windows, but emulated from the C runtime.
33 # As consumed by CPython, most of handlers on theses signals are useless, in particular
34 # SIGTERM: for instance, os.kill(pid, signal.SIGTERM) will call TerminateProcess, that stops
35 # immediately the process without calling the attached handler. Besides, non-POSIX signals
36 # (CTRL_C_EVENT and CTRL_BREAK_EVENT) are implemented in a console context to handle the
37 # CTRL+C event to a process launched from the console. Only CTRL_C_EVENT has a reliable
38 # behavior in fact, and maps to the handler to SIGINT. However in this case, a
39 # KeyboardInterrupt is raised, that will be handled by ErrorHandler through the context manager
40 # protocol. Finally, no signal on Windows is electable to be handled using ErrorHandler.
41 #
42 # Refs: https://stackoverflow.com/a/35792192, https://maruel.ca/post/python_windows_signal,
43 # https://docs.python.org/2/library/os.html#os.kill,
44 # https://www.reddit.com/r/Python/comments/1dsblt/windows_command_line_automation_ctrlc_question
45 _SIGNALS = []
46
47
48 class ErrorHandler(object):
49 """Context manager for running code that must be cleaned up on failure.
50
51 The context manager allows you to register functions that will be called
52 when an exception (excluding SystemExit) or signal is encountered.
53 Usage::
54
55 handler = ErrorHandler(cleanup1_func, *cleanup1_args, **cleanup1_kwargs)
56 handler.register(cleanup2_func, *cleanup2_args, **cleanup2_kwargs)
57
58 with handler:
59 do_something()
60
61 Or for one cleanup function::
62
63 with ErrorHandler(func, args, kwargs):
64 do_something()
65
66 If an exception is raised out of do_something, the cleanup functions will
67 be called in last in first out order. Then the exception is raised.
68 Similarly, if a signal is encountered, the cleanup functions are called
69 followed by the previously received signal handler.
70
71 Each registered cleanup function is called exactly once. If a registered
72 function raises an exception, it is logged and the next function is called.
73 Signals received while the registered functions are executing are
74 deferred until they finish.
75
76 """
77 def __init__(self, func, *args, **kwargs):
78 self.call_on_regular_exit = False
79 self.body_executed = False
80 self.funcs = [] # type: List[Callable[[], Any]]
81 self.prev_handlers = {} # type: Dict[int, Union[int, None, Callable]]
82 self.received_signals = [] # type: List[int]
83 if func is not None:
84 self.register(func, *args, **kwargs)
85
86 def __enter__(self):
87 self.body_executed = False
88 self._set_signal_handlers()
89
90 def __exit__(self, exec_type, exec_value, trace):
91 self.body_executed = True
92 retval = False
93 # SystemExit is ignored to properly handle forks that don't exec
94 if exec_type is SystemExit:
95 return retval
96 if exec_type is None:
97 if not self.call_on_regular_exit:
98 return retval
99 elif exec_type is errors.SignalExit:
100 logger.debug("Encountered signals: %s", self.received_signals)
101 retval = True
102 else:
103 logger.debug("Encountered exception:\n%s", "".join(
104 traceback.format_exception(exec_type, exec_value, trace)))
105
106 self._call_registered()
107 self._reset_signal_handlers()
108 self._call_signals()
109 return retval
110
111 def register(self, func, *args, **kwargs):
112 # type: (Callable, *Any, **Any) -> None
113 """Sets func to be run with the given arguments during cleanup.
114
115 :param function func: function to be called in case of an error
116
117 """
118 self.funcs.append(functools.partial(func, *args, **kwargs))
119
120 def _call_registered(self):
121 """Calls all registered functions"""
122 logger.debug("Calling registered functions")
123 while self.funcs:
124 try:
125 self.funcs[-1]()
126 except Exception: # pylint: disable=broad-except
127 logger.error("Encountered exception during recovery: ", exc_info=True)
128 self.funcs.pop()
129
130 def _set_signal_handlers(self):
131 """Sets signal handlers for signals in _SIGNALS."""
132 for signum in _SIGNALS:
133 prev_handler = signal.getsignal(signum)
134 # If prev_handler is None, the handler was set outside of Python
135 if prev_handler is not None:
136 self.prev_handlers[signum] = prev_handler
137 signal.signal(signum, self._signal_handler)
138
139 def _reset_signal_handlers(self):
140 """Resets signal handlers for signals in _SIGNALS."""
141 for signum in self.prev_handlers:
142 signal.signal(signum, self.prev_handlers[signum])
143 self.prev_handlers.clear()
144
145 def _signal_handler(self, signum, unused_frame):
146 """Replacement function for handling received signals.
147
148 Store the received signal. If we are executing the code block in
149 the body of the context manager, stop by raising signal exit.
150
151 :param int signum: number of current signal
152
153 """
154 self.received_signals.append(signum)
155 if not self.body_executed:
156 raise errors.SignalExit
157
158 def _call_signals(self):
159 """Finally call the deferred signals."""
160 for signum in self.received_signals:
161 logger.debug("Calling signal %s", signum)
162 os.kill(os.getpid(), signum)
163
164 class ExitHandler(ErrorHandler):
165 """Context manager for running code that must be cleaned up.
166
167 Subclass of ErrorHandler, with the same usage and parameters.
168 In addition to cleaning up on all signals, also cleans up on
169 regular exit.
170 """
171 def __init__(self, func, *args, **kwargs):
172 ErrorHandler.__init__(self, func, *args, **kwargs)
173 self.call_on_regular_exit = True
174
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/certbot/certbot/_internal/error_handler.py b/certbot/certbot/_internal/error_handler.py
--- a/certbot/certbot/_internal/error_handler.py
+++ b/certbot/certbot/_internal/error_handler.py
@@ -123,8 +123,10 @@
while self.funcs:
try:
self.funcs[-1]()
- except Exception: # pylint: disable=broad-except
- logger.error("Encountered exception during recovery: ", exc_info=True)
+ except Exception as exc: # pylint: disable=broad-except
+ output = traceback.format_exception_only(type(exc), exc)
+ logger.error("Encountered exception during recovery: %s",
+ ''.join(output).rstrip())
self.funcs.pop()
def _set_signal_handlers(self):
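
As a rough stdlib-only illustration (not Certbot code) of why the patch shortens the log output: `format_exception_only` keeps just the final exception type and message, while `exc_info=True` emits the full traceback, including any chained "During handling of the above exception" block.

```python
import logging
import traceback

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("recovery-demo")


def failing_cleanup():
    raise RuntimeError("nginx restart failed: address already in use")


try:
    failing_cleanup()
except Exception as exc:
    # Old behaviour: the full (possibly chained) traceback.
    # logger.error("Encountered exception during recovery: ", exc_info=True)
    # Patched behaviour: only the last exception line.
    output = traceback.format_exception_only(type(exc), exc)
    logger.error("Encountered exception during recovery: %s",
                 "".join(output).rstrip())
```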
| {"golden_diff": "diff --git a/certbot/certbot/_internal/error_handler.py b/certbot/certbot/_internal/error_handler.py\n--- a/certbot/certbot/_internal/error_handler.py\n+++ b/certbot/certbot/_internal/error_handler.py\n@@ -123,8 +123,10 @@\n while self.funcs:\n try:\n self.funcs[-1]()\n- except Exception: # pylint: disable=broad-except\n- logger.error(\"Encountered exception during recovery: \", exc_info=True)\n+ except Exception as exc: # pylint: disable=broad-except\n+ output = traceback.format_exception_only(type(exc), exc)\n+ logger.error(\"Encountered exception during recovery: %s\",\n+ ''.join(output).rstrip())\n self.funcs.pop()\n \n def _set_signal_handlers(self):\n", "issue": "Reduce \"During handling of the above exception, another exception occurred\" CLI output\nSometimes when encountering exceptions, Certbot outputs multiple stack traces on the CLI which are almost identical.\r\n\r\nThese take up a lot of room, and make any other important output hard to read. \r\n\r\nAs part of the terminal output revamp, we should find a way to improve this so it is more compact and not so duplicative.\r\n\r\nI am not sure if it is possible to tackle this in a generic way, or whether we would need to tackle specific instances where this has been a problem.\r\n\r\ne.g:\r\n\r\n $ sudo certbot install --nginx --cert-name random27579.example.org\r\n Saving debug log to /var/log/letsencrypt/letsencrypt.log\r\n Plugins selected: Authenticator None, Installer nginx\r\n Deploying Certificate to VirtualHost /etc/nginx/sites-enabled/default\r\n Deploying Certificate to VirtualHost /etc/nginx/sites-enabled/default\r\n Rolling back to previous server configuration...\r\n Encountered exception during recovery:\r\n Traceback (most recent call last):\r\n File \"/home/alex/devel/certbot/certbot/certbot/_internal/client.py\", line 530, in deploy_certificate\r\n self.installer.restart()\r\n File \"/home/alex/devel/certbot/certbot-nginx/certbot_nginx/_internal/configurator.py\", line 918, in restart\r\n nginx_restart(self.conf('ctl'), self.nginx_conf, self.conf('sleep-seconds'))\r\n File \"/home/alex/devel/certbot/certbot-nginx/certbot_nginx/_internal/configurator.py\", line 1199, in nginx_restart\r\n raise errors.MisconfigurationError(\r\n certbot.errors.MisconfigurationError: nginx restart failed:\r\n nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)\r\n nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)\r\n nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)\r\n nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)\r\n nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)\r\n nginx: [emerg] still could not bind()\r\n\r\n\r\n During handling of the above exception, another exception occurred:\r\n\r\n Traceback (most recent call last):\r\n File \"/home/alex/devel/certbot/certbot/certbot/_internal/error_handler.py\", line 125, in _call_registered\r\n self.funcs[-1]()\r\n File \"/home/alex/devel/certbot/certbot/certbot/_internal/client.py\", line 630, in _rollback_and_restart\r\n self.installer.restart()\r\n File \"/home/alex/devel/certbot/certbot-nginx/certbot_nginx/_internal/configurator.py\", line 918, in restart\r\n nginx_restart(self.conf('ctl'), self.nginx_conf, self.conf('sleep-seconds'))\r\n File \"/home/alex/devel/certbot/certbot-nginx/certbot_nginx/_internal/configurator.py\", line 1199, in nginx_restart\r\n raise errors.MisconfigurationError(\r\n certbot.errors.MisconfigurationError: nginx restart 
failed:\r\n nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)\r\n nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)\r\n nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)\r\n nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)\r\n nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)\r\n nginx: [emerg] still could not bind()\r\n\r\n nginx restart failed:\r\n nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)\r\n nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)\r\n nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)\r\n nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)\r\n nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)\r\n nginx: [emerg] still could not bind()\r\n\r\n\r\n IMPORTANT NOTES:\r\n - An error occurred and we failed to restore your config and restart\r\n your server. Please post to\r\n https://community.letsencrypt.org/c/help with details about your\r\n configuration and this error you received.\n", "before_files": [{"content": "\"\"\"Registers functions to be called if an exception or signal occurs.\"\"\"\nimport functools\nimport logging\nimport signal\nimport traceback\n\nfrom acme.magic_typing import Any\nfrom acme.magic_typing import Callable\nfrom acme.magic_typing import Dict\nfrom acme.magic_typing import List\nfrom acme.magic_typing import Union\nfrom certbot import errors\nfrom certbot.compat import os\n\nlogger = logging.getLogger(__name__)\n\n\n# _SIGNALS stores the signals that will be handled by the ErrorHandler. These\n# signals were chosen as their default handler terminates the process and could\n# potentially occur from inside Python. Signals such as SIGILL were not\n# included as they could be a sign of something devious and we should terminate\n# immediately.\nif os.name != \"nt\":\n _SIGNALS = [signal.SIGTERM]\n for signal_code in [signal.SIGHUP, signal.SIGQUIT,\n signal.SIGXCPU, signal.SIGXFSZ]:\n # Adding only those signals that their default action is not Ignore.\n # This is platform-dependent, so we check it dynamically.\n if signal.getsignal(signal_code) != signal.SIG_IGN:\n _SIGNALS.append(signal_code)\nelse:\n # POSIX signals are not implemented natively in Windows, but emulated from the C runtime.\n # As consumed by CPython, most of handlers on theses signals are useless, in particular\n # SIGTERM: for instance, os.kill(pid, signal.SIGTERM) will call TerminateProcess, that stops\n # immediately the process without calling the attached handler. Besides, non-POSIX signals\n # (CTRL_C_EVENT and CTRL_BREAK_EVENT) are implemented in a console context to handle the\n # CTRL+C event to a process launched from the console. Only CTRL_C_EVENT has a reliable\n # behavior in fact, and maps to the handler to SIGINT. However in this case, a\n # KeyboardInterrupt is raised, that will be handled by ErrorHandler through the context manager\n # protocol. 
Finally, no signal on Windows is electable to be handled using ErrorHandler.\n #\n # Refs: https://stackoverflow.com/a/35792192, https://maruel.ca/post/python_windows_signal,\n # https://docs.python.org/2/library/os.html#os.kill,\n # https://www.reddit.com/r/Python/comments/1dsblt/windows_command_line_automation_ctrlc_question\n _SIGNALS = []\n\n\nclass ErrorHandler(object):\n \"\"\"Context manager for running code that must be cleaned up on failure.\n\n The context manager allows you to register functions that will be called\n when an exception (excluding SystemExit) or signal is encountered.\n Usage::\n\n handler = ErrorHandler(cleanup1_func, *cleanup1_args, **cleanup1_kwargs)\n handler.register(cleanup2_func, *cleanup2_args, **cleanup2_kwargs)\n\n with handler:\n do_something()\n\n Or for one cleanup function::\n\n with ErrorHandler(func, args, kwargs):\n do_something()\n\n If an exception is raised out of do_something, the cleanup functions will\n be called in last in first out order. Then the exception is raised.\n Similarly, if a signal is encountered, the cleanup functions are called\n followed by the previously received signal handler.\n\n Each registered cleanup function is called exactly once. If a registered\n function raises an exception, it is logged and the next function is called.\n Signals received while the registered functions are executing are\n deferred until they finish.\n\n \"\"\"\n def __init__(self, func, *args, **kwargs):\n self.call_on_regular_exit = False\n self.body_executed = False\n self.funcs = [] # type: List[Callable[[], Any]]\n self.prev_handlers = {} # type: Dict[int, Union[int, None, Callable]]\n self.received_signals = [] # type: List[int]\n if func is not None:\n self.register(func, *args, **kwargs)\n\n def __enter__(self):\n self.body_executed = False\n self._set_signal_handlers()\n\n def __exit__(self, exec_type, exec_value, trace):\n self.body_executed = True\n retval = False\n # SystemExit is ignored to properly handle forks that don't exec\n if exec_type is SystemExit:\n return retval\n if exec_type is None:\n if not self.call_on_regular_exit:\n return retval\n elif exec_type is errors.SignalExit:\n logger.debug(\"Encountered signals: %s\", self.received_signals)\n retval = True\n else:\n logger.debug(\"Encountered exception:\\n%s\", \"\".join(\n traceback.format_exception(exec_type, exec_value, trace)))\n\n self._call_registered()\n self._reset_signal_handlers()\n self._call_signals()\n return retval\n\n def register(self, func, *args, **kwargs):\n # type: (Callable, *Any, **Any) -> None\n \"\"\"Sets func to be run with the given arguments during cleanup.\n\n :param function func: function to be called in case of an error\n\n \"\"\"\n self.funcs.append(functools.partial(func, *args, **kwargs))\n\n def _call_registered(self):\n \"\"\"Calls all registered functions\"\"\"\n logger.debug(\"Calling registered functions\")\n while self.funcs:\n try:\n self.funcs[-1]()\n except Exception: # pylint: disable=broad-except\n logger.error(\"Encountered exception during recovery: \", exc_info=True)\n self.funcs.pop()\n\n def _set_signal_handlers(self):\n \"\"\"Sets signal handlers for signals in _SIGNALS.\"\"\"\n for signum in _SIGNALS:\n prev_handler = signal.getsignal(signum)\n # If prev_handler is None, the handler was set outside of Python\n if prev_handler is not None:\n self.prev_handlers[signum] = prev_handler\n signal.signal(signum, self._signal_handler)\n\n def _reset_signal_handlers(self):\n \"\"\"Resets signal handlers for signals in 
_SIGNALS.\"\"\"\n for signum in self.prev_handlers:\n signal.signal(signum, self.prev_handlers[signum])\n self.prev_handlers.clear()\n\n def _signal_handler(self, signum, unused_frame):\n \"\"\"Replacement function for handling received signals.\n\n Store the received signal. If we are executing the code block in\n the body of the context manager, stop by raising signal exit.\n\n :param int signum: number of current signal\n\n \"\"\"\n self.received_signals.append(signum)\n if not self.body_executed:\n raise errors.SignalExit\n\n def _call_signals(self):\n \"\"\"Finally call the deferred signals.\"\"\"\n for signum in self.received_signals:\n logger.debug(\"Calling signal %s\", signum)\n os.kill(os.getpid(), signum)\n\nclass ExitHandler(ErrorHandler):\n \"\"\"Context manager for running code that must be cleaned up.\n\n Subclass of ErrorHandler, with the same usage and parameters.\n In addition to cleaning up on all signals, also cleans up on\n regular exit.\n \"\"\"\n def __init__(self, func, *args, **kwargs):\n ErrorHandler.__init__(self, func, *args, **kwargs)\n self.call_on_regular_exit = True\n", "path": "certbot/certbot/_internal/error_handler.py"}], "after_files": [{"content": "\"\"\"Registers functions to be called if an exception or signal occurs.\"\"\"\nimport functools\nimport logging\nimport signal\nimport traceback\n\nfrom acme.magic_typing import Any\nfrom acme.magic_typing import Callable\nfrom acme.magic_typing import Dict\nfrom acme.magic_typing import List\nfrom acme.magic_typing import Union\nfrom certbot import errors\nfrom certbot.compat import os\n\nlogger = logging.getLogger(__name__)\n\n\n# _SIGNALS stores the signals that will be handled by the ErrorHandler. These\n# signals were chosen as their default handler terminates the process and could\n# potentially occur from inside Python. Signals such as SIGILL were not\n# included as they could be a sign of something devious and we should terminate\n# immediately.\nif os.name != \"nt\":\n _SIGNALS = [signal.SIGTERM]\n for signal_code in [signal.SIGHUP, signal.SIGQUIT,\n signal.SIGXCPU, signal.SIGXFSZ]:\n # Adding only those signals that their default action is not Ignore.\n # This is platform-dependent, so we check it dynamically.\n if signal.getsignal(signal_code) != signal.SIG_IGN:\n _SIGNALS.append(signal_code)\nelse:\n # POSIX signals are not implemented natively in Windows, but emulated from the C runtime.\n # As consumed by CPython, most of handlers on theses signals are useless, in particular\n # SIGTERM: for instance, os.kill(pid, signal.SIGTERM) will call TerminateProcess, that stops\n # immediately the process without calling the attached handler. Besides, non-POSIX signals\n # (CTRL_C_EVENT and CTRL_BREAK_EVENT) are implemented in a console context to handle the\n # CTRL+C event to a process launched from the console. Only CTRL_C_EVENT has a reliable\n # behavior in fact, and maps to the handler to SIGINT. However in this case, a\n # KeyboardInterrupt is raised, that will be handled by ErrorHandler through the context manager\n # protocol. 
Finally, no signal on Windows is electable to be handled using ErrorHandler.\n #\n # Refs: https://stackoverflow.com/a/35792192, https://maruel.ca/post/python_windows_signal,\n # https://docs.python.org/2/library/os.html#os.kill,\n # https://www.reddit.com/r/Python/comments/1dsblt/windows_command_line_automation_ctrlc_question\n _SIGNALS = []\n\n\nclass ErrorHandler(object):\n \"\"\"Context manager for running code that must be cleaned up on failure.\n\n The context manager allows you to register functions that will be called\n when an exception (excluding SystemExit) or signal is encountered.\n Usage::\n\n handler = ErrorHandler(cleanup1_func, *cleanup1_args, **cleanup1_kwargs)\n handler.register(cleanup2_func, *cleanup2_args, **cleanup2_kwargs)\n\n with handler:\n do_something()\n\n Or for one cleanup function::\n\n with ErrorHandler(func, args, kwargs):\n do_something()\n\n If an exception is raised out of do_something, the cleanup functions will\n be called in last in first out order. Then the exception is raised.\n Similarly, if a signal is encountered, the cleanup functions are called\n followed by the previously received signal handler.\n\n Each registered cleanup function is called exactly once. If a registered\n function raises an exception, it is logged and the next function is called.\n Signals received while the registered functions are executing are\n deferred until they finish.\n\n \"\"\"\n def __init__(self, func, *args, **kwargs):\n self.call_on_regular_exit = False\n self.body_executed = False\n self.funcs = [] # type: List[Callable[[], Any]]\n self.prev_handlers = {} # type: Dict[int, Union[int, None, Callable]]\n self.received_signals = [] # type: List[int]\n if func is not None:\n self.register(func, *args, **kwargs)\n\n def __enter__(self):\n self.body_executed = False\n self._set_signal_handlers()\n\n def __exit__(self, exec_type, exec_value, trace):\n self.body_executed = True\n retval = False\n # SystemExit is ignored to properly handle forks that don't exec\n if exec_type is SystemExit:\n return retval\n if exec_type is None:\n if not self.call_on_regular_exit:\n return retval\n elif exec_type is errors.SignalExit:\n logger.debug(\"Encountered signals: %s\", self.received_signals)\n retval = True\n else:\n logger.debug(\"Encountered exception:\\n%s\", \"\".join(\n traceback.format_exception(exec_type, exec_value, trace)))\n\n self._call_registered()\n self._reset_signal_handlers()\n self._call_signals()\n return retval\n\n def register(self, func, *args, **kwargs):\n # type: (Callable, *Any, **Any) -> None\n \"\"\"Sets func to be run with the given arguments during cleanup.\n\n :param function func: function to be called in case of an error\n\n \"\"\"\n self.funcs.append(functools.partial(func, *args, **kwargs))\n\n def _call_registered(self):\n \"\"\"Calls all registered functions\"\"\"\n logger.debug(\"Calling registered functions\")\n while self.funcs:\n try:\n self.funcs[-1]()\n except Exception as exc: # pylint: disable=broad-except\n output = traceback.format_exception_only(type(exc), exc)\n logger.error(\"Encountered exception during recovery: %s\",\n ''.join(output).rstrip())\n self.funcs.pop()\n\n def _set_signal_handlers(self):\n \"\"\"Sets signal handlers for signals in _SIGNALS.\"\"\"\n for signum in _SIGNALS:\n prev_handler = signal.getsignal(signum)\n # If prev_handler is None, the handler was set outside of Python\n if prev_handler is not None:\n self.prev_handlers[signum] = prev_handler\n signal.signal(signum, self._signal_handler)\n\n def 
_reset_signal_handlers(self):\n \"\"\"Resets signal handlers for signals in _SIGNALS.\"\"\"\n for signum in self.prev_handlers:\n signal.signal(signum, self.prev_handlers[signum])\n self.prev_handlers.clear()\n\n def _signal_handler(self, signum, unused_frame):\n \"\"\"Replacement function for handling received signals.\n\n Store the received signal. If we are executing the code block in\n the body of the context manager, stop by raising signal exit.\n\n :param int signum: number of current signal\n\n \"\"\"\n self.received_signals.append(signum)\n if not self.body_executed:\n raise errors.SignalExit\n\n def _call_signals(self):\n \"\"\"Finally call the deferred signals.\"\"\"\n for signum in self.received_signals:\n logger.debug(\"Calling signal %s\", signum)\n os.kill(os.getpid(), signum)\n\nclass ExitHandler(ErrorHandler):\n \"\"\"Context manager for running code that must be cleaned up.\n\n Subclass of ErrorHandler, with the same usage and parameters.\n In addition to cleaning up on all signals, also cleans up on\n regular exit.\n \"\"\"\n def __init__(self, func, *args, **kwargs):\n ErrorHandler.__init__(self, func, *args, **kwargs)\n self.call_on_regular_exit = True\n", "path": "certbot/certbot/_internal/error_handler.py"}]} | 3,393 | 187 |
gh_patches_debug_19388 | rasdani/github-patches | git_diff | deepset-ai__haystack-3630 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TCP port in `launch_opensearch()` is different from default value in `OpenSearchDocumentStore`
In `launch_opensearch()` we are starting an OpenSearch container using the port `9201`. The default port for `OpenSearchDocumentStore` is currently `9200`. I think we should align those two values.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `haystack/utils/doc_store.py`
Content:
```
1 # pylint: disable=missing-timeout
2
3 import time
4 import logging
5 import subprocess
6 from pathlib import Path
7
8 import requests
9
10
11 logger = logging.getLogger(__name__)
12 ELASTICSEARCH_CONTAINER_NAME = "elasticsearch"
13 OPENSEARCH_CONTAINER_NAME = "opensearch"
14 WEAVIATE_CONTAINER_NAME = "weaviate"
15
16
17 def launch_es(sleep=15, delete_existing=False):
18 """
19 Start an Elasticsearch server via Docker.
20 """
21
22 logger.debug("Starting Elasticsearch ...")
23 if delete_existing:
24 _ = subprocess.run([f"docker rm --force {ELASTICSEARCH_CONTAINER_NAME}"], shell=True, stdout=subprocess.DEVNULL)
25 status = subprocess.run(
26 [
27 f'docker start {ELASTICSEARCH_CONTAINER_NAME} > /dev/null 2>&1 || docker run -d -p 9200:9200 -e "discovery.type=single-node" --name {ELASTICSEARCH_CONTAINER_NAME} elasticsearch:7.9.2'
28 ],
29 shell=True,
30 )
31 if status.returncode:
32 logger.warning(
33 "Tried to start Elasticsearch through Docker but this failed. "
34 "It is likely that there is already an existing Elasticsearch instance running. "
35 )
36 else:
37 time.sleep(sleep)
38
39
40 def launch_opensearch(sleep=15, delete_existing=False):
41 """
42 Start an OpenSearch server via Docker.
43 """
44 logger.debug("Starting OpenSearch...")
45 # This line is needed since it is not possible to start a new docker container with the name opensearch if there is a stopped image with the same now
46 # docker rm only succeeds if the container is stopped, not if it is running
47 if delete_existing:
48 _ = subprocess.run([f"docker rm --force {OPENSEARCH_CONTAINER_NAME}"], shell=True, stdout=subprocess.DEVNULL)
49 status = subprocess.run(
50 [
51 f'docker start {OPENSEARCH_CONTAINER_NAME} > /dev/null 2>&1 || docker run -d -p 9201:9200 -p 9600:9600 -e "discovery.type=single-node" --name {OPENSEARCH_CONTAINER_NAME} opensearchproject/opensearch:1.3.5'
52 ],
53 shell=True,
54 )
55 if status.returncode:
56 logger.warning(
57 "Tried to start OpenSearch through Docker but this failed. "
58 "It is likely that there is already an existing OpenSearch instance running. "
59 )
60 else:
61 time.sleep(sleep)
62
63
64 def launch_weaviate(sleep=15):
65 """
66 Start a Weaviate server via Docker.
67 """
68
69 logger.debug("Starting Weaviate ...")
70 status = subprocess.run(
71 [
72 f"docker start {WEAVIATE_CONTAINER_NAME} > /dev/null 2>&1 || docker run -d -p 8080:8080 --env AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED='true' --env PERSISTENCE_DATA_PATH='/var/lib/weaviate' --name {WEAVIATE_CONTAINER_NAME} semitechnologies/weaviate:latest"
73 ],
74 shell=True,
75 )
76 if status.returncode:
77 logger.warning(
78 "Tried to start Weaviate through Docker but this failed. "
79 "It is likely that there is already an existing Weaviate instance running. "
80 )
81 else:
82 time.sleep(sleep)
83
84
85 def stop_container(container_name, delete_container=False):
86 logger.debug("Stopping %s...", container_name)
87 status = subprocess.run([f"docker stop {container_name}"], shell=True)
88 if status.returncode:
89 logger.warning(
90 f"Tried to stop {container_name} but this failed. "
91 f"It is likely that there was no Docker container with the name {container_name}"
92 )
93 if delete_container:
94 status = subprocess.run([f"docker rm {container_name}"], shell=True)
95
96
97 def stop_opensearch(delete_container=False):
98 stop_container(OPENSEARCH_CONTAINER_NAME, delete_container)
99
100
101 def stop_elasticsearch(delete_container=False):
102 stop_container(ELASTICSEARCH_CONTAINER_NAME, delete_container)
103
104
105 def stop_weaviate(delete_container=False):
106 stop_container(WEAVIATE_CONTAINER_NAME, delete_container)
107
108
109 def stop_service(document_store, delete_container=False):
110 ds_class = str(type(document_store))
111 if "OpenSearchDocumentStore" in ds_class:
112 stop_opensearch(delete_container)
113 elif "ElasticsearchDocumentStore" in ds_class:
114 stop_elasticsearch(delete_container)
115 elif "WeaviateDocumentStore" in ds_class:
116 stop_weaviate(delete_container)
117 else:
118 logger.warning("No support yet for auto stopping the service behind a %s", type(document_store))
119
120
121 def launch_milvus(sleep=15, delete_existing=False):
122 """
123 Start a Milvus server via Docker
124 """
125 logger.debug("Starting Milvus ...")
126
127 milvus_dir = Path.home() / "milvus"
128 milvus_dir.mkdir(exist_ok=True)
129
130 request = requests.get(
131 "https://github.com/milvus-io/milvus/releases/download/v2.0.0/milvus-standalone-docker-compose.yml"
132 )
133 with open(milvus_dir / "docker-compose.yml", "wb") as f:
134 f.write(request.content)
135
136 status = subprocess.run(["cd /home/$USER/milvus/ && docker-compose up -d"], shell=True)
137
138 if status.returncode:
139 logger.warning(
140 "Tried to start Milvus through Docker but this failed. "
141 "It is likely that there is already an existing Milvus instance running. "
142 )
143 else:
144 time.sleep(sleep)
145
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/haystack/utils/doc_store.py b/haystack/utils/doc_store.py
--- a/haystack/utils/doc_store.py
+++ b/haystack/utils/doc_store.py
@@ -37,7 +37,7 @@
time.sleep(sleep)
-def launch_opensearch(sleep=15, delete_existing=False):
+def launch_opensearch(sleep=15, delete_existing=False, local_port=9200):
"""
Start an OpenSearch server via Docker.
"""
@@ -48,7 +48,7 @@
_ = subprocess.run([f"docker rm --force {OPENSEARCH_CONTAINER_NAME}"], shell=True, stdout=subprocess.DEVNULL)
status = subprocess.run(
[
- f'docker start {OPENSEARCH_CONTAINER_NAME} > /dev/null 2>&1 || docker run -d -p 9201:9200 -p 9600:9600 -e "discovery.type=single-node" --name {OPENSEARCH_CONTAINER_NAME} opensearchproject/opensearch:1.3.5'
+ f'docker start {OPENSEARCH_CONTAINER_NAME} > /dev/null 2>&1 || docker run -d -p {local_port}:9200 -p 9600:9600 -e "discovery.type=single-node" --name {OPENSEARCH_CONTAINER_NAME} opensearchproject/opensearch:1.3.5'
],
shell=True,
)
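
A hedged usage sketch of the patched helper, assuming Haystack 1.x import paths, the `opensearch` extra, and a local Docker daemon; with `local_port=9200` the container's host port now matches `OpenSearchDocumentStore`'s default.

```python
# Sketch only: assumes Haystack 1.x with the patched launch_opensearch()
# and the opensearch extra installed; import paths may differ by version.
from haystack.utils import launch_opensearch
from haystack.document_stores import OpenSearchDocumentStore

# The helper now exposes the host port and defaults to 9200, so it lines
# up with OpenSearchDocumentStore's default port out of the box.
launch_opensearch(sleep=30, delete_existing=True, local_port=9200)

document_store = OpenSearchDocumentStore(host="localhost", port=9200)
print(document_store.get_document_count())
```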
| {"golden_diff": "diff --git a/haystack/utils/doc_store.py b/haystack/utils/doc_store.py\n--- a/haystack/utils/doc_store.py\n+++ b/haystack/utils/doc_store.py\n@@ -37,7 +37,7 @@\n time.sleep(sleep)\n \n \n-def launch_opensearch(sleep=15, delete_existing=False):\n+def launch_opensearch(sleep=15, delete_existing=False, local_port=9200):\n \"\"\"\n Start an OpenSearch server via Docker.\n \"\"\"\n@@ -48,7 +48,7 @@\n _ = subprocess.run([f\"docker rm --force {OPENSEARCH_CONTAINER_NAME}\"], shell=True, stdout=subprocess.DEVNULL)\n status = subprocess.run(\n [\n- f'docker start {OPENSEARCH_CONTAINER_NAME} > /dev/null 2>&1 || docker run -d -p 9201:9200 -p 9600:9600 -e \"discovery.type=single-node\" --name {OPENSEARCH_CONTAINER_NAME} opensearchproject/opensearch:1.3.5'\n+ f'docker start {OPENSEARCH_CONTAINER_NAME} > /dev/null 2>&1 || docker run -d -p {local_port}:9200 -p 9600:9600 -e \"discovery.type=single-node\" --name {OPENSEARCH_CONTAINER_NAME} opensearchproject/opensearch:1.3.5'\n ],\n shell=True,\n )\n", "issue": "TCP port in `launch_opensearch()` is different from default value in `OpenSearchDocumentStore`\nIn `launch_opensearch()` we are starting an OpenSearch container using the port `9201`. The default port for `OpenSearchDocumentStore` is currently `9200`. I think we should align those two values.\r\n\n", "before_files": [{"content": "# pylint: disable=missing-timeout\n\nimport time\nimport logging\nimport subprocess\nfrom pathlib import Path\n\nimport requests\n\n\nlogger = logging.getLogger(__name__)\nELASTICSEARCH_CONTAINER_NAME = \"elasticsearch\"\nOPENSEARCH_CONTAINER_NAME = \"opensearch\"\nWEAVIATE_CONTAINER_NAME = \"weaviate\"\n\n\ndef launch_es(sleep=15, delete_existing=False):\n \"\"\"\n Start an Elasticsearch server via Docker.\n \"\"\"\n\n logger.debug(\"Starting Elasticsearch ...\")\n if delete_existing:\n _ = subprocess.run([f\"docker rm --force {ELASTICSEARCH_CONTAINER_NAME}\"], shell=True, stdout=subprocess.DEVNULL)\n status = subprocess.run(\n [\n f'docker start {ELASTICSEARCH_CONTAINER_NAME} > /dev/null 2>&1 || docker run -d -p 9200:9200 -e \"discovery.type=single-node\" --name {ELASTICSEARCH_CONTAINER_NAME} elasticsearch:7.9.2'\n ],\n shell=True,\n )\n if status.returncode:\n logger.warning(\n \"Tried to start Elasticsearch through Docker but this failed. \"\n \"It is likely that there is already an existing Elasticsearch instance running. \"\n )\n else:\n time.sleep(sleep)\n\n\ndef launch_opensearch(sleep=15, delete_existing=False):\n \"\"\"\n Start an OpenSearch server via Docker.\n \"\"\"\n logger.debug(\"Starting OpenSearch...\")\n # This line is needed since it is not possible to start a new docker container with the name opensearch if there is a stopped image with the same now\n # docker rm only succeeds if the container is stopped, not if it is running\n if delete_existing:\n _ = subprocess.run([f\"docker rm --force {OPENSEARCH_CONTAINER_NAME}\"], shell=True, stdout=subprocess.DEVNULL)\n status = subprocess.run(\n [\n f'docker start {OPENSEARCH_CONTAINER_NAME} > /dev/null 2>&1 || docker run -d -p 9201:9200 -p 9600:9600 -e \"discovery.type=single-node\" --name {OPENSEARCH_CONTAINER_NAME} opensearchproject/opensearch:1.3.5'\n ],\n shell=True,\n )\n if status.returncode:\n logger.warning(\n \"Tried to start OpenSearch through Docker but this failed. \"\n \"It is likely that there is already an existing OpenSearch instance running. 
\"\n )\n else:\n time.sleep(sleep)\n\n\ndef launch_weaviate(sleep=15):\n \"\"\"\n Start a Weaviate server via Docker.\n \"\"\"\n\n logger.debug(\"Starting Weaviate ...\")\n status = subprocess.run(\n [\n f\"docker start {WEAVIATE_CONTAINER_NAME} > /dev/null 2>&1 || docker run -d -p 8080:8080 --env AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED='true' --env PERSISTENCE_DATA_PATH='/var/lib/weaviate' --name {WEAVIATE_CONTAINER_NAME} semitechnologies/weaviate:latest\"\n ],\n shell=True,\n )\n if status.returncode:\n logger.warning(\n \"Tried to start Weaviate through Docker but this failed. \"\n \"It is likely that there is already an existing Weaviate instance running. \"\n )\n else:\n time.sleep(sleep)\n\n\ndef stop_container(container_name, delete_container=False):\n logger.debug(\"Stopping %s...\", container_name)\n status = subprocess.run([f\"docker stop {container_name}\"], shell=True)\n if status.returncode:\n logger.warning(\n f\"Tried to stop {container_name} but this failed. \"\n f\"It is likely that there was no Docker container with the name {container_name}\"\n )\n if delete_container:\n status = subprocess.run([f\"docker rm {container_name}\"], shell=True)\n\n\ndef stop_opensearch(delete_container=False):\n stop_container(OPENSEARCH_CONTAINER_NAME, delete_container)\n\n\ndef stop_elasticsearch(delete_container=False):\n stop_container(ELASTICSEARCH_CONTAINER_NAME, delete_container)\n\n\ndef stop_weaviate(delete_container=False):\n stop_container(WEAVIATE_CONTAINER_NAME, delete_container)\n\n\ndef stop_service(document_store, delete_container=False):\n ds_class = str(type(document_store))\n if \"OpenSearchDocumentStore\" in ds_class:\n stop_opensearch(delete_container)\n elif \"ElasticsearchDocumentStore\" in ds_class:\n stop_elasticsearch(delete_container)\n elif \"WeaviateDocumentStore\" in ds_class:\n stop_weaviate(delete_container)\n else:\n logger.warning(\"No support yet for auto stopping the service behind a %s\", type(document_store))\n\n\ndef launch_milvus(sleep=15, delete_existing=False):\n \"\"\"\n Start a Milvus server via Docker\n \"\"\"\n logger.debug(\"Starting Milvus ...\")\n\n milvus_dir = Path.home() / \"milvus\"\n milvus_dir.mkdir(exist_ok=True)\n\n request = requests.get(\n \"https://github.com/milvus-io/milvus/releases/download/v2.0.0/milvus-standalone-docker-compose.yml\"\n )\n with open(milvus_dir / \"docker-compose.yml\", \"wb\") as f:\n f.write(request.content)\n\n status = subprocess.run([\"cd /home/$USER/milvus/ && docker-compose up -d\"], shell=True)\n\n if status.returncode:\n logger.warning(\n \"Tried to start Milvus through Docker but this failed. \"\n \"It is likely that there is already an existing Milvus instance running. 
\"\n )\n else:\n time.sleep(sleep)\n", "path": "haystack/utils/doc_store.py"}], "after_files": [{"content": "# pylint: disable=missing-timeout\n\nimport time\nimport logging\nimport subprocess\nfrom pathlib import Path\n\nimport requests\n\n\nlogger = logging.getLogger(__name__)\nELASTICSEARCH_CONTAINER_NAME = \"elasticsearch\"\nOPENSEARCH_CONTAINER_NAME = \"opensearch\"\nWEAVIATE_CONTAINER_NAME = \"weaviate\"\n\n\ndef launch_es(sleep=15, delete_existing=False):\n \"\"\"\n Start an Elasticsearch server via Docker.\n \"\"\"\n\n logger.debug(\"Starting Elasticsearch ...\")\n if delete_existing:\n _ = subprocess.run([f\"docker rm --force {ELASTICSEARCH_CONTAINER_NAME}\"], shell=True, stdout=subprocess.DEVNULL)\n status = subprocess.run(\n [\n f'docker start {ELASTICSEARCH_CONTAINER_NAME} > /dev/null 2>&1 || docker run -d -p 9200:9200 -e \"discovery.type=single-node\" --name {ELASTICSEARCH_CONTAINER_NAME} elasticsearch:7.9.2'\n ],\n shell=True,\n )\n if status.returncode:\n logger.warning(\n \"Tried to start Elasticsearch through Docker but this failed. \"\n \"It is likely that there is already an existing Elasticsearch instance running. \"\n )\n else:\n time.sleep(sleep)\n\n\ndef launch_opensearch(sleep=15, delete_existing=False, local_port=9200):\n \"\"\"\n Start an OpenSearch server via Docker.\n \"\"\"\n logger.debug(\"Starting OpenSearch...\")\n # This line is needed since it is not possible to start a new docker container with the name opensearch if there is a stopped image with the same now\n # docker rm only succeeds if the container is stopped, not if it is running\n if delete_existing:\n _ = subprocess.run([f\"docker rm --force {OPENSEARCH_CONTAINER_NAME}\"], shell=True, stdout=subprocess.DEVNULL)\n status = subprocess.run(\n [\n f'docker start {OPENSEARCH_CONTAINER_NAME} > /dev/null 2>&1 || docker run -d -p {local_port}:9200 -p 9600:9600 -e \"discovery.type=single-node\" --name {OPENSEARCH_CONTAINER_NAME} opensearchproject/opensearch:1.3.5'\n ],\n shell=True,\n )\n if status.returncode:\n logger.warning(\n \"Tried to start OpenSearch through Docker but this failed. \"\n \"It is likely that there is already an existing OpenSearch instance running. \"\n )\n else:\n time.sleep(sleep)\n\n\ndef launch_weaviate(sleep=15):\n \"\"\"\n Start a Weaviate server via Docker.\n \"\"\"\n\n logger.debug(\"Starting Weaviate ...\")\n status = subprocess.run(\n [\n f\"docker start {WEAVIATE_CONTAINER_NAME} > /dev/null 2>&1 || docker run -d -p 8080:8080 --env AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED='true' --env PERSISTENCE_DATA_PATH='/var/lib/weaviate' --name {WEAVIATE_CONTAINER_NAME} semitechnologies/weaviate:latest\"\n ],\n shell=True,\n )\n if status.returncode:\n logger.warning(\n \"Tried to start Weaviate through Docker but this failed. \"\n \"It is likely that there is already an existing Weaviate instance running. \"\n )\n else:\n time.sleep(sleep)\n\n\ndef stop_container(container_name, delete_container=False):\n logger.debug(\"Stopping %s...\", container_name)\n status = subprocess.run([f\"docker stop {container_name}\"], shell=True)\n if status.returncode:\n logger.warning(\n f\"Tried to stop {container_name} but this failed. 
\"\n f\"It is likely that there was no Docker container with the name {container_name}\"\n )\n if delete_container:\n status = subprocess.run([f\"docker rm {container_name}\"], shell=True)\n\n\ndef stop_opensearch(delete_container=False):\n stop_container(OPENSEARCH_CONTAINER_NAME, delete_container)\n\n\ndef stop_elasticsearch(delete_container=False):\n stop_container(ELASTICSEARCH_CONTAINER_NAME, delete_container)\n\n\ndef stop_weaviate(delete_container=False):\n stop_container(WEAVIATE_CONTAINER_NAME, delete_container)\n\n\ndef stop_service(document_store, delete_container=False):\n ds_class = str(type(document_store))\n if \"OpenSearchDocumentStore\" in ds_class:\n stop_opensearch(delete_container)\n elif \"ElasticsearchDocumentStore\" in ds_class:\n stop_elasticsearch(delete_container)\n elif \"WeaviateDocumentStore\" in ds_class:\n stop_weaviate(delete_container)\n else:\n logger.warning(\"No support yet for auto stopping the service behind a %s\", type(document_store))\n\n\ndef launch_milvus(sleep=15, delete_existing=False):\n \"\"\"\n Start a Milvus server via Docker\n \"\"\"\n logger.debug(\"Starting Milvus ...\")\n\n milvus_dir = Path.home() / \"milvus\"\n milvus_dir.mkdir(exist_ok=True)\n\n request = requests.get(\n \"https://github.com/milvus-io/milvus/releases/download/v2.0.0/milvus-standalone-docker-compose.yml\"\n )\n with open(milvus_dir / \"docker-compose.yml\", \"wb\") as f:\n f.write(request.content)\n\n status = subprocess.run([\"cd /home/$USER/milvus/ && docker-compose up -d\"], shell=True)\n\n if status.returncode:\n logger.warning(\n \"Tried to start Milvus through Docker but this failed. \"\n \"It is likely that there is already an existing Milvus instance running. \"\n )\n else:\n time.sleep(sleep)\n", "path": "haystack/utils/doc_store.py"}]} | 1,914 | 332 |
gh_patches_debug_7446 | rasdani/github-patches | git_diff | pyg-team__pytorch_geometric-8388 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
LatexBuilder for docs fails
### 📚 Describe the documentation issue
The following line makes the docs building crash when using a LatexBuilder
https://github.com/pyg-team/pytorch_geometric/blob/88d7986b6d0a6de5895872270d2ff4fc95fae3b7/docs/source/conf.py#L69C1-L75C43
To reproduce build the docs with the latex builder
```bash
python -m sphinx -T -E -b latex -d _build/doctrees -D language=en . ./build
```
```bash
Extension error:
Handler <function setup.<locals>.rst_jinja_render at 0x1230b4dc0> for event 'source-read' threw an exception (exception: 'LaTeXBuilder' object has no attribute 'templates')
```
### Suggest a potential alternative/fix
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/source/conf.py`
Content:
```
1 import datetime
2 import os.path as osp
3 import sys
4
5 import pyg_sphinx_theme
6
7 import torch_geometric
8
9 author = 'PyG Team'
10 project = 'pytorch_geometric'
11 version = torch_geometric.__version__
12 copyright = f'{datetime.datetime.now().year}, {author}'
13
14 sys.path.append(osp.join(osp.dirname(pyg_sphinx_theme.__file__), 'extension'))
15
16 extensions = [
17 'sphinx.ext.autodoc',
18 'sphinx.ext.autosummary',
19 'sphinx.ext.intersphinx',
20 'sphinx.ext.mathjax',
21 'sphinx.ext.napoleon',
22 'sphinx.ext.viewcode',
23 'nbsphinx',
24 'pyg',
25 ]
26
27 html_theme = 'pyg_sphinx_theme'
28 html_logo = ('https://raw.githubusercontent.com/pyg-team/pyg_sphinx_theme/'
29 'master/pyg_sphinx_theme/static/img/pyg_logo.png')
30 html_favicon = ('https://raw.githubusercontent.com/pyg-team/pyg_sphinx_theme/'
31 'master/pyg_sphinx_theme/static/img/favicon.png')
32 html_static_path = ['_static']
33 templates_path = ['_templates']
34
35 add_module_names = False
36 autodoc_member_order = 'bysource'
37
38 suppress_warnings = ['autodoc.import_object']
39
40 intersphinx_mapping = {
41 'python': ('https://docs.python.org/', None),
42 # 'numpy': ('http://docs.scipy.org/doc/numpy', None),
43 'pandas': ('http://pandas.pydata.org/pandas-docs/dev', None),
44 'torch': ('https://pytorch.org/docs/master', None),
45 }
46
47 nbsphinx_thumbnails = {
48 'tutorial/create_gnn':
49 '_static/thumbnails/create_gnn.png',
50 'tutorial/heterogeneous':
51 '_static/thumbnails/heterogeneous.png',
52 'tutorial/create_dataset':
53 '_static/thumbnails/create_dataset.png',
54 'tutorial/load_csv':
55 '_static/thumbnails/load_csv.png',
56 'tutorial/neighbor_loader':
57 '_static/thumbnails/neighbor_loader.png',
58 'tutorial/explain':
59 '_static/thumbnails/explain.png',
60 'tutorial/shallow_node_embeddings':
61 '_static/thumbnails/shallow_node_embeddings.png',
62 'tutorial/multi_gpu_vanilla':
63 '_static/thumbnails/multi_gpu_vanilla.png',
64 'tutorial/multi_node_multi_gpu_vanilla':
65 '_static/thumbnails/multi_gpu_vanilla.png',
66 }
67
68
69 def setup(app):
70 def rst_jinja_render(app, _, source):
71 rst_context = {'torch_geometric': torch_geometric}
72 source[0] = app.builder.templates.render_string(source[0], rst_context)
73
74 app.connect('source-read', rst_jinja_render)
75 app.add_js_file('js/version_alert.js')
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -66,10 +66,12 @@
}
-def setup(app):
- def rst_jinja_render(app, _, source):
+def rst_jinja_render(app, _, source):
+ if hasattr(app.builder, 'templates'):
rst_context = {'torch_geometric': torch_geometric}
source[0] = app.builder.templates.render_string(source[0], rst_context)
+
+def setup(app):
app.connect('source-read', rst_jinja_render)
app.add_js_file('js/version_alert.js')
| {"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -66,10 +66,12 @@\n }\n \n \n-def setup(app):\n- def rst_jinja_render(app, _, source):\n+def rst_jinja_render(app, _, source):\n+ if hasattr(app.builder, 'templates'):\n rst_context = {'torch_geometric': torch_geometric}\n source[0] = app.builder.templates.render_string(source[0], rst_context)\n \n+\n+def setup(app):\n app.connect('source-read', rst_jinja_render)\n app.add_js_file('js/version_alert.js')\n", "issue": "LatexBuilder for docs fails\n### \ud83d\udcda Describe the documentation issue\r\n\r\nThe following line makes the docs building crash when using a LatexBuilder\r\n\r\nhttps://github.com/pyg-team/pytorch_geometric/blob/88d7986b6d0a6de5895872270d2ff4fc95fae3b7/docs/source/conf.py#L69C1-L75C43\r\n\r\nTo reproduce build the docs with the latex builder\r\n```bash\r\npython -m sphinx -T -E -b latex -d _build/doctrees -D language=en . ./build\r\n```\r\n\r\n```bash\r\nExtension error:\r\nHandler <function setup.<locals>.rst_jinja_render at 0x1230b4dc0> for event 'source-read' threw an exception (exception: 'LaTeXBuilder' object has no attribute 'templates')\r\n\r\n```\r\n\r\n### Suggest a potential alternative/fix\r\n\r\n_No response_\n", "before_files": [{"content": "import datetime\nimport os.path as osp\nimport sys\n\nimport pyg_sphinx_theme\n\nimport torch_geometric\n\nauthor = 'PyG Team'\nproject = 'pytorch_geometric'\nversion = torch_geometric.__version__\ncopyright = f'{datetime.datetime.now().year}, {author}'\n\nsys.path.append(osp.join(osp.dirname(pyg_sphinx_theme.__file__), 'extension'))\n\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.viewcode',\n 'nbsphinx',\n 'pyg',\n]\n\nhtml_theme = 'pyg_sphinx_theme'\nhtml_logo = ('https://raw.githubusercontent.com/pyg-team/pyg_sphinx_theme/'\n 'master/pyg_sphinx_theme/static/img/pyg_logo.png')\nhtml_favicon = ('https://raw.githubusercontent.com/pyg-team/pyg_sphinx_theme/'\n 'master/pyg_sphinx_theme/static/img/favicon.png')\nhtml_static_path = ['_static']\ntemplates_path = ['_templates']\n\nadd_module_names = False\nautodoc_member_order = 'bysource'\n\nsuppress_warnings = ['autodoc.import_object']\n\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/', None),\n # 'numpy': ('http://docs.scipy.org/doc/numpy', None),\n 'pandas': ('http://pandas.pydata.org/pandas-docs/dev', None),\n 'torch': ('https://pytorch.org/docs/master', None),\n}\n\nnbsphinx_thumbnails = {\n 'tutorial/create_gnn':\n '_static/thumbnails/create_gnn.png',\n 'tutorial/heterogeneous':\n '_static/thumbnails/heterogeneous.png',\n 'tutorial/create_dataset':\n '_static/thumbnails/create_dataset.png',\n 'tutorial/load_csv':\n '_static/thumbnails/load_csv.png',\n 'tutorial/neighbor_loader':\n '_static/thumbnails/neighbor_loader.png',\n 'tutorial/explain':\n '_static/thumbnails/explain.png',\n 'tutorial/shallow_node_embeddings':\n '_static/thumbnails/shallow_node_embeddings.png',\n 'tutorial/multi_gpu_vanilla':\n '_static/thumbnails/multi_gpu_vanilla.png',\n 'tutorial/multi_node_multi_gpu_vanilla':\n '_static/thumbnails/multi_gpu_vanilla.png',\n}\n\n\ndef setup(app):\n def rst_jinja_render(app, _, source):\n rst_context = {'torch_geometric': torch_geometric}\n source[0] = app.builder.templates.render_string(source[0], rst_context)\n\n app.connect('source-read', rst_jinja_render)\n app.add_js_file('js/version_alert.js')\n", 
"path": "docs/source/conf.py"}], "after_files": [{"content": "import datetime\nimport os.path as osp\nimport sys\n\nimport pyg_sphinx_theme\n\nimport torch_geometric\n\nauthor = 'PyG Team'\nproject = 'pytorch_geometric'\nversion = torch_geometric.__version__\ncopyright = f'{datetime.datetime.now().year}, {author}'\n\nsys.path.append(osp.join(osp.dirname(pyg_sphinx_theme.__file__), 'extension'))\n\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.viewcode',\n 'nbsphinx',\n 'pyg',\n]\n\nhtml_theme = 'pyg_sphinx_theme'\nhtml_logo = ('https://raw.githubusercontent.com/pyg-team/pyg_sphinx_theme/'\n 'master/pyg_sphinx_theme/static/img/pyg_logo.png')\nhtml_favicon = ('https://raw.githubusercontent.com/pyg-team/pyg_sphinx_theme/'\n 'master/pyg_sphinx_theme/static/img/favicon.png')\nhtml_static_path = ['_static']\ntemplates_path = ['_templates']\n\nadd_module_names = False\nautodoc_member_order = 'bysource'\n\nsuppress_warnings = ['autodoc.import_object']\n\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/', None),\n # 'numpy': ('http://docs.scipy.org/doc/numpy', None),\n 'pandas': ('http://pandas.pydata.org/pandas-docs/dev', None),\n 'torch': ('https://pytorch.org/docs/master', None),\n}\n\nnbsphinx_thumbnails = {\n 'tutorial/create_gnn':\n '_static/thumbnails/create_gnn.png',\n 'tutorial/heterogeneous':\n '_static/thumbnails/heterogeneous.png',\n 'tutorial/create_dataset':\n '_static/thumbnails/create_dataset.png',\n 'tutorial/load_csv':\n '_static/thumbnails/load_csv.png',\n 'tutorial/neighbor_loader':\n '_static/thumbnails/neighbor_loader.png',\n 'tutorial/explain':\n '_static/thumbnails/explain.png',\n 'tutorial/shallow_node_embeddings':\n '_static/thumbnails/shallow_node_embeddings.png',\n 'tutorial/multi_gpu_vanilla':\n '_static/thumbnails/multi_gpu_vanilla.png',\n 'tutorial/multi_node_multi_gpu_vanilla':\n '_static/thumbnails/multi_gpu_vanilla.png',\n}\n\n\ndef rst_jinja_render(app, _, source):\n if hasattr(app.builder, 'templates'):\n rst_context = {'torch_geometric': torch_geometric}\n source[0] = app.builder.templates.render_string(source[0], rst_context)\n\n\ndef setup(app):\n app.connect('source-read', rst_jinja_render)\n app.add_js_file('js/version_alert.js')\n", "path": "docs/source/conf.py"}]} | 1,199 | 142 |
gh_patches_debug_205 | rasdani/github-patches | git_diff | microsoft__superbenchmark-209 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
V0.3.0 Release Plan
# Release Manager
@TobeyQin
# Endgame
Code freeze: 9/1/2021
Bug Bash date: 9/2/2021
Release date: 9/17/2021
# Main Features
## SuperBench Framework
### SB Runner -- @abuccts
- [x] MPI mode implementation
PR: #146
### SB Benchmarks -- @guoshzhao
- [x] Docker Base
PR: #179 and #180
## Single-node Validation
### Micro-benchmarks -- @guoshzhao @yukirora
1. - [x] Memory (Tool: Nvidia Bandwidth Test Tool) -- @yukirora ETA: 5/28/2021
PR: #114
| Metrics | Unit | Description |
|---|---|---|
| H2D_Mem_BW_\<GPU ID> | GB/s | host-to-GPU bandwidth for each GPU |
| D2H_Mem_BW_\<GPU ID> | GB/s | GPU-to-host bandwidth for each GPU |
2. - [ ] Device P2P Bandwidth (Tool: Nvidia p2pBandwidthLatencyTest Tool) -- Delayed
| Metrics | Unit | Description |
|---|---|---|
| P2P_BW_Max | GB/s | The maximum bandwidth in Bidirectional P2P=Enabled Bandwidth Matrix for all GPUs |
| P2P_BW_Min | GB/s | The minimum bandwidth |
| P2P_BW_Avg | GB/s | The average bandwidth |
3. - [x] IBLoopback (Tool: PerfTest – Standard RDMA Test Tool) -- @yukirora ETA: 7/30/2021
PR: #112 and #129
| Metrics | Unit | Description |
|---|---|---|
| IB_Write | MB/s | The IB write loopback throughput with different message size |
| IB_Read | MB/s | The IB read loopback throughput with different message size |
| IB_Send | MB/s | The IB send loopback throughput with different message size |
4. - [x] NCCL (Tool: Nvidia NCCL Test) -- @yukirora ETA: 7/30/2021
PR: #113 and #128
| Metrics | Unit | Description |
|---|---|---|
| NCCL_AllReduce | GB/s | The NCCL AllReduce performance with different message size |
| NCCL_AllGather | GB/s | The NCCL AllGather performance with different message size |
| NCCL_broadcast | GB/s | The NCCL Broadcast performance with different message size |
| NCCL_reduce | GB/s | The NCCL Reduce performance with different message size |
| NCCL_reduce_scatter | GB/s | The NCCL ReduceScatter performance with different message size |
5. - [x] Disk (Tool: FIO – Standard Disk Performance Tool) -- @yzygitzh ETA: 7/30/2021
PR: #127 and #132 and #161
| Metrics | Unit | Description |
|---|---|---|
| Seq_Read | MB/s | Sequential read performance |
| Seq_Write | MB/s | Sequential write performance |
| Rand_Read | MB/s | Random read performance |
| Rand_Write | MB/s | Random write performance |
| Seq_R/W_Read | MB/s | Read performance in sequential read/write, fixed measurement (read:write = 4:1)|
| Seq_R/W_Write | MB/s | Write performance in sequential read/write (read:write = 4:1)|
| Rand_R/W_Read | MB/s | Read performance in random read/write (read:write = 4:1)|
| Rand_R/W_Write | MB/s | Write performance in random read/write (read:write = 4:1)|
6. - [x] H2D/D2H SM Transmission Bandwidth (Tool: MSR-A build) -- @yzygitzh ETA: 8/6/2021
PR: #162 and #169
| Metrics | Unit | Description |
|---|---|---|
| H2D_SM_BW_\<GPU ID>| GB/s | host-to-GPU bandwidth using GPU kernel for each GPU |
| D2H_SM_BW_\<GPU ID> | GB/s | GPU-to-host bandwidth using GPU kernel for each GPU |
###
## Support AMD
### Docker Image Support -- @guoshzhao ETA: 7/16/2021
- [x] ROCm 4.2 PyTorch 1.7 PR: #164
- [x] ROCm 4.0 PyTorch 1.7 PR: #164
### Micro Benchmarks
1. - [x] Kernel Launch (Tool: MSR-A build) -- @yukirora ETA: 7/30/2021
PR: #137 and #136
| Metrics | Unit | Description |
|---|---|---|
| Kernel_Launch_Event_Time | Time (ms) | Dispatch latency measured in GPU time using hipEventRecord() |
|Kernel_Launch_Wall_Time| Time (ms) | Dispatch latency measured in CPU time |
2. - [x] RCCL (Tool: AMD RCCL Test) -- @yukirora ETA: 7/30/2021
PR: #139 and #143
| Metrics | Unit | Description |
|---|---|---|
| RCCL_AllReduce | GB/s | The RCCL AllReduce performance with different message size |
| RCCL_AllGather | GB/s | The RCCL AllGather performance with different message size |
| RCCL_broadcast | GB/s | The RCCL Broadcast performance with different message size |
| RCCL_reduce | GB/s | The RCCL Reduce performance with different message size |
| RCCL_reduce_scatter | GB/s | The RCCL ReduceScatter performance with different message size |
3. - [x] GEMM FLOPS (Tool: AMD rocblas-bench Tool) -- @yukirora ETA: 8/27/2021
PR: #144 and #165
| Metrics | Unit | Description |
|---|---|---|
| FP64 | GFLOPS | FP64 FLOPS without MatrixCore |
| FP32 | GFLOPS | FP32 FLOPS without MatrixCore |
| FP16 | GFLOPS | FP16 FLOPS without MatrixCore |
| FP32(MC) | GFLOPS | TF32 FLOPS with MatrixCore |
| FP16(MC) | GFLOPS | FP16 FLOPS with MatrixCore |
| BF16(MC) | GFLOPS | BF16 FLOPS with MatrixCore |
| INT8(MC) | GOPS | INT8 FLOPS with MatrixCore |
| INT4(MC) | GOPS | INT4 FLOPS with MatrixCore |
4. - [x] Memory (Tool: HIP Bandwidth Test Tool) -- @yukirora ETA: 8/27/2021
PR: #159 and #153
| Metrics | Unit | Description |
|---|---|---|
| H2D_Mem_BW_\<GPU ID> | GB/s | host-to-GPU bandwidth for each GPU |
| D2H_Mem_BW_\<GPU ID> | GB/s | GPU-to-host bandwidth for each GPU |
### E2E Benchmarks -- @guoshzhao ETA: 7/16/2021
1. - [x] CNN models -- Use PyTorch TORCHVISION.MODELS sub-package
- ResNet: ResNet-50, ResNet-101, ResNet-152
- DenseNet: DenseNet-169, DenseNet-201
- VGG: VGG-11, VGG-13, VGG-16, VGG-19
2. - [x] BERT -- Use huggingface Transformers
- BERT
- BERT LARGE
3. - [x] LSTM -- Use PyTorch TORCH.NN sub-package
4. - [x] GPT-2 -- Use huggingface Transformers
## Result Summary -- @cp5555
- [x] Generate a report to summarize the results -- @guoshzhao ETA: 7/30/2021
PR: #147, #149, and #157
- [ ] Support basic analysis feature (boxplot figure, outlier detection, etc.)
## Bug Fix
- [x] VGG models failed on A100 GPU with batch_size=128 #115
PR: #134
## Other Improvement
1. Contribution related -- @lynex
- [x] Contribute rule (#131)
- [x] system information collection (#160)
2. Document -- @TobeyQin
- [x] Add release process doc (#130)
- [x] Add design documents (#125)
- [x] Add developer guide doc for coding style (#155)
- [x] Add contribution rules (#131)
- [x] Add docker image list (#154)
- [x] Add initial validation results
- [x] ~~Add metric reasoning doc -- @cp5555 @guoshzhao~~
3. Process monitor
- [ ] Add Heart beat to monitor process health
- [ ] Auto kill all processes on all nodes
4. Coding style -- @abuccts
- [x] Add vscode online
------------
## Backlogs
### Multi-Node Benchmarks
- Mellanox ClusterKit
- GPCNeT
### UI Design
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `superbench/__init__.py`
Content:
```
1 # Copyright (c) Microsoft Corporation.
2 # Licensed under the MIT License.
3
4 """SuperBench Python module.
5
6 Provide hardware and software benchmarks for AI systems.
7 """
8
9 __version__ = '0.2.1'
10 __author__ = 'Microsoft'
11
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/superbench/__init__.py b/superbench/__init__.py
--- a/superbench/__init__.py
+++ b/superbench/__init__.py
@@ -6,5 +6,5 @@
Provide hardware and software benchmarks for AI systems.
"""
-__version__ = '0.2.1'
+__version__ = '0.3.0'
__author__ = 'Microsoft'
| {"golden_diff": "diff --git a/superbench/__init__.py b/superbench/__init__.py\n--- a/superbench/__init__.py\n+++ b/superbench/__init__.py\n@@ -6,5 +6,5 @@\n Provide hardware and software benchmarks for AI systems.\n \"\"\"\n \n-__version__ = '0.2.1'\n+__version__ = '0.3.0'\n __author__ = 'Microsoft'\n", "issue": "V0.3.0 Release Plan\n# Release Manager\r\n@TobeyQin \r\n\r\n# Endgame\r\nCode freeze: 9/1/2021\r\nBug Bash date: 9/2/2021\r\nRelease date: 9/17/2021\r\n\r\n# Main Features\r\n## SuperBench Framework\r\n\r\n### SB Runner -- @abuccts \r\n\r\n- [x] MPI mode implementation\r\n PR: #146\r\n\r\n### SB Benchmarks -- @guoshzhao \r\n\r\n- [x] Docker Base\r\n PR: #179 and #180\r\n\r\n## Single-node Validation\r\n### Micro-benchmarks -- @guoshzhao @yukirora \r\n\r\n1. - [x] Memory (Tool: Nvidia Bandwidth Test Tool) -- @yukirora ETA: 5/28/2021 \r\n PR: #114 \r\n | Metrics | Unit | Description | \r\n |---|---|---| \r\n | H2D_Mem_BW_\\<GPU ID> | GB/s | host-to-GPU bandwidth for each GPU |\r\n | D2H_Mem_BW_\\<GPU ID> | GB/s | GPU-to-host bandwidth for each GPU |\r\n\r\n2. - [ ] Device P2P Bandwidth (Tool: Nvidia p2pBandwidthLatencyTest Tool) -- Delayed\r\n\r\n | Metrics | Unit | Description | \r\n |---|---|---|\r\n | P2P_BW_Max | GB/s | The maximum bandwidth in Bidirectional P2P=Enabled Bandwidth Matrix for all GPUs |\r\n | P2P_BW_Min | GB/s | The minimum bandwidth |\r\n | P2P_BW_Avg | GB/s | The average bandwidth |\r\n \r\n3. - [x] IBLoopback (Tool: PerfTest \u2013 Standard RDMA Test Tool) -- @yukirora ETA: 7/30/2021 \r\n PR: #112 and #129 \r\n | Metrics | Unit | Description | \r\n |---|---|---| \r\n | IB_Write | MB/s | The IB write loopback throughput with different message size |\r\n | IB_Read | MB/s | The IB read loopback throughput with different message size |\r\n | IB_Send | MB/s | The IB send loopback throughput with different message size |\r\n\r\n4. - [x] NCCL (Tool: Nvidia NCCL Test) -- @yukirora ETA: 7/30/2021\r\n PR: #113 and #128\r\n | Metrics | Unit | Description | \r\n |---|---|---|\r\n | NCCL_AllReduce | GB/s | The NCCL AllReduce performance with different message size |\r\n | NCCL_AllGather | GB/s | The NCCL AllGather performance with different message size |\r\n | NCCL_broadcast | GB/s | The NCCL Broadcast performance with different message size |\r\n | NCCL_reduce | GB/s | The NCCL Reduce performance with different message size |\r\n | NCCL_reduce_scatter | GB/s | The NCCL ReduceScatter performance with different message size |\r\n\r\n5. - [x] Disk (Tool: FIO \u2013 Standard Disk Performance Tool) -- @yzygitzh ETA: 7/30/2021\r\n PR: #127 and #132 and #161\r\n | Metrics | Unit | Description | \r\n |---|---|---|\r\n | Seq_Read | MB/s | Sequential read performance |\r\n | Seq_Write | MB/s | Sequential write performance |\r\n | Rand_Read | MB/s | Random read performance |\r\n | Rand_Write | MB/s | Random write performance |\r\n | Seq_R/W_Read | MB/s | Read performance in sequential read/write, fixed measurement (read:write = 4:1)|\r\n | Seq_R/W_Write | MB/s | Write performance in sequential read/write (read:write = 4:1)|\r\n | Rand_R/W_Read | MB/s | Read performance in random read/write (read:write = 4:1)|\r\n | Rand_R/W_Write | MB/s | Write performance in random read/write (read:write = 4:1)|\r\n\r\n6. 
- [x] H2D/D2H SM Transmission Bandwidth (Tool: MSR-A build) -- @yzygitzh ETA: 8/6/2021\r\n PR: #162 and #169 \r\n | Metrics | Unit | Description | \r\n |---|---|---|\r\n | H2D_SM_BW_\\<GPU ID>| GB/s | host-to-GPU bandwidth using GPU kernel for each GPU |\r\n | D2H_SM_BW_\\<GPU ID> | GB/s | GPU-to-host bandwidth using GPU kernel for each GPU |\r\n\r\n### \r\n## Support AMD\r\n\r\n### Docker Image Support -- @guoshzhao ETA: 7/16/2021\r\n\r\n- [x] ROCm 4.2 PyTorch 1.7 PR: #164\r\n- [x] ROCm 4.0 PyTorch 1.7 PR: #164\r\n\r\n### Micro Benchmarks\r\n1. - [x] Kernel Launch (Tool: MSR-A build) -- @yukirora ETA: 7/30/2021\r\n PR: #137 and #136 \r\n | Metrics | Unit | Description |\r\n |---|---|---|\r\n | Kernel_Launch_Event_Time | Time (ms) | Dispatch latency measured in GPU time using hipEventRecord() |\r\n |Kernel_Launch_Wall_Time| Time (ms) | Dispatch latency measured in CPU time |\r\n\r\n2. - [x] RCCL (Tool: AMD RCCL Test) -- @yukirora ETA: 7/30/2021\r\n PR: #139 and #143 \r\n | Metrics | Unit | Description | \r\n |---|---|---|\r\n | RCCL_AllReduce | GB/s | The RCCL AllReduce performance with different message size |\r\n | RCCL_AllGather | GB/s | The RCCL AllGather performance with different message size |\r\n | RCCL_broadcast | GB/s | The RCCL Broadcast performance with different message size |\r\n | RCCL_reduce | GB/s | The RCCL Reduce performance with different message size |\r\n | RCCL_reduce_scatter | GB/s | The RCCL ReduceScatter performance with different message size |\r\n\r\n3. - [x] GEMM FLOPS (Tool: AMD rocblas-bench Tool) -- @yukirora ETA: 8/27/2021\r\n PR: #144 and #165\r\n | Metrics | Unit | Description |\r\n |---|---|---|\r\n | FP64 | GFLOPS | FP64 FLOPS without MatrixCore |\r\n | FP32 | GFLOPS | FP32 FLOPS without MatrixCore |\r\n | FP16 | GFLOPS | FP16 FLOPS without MatrixCore |\r\n | FP32(MC) | GFLOPS | TF32 FLOPS with MatrixCore |\r\n | FP16(MC) | GFLOPS | FP16 FLOPS with MatrixCore |\r\n | BF16(MC) | GFLOPS | BF16 FLOPS with MatrixCore |\r\n | INT8(MC) | GOPS | INT8 FLOPS with MatrixCore |\r\n | INT4(MC) | GOPS | INT4 FLOPS with MatrixCore |\r\n\r\n4. - [x] Memory (Tool: HIP Bandwidth Test Tool) -- @yukirora ETA: 8/27/2021 \r\n PR: #159 and #153\r\n | Metrics | Unit | Description | \r\n |---|---|---| \r\n | H2D_Mem_BW_\\<GPU ID> | GB/s | host-to-GPU bandwidth for each GPU |\r\n | D2H_Mem_BW_\\<GPU ID> | GB/s | GPU-to-host bandwidth for each GPU |\r\n\r\n### E2E Benchmarks -- @guoshzhao ETA: 7/16/2021\r\n1. - [x] CNN models -- User PyTorch TORCHVISION.MODELS sub-package\r\n - ResNet: ResNet-50, ResNet-101, ResNet-152\r\n - DenseNet: DenseNet-169, DenseNet-201\u00a0\u200b\r\n - VGG: VGG-11, VGG-13, VGG-16, VGG-19\u200b\r\n2. - [x] BERT -- Use huggingface Transformers\r\n - BERT\r\n - BERT LARGE\r\n3. - [x] LSTM -- Use PyTorch TORCH.NN sub-package\r\n4. - [x] GPT-2 -- Use huggingface Transformers\r\n\r\n## Result Summary -- @cp5555\r\n\r\n- [x] Generate a report to summarize the results -- @guoshzhao ETA: 7/30/2021\r\n PR: #147, #149, and #157\r\n- [ ] Support basic analysis feature (boxplot figure, outlier detection, etc.) \r\n\r\n## Bug Fix\r\n- [x] VGG models failed on A100 GPU with batch_size=128 #115 \r\n PR: #134 \r\n## Other Improvement\r\n\r\n1. Contribution related -- @lynex \r\n - [x] Contribute rule (#131)\r\n - [x] system information collection (#160)\r\n\r\n2. 
Document -- @TobeyQin \r\n - [x] Add release process doc (#130)\r\n - [x] Add design documents (#125)\r\n - [x] Add developer guide doc for coding style (#155)\r\n - [x] Add contribution rules (#131)\r\n - [x] Add docker image list (#154)\r\n - [x] Add initial validation results\r\n - [x] ~~Add metric reasoning doc -- @cp5555 @guoshzhao~~\r\n\r\n3. Process monitor\r\n - [ ] Add Heart beat to monitor process health\r\n - [ ] Auto kill all processes on all nodes\r\n\r\n4. Coding style -- @abuccts \r\n - [x] Add vscode online\r\n\r\n------------\r\n## Backlogs\r\n\r\n### Multi-Node Benchmarks\r\n- Mellanox ClusterKit\r\n- GPCNeT\r\n\r\n### UI Design\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n\n\"\"\"SuperBench Python module.\n\nProvide hardware and software benchmarks for AI systems.\n\"\"\"\n\n__version__ = '0.2.1'\n__author__ = 'Microsoft'\n", "path": "superbench/__init__.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n\n\"\"\"SuperBench Python module.\n\nProvide hardware and software benchmarks for AI systems.\n\"\"\"\n\n__version__ = '0.3.0'\n__author__ = 'Microsoft'\n", "path": "superbench/__init__.py"}]} | 2,640 | 92 |
gh_patches_debug_22791 | rasdani/github-patches | git_diff | azavea__raster-vision-1495 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow `neg_ratio` to be None in `ObjectDetectionGeoDataWindowConfig`
## 🚀 Feature
See the following discussion on Gitter: https://gitter.im/azavea/raster-vision?at=6298bff6ef00bd1dc60fc7ee
`ObjectDetectionGeoDataWindowConfig` should allow `neg_ratio` to be `None` since `ObjectDetectionRandomWindowGeoDataset` allows it.
## Motivation
It's a feature that already exists. This will allow users to actually make use of it.
## Pitch
`ObjectDetectionGeoDataWindowConfig.neg_ratio`'s type hint will need to be changed to `Optional[float]`.
## Alternatives
N/A
## Additional context
N/A
Allow `neg_ratio` to be None in `ObjectDetectionGeoDataWindowConfig`
## 🚀 Feature
See the following discussion on Gitter: https://gitter.im/azavea/raster-vision?at=6298bff6ef00bd1dc60fc7ee
`ObjectDetectionGeoDataWindowConfig` should allow `neg_ratio` to be `None` since `ObjectDetectionRandomWindowGeoDataset` allows it.
## Motivation
It's a feature that already exists. This will allow users to actually make use of it.
## Pitch
`ObjectDetectionGeoDataWindowConfig.neg_ratio`'s type hint will need to be changed to `Optional[float]`.
## Alternatives
N/A
## Additional context
N/A
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rastervision_pytorch_learner/rastervision/pytorch_learner/object_detection_learner_config.py`
Content:
```
1 from typing import TYPE_CHECKING, Optional, Union
2 from enum import Enum
3 from os.path import join
4 import logging
5
6 import albumentations as A
7
8 from torchvision.models.detection.backbone_utils import resnet_fpn_backbone
9
10 from rastervision.core.data import Scene
11 from rastervision.pipeline.config import (Config, register_config, Field,
12 validator, ConfigError)
13 from rastervision.pytorch_learner.learner_config import (
14 LearnerConfig, ModelConfig, Backbone, ImageDataConfig, GeoDataConfig,
15 GeoDataWindowMethod, GeoDataWindowConfig)
16 from rastervision.pytorch_learner.dataset import (
17 ObjectDetectionImageDataset, ObjectDetectionSlidingWindowGeoDataset,
18 ObjectDetectionRandomWindowGeoDataset)
19 from rastervision.pytorch_learner.utils import adjust_conv_channels
20 from torchvision.models.detection.faster_rcnn import FasterRCNN
21
22 if TYPE_CHECKING:
23 from rastervision.pytorch_learner.learner_config import SolverConfig
24
25 log = logging.getLogger(__name__)
26
27 DEFAULT_BBOX_PARAMS = A.BboxParams(
28 format='albumentations', label_fields=['category_id'])
29
30
31 class ObjectDetectionDataFormat(Enum):
32 coco = 'coco'
33
34
35 def objdet_data_config_upgrader(cfg_dict, version):
36 if version == 1:
37 cfg_dict['type_hint'] = 'object_detection_image_data'
38 return cfg_dict
39
40
41 @register_config('object_detection_data', upgrader=objdet_data_config_upgrader)
42 class ObjectDetectionDataConfig(Config):
43 def get_bbox_params(self):
44 return DEFAULT_BBOX_PARAMS
45
46
47 @register_config('object_detection_image_data')
48 class ObjectDetectionImageDataConfig(ObjectDetectionDataConfig,
49 ImageDataConfig):
50 data_format: ObjectDetectionDataFormat = ObjectDetectionDataFormat.coco
51
52 def dir_to_dataset(self, data_dir: str, transform: A.BasicTransform
53 ) -> ObjectDetectionImageDataset:
54 img_dir = join(data_dir, 'img')
55 annotation_uri = join(data_dir, 'labels.json')
56 ds = ObjectDetectionImageDataset(
57 img_dir, annotation_uri, transform=transform)
58 return ds
59
60
61 @register_config('object_detection_geo_data_window')
62 class ObjectDetectionGeoDataWindowConfig(GeoDataWindowConfig):
63 ioa_thresh: float = Field(
64 0.8,
65 description='When a box is partially outside of a training chip, it '
66 'is not clear if (a clipped version) of the box should be included in '
67 'the chip. If the IOA (intersection over area) of the box with the '
68 'chip is greater than ioa_thresh, it is included in the chip. '
69 'Defaults to 0.8.')
70 clip: bool = Field(
71 False,
72 description='Clip bounding boxes to window limits when retrieving '
73 'labels for a window.')
74 neg_ratio: float = Field(
75 1.0,
76 description='The ratio of negative chips (those containing no '
77 'bounding boxes) to positive chips. This can be useful if the '
78 'statistics of the background is different in positive chips. For '
79 'example, in car detection, the positive chips will always contain '
80 'roads, but no examples of rooftops since cars tend to not be near '
81 'rooftops. Defaults to 1.0.')
82 neg_ioa_thresh: float = Field(
83 0.2,
84 description='A window will be considered negative if its max IoA with '
85 'any bounding box is less than this threshold. Defaults to 0.2.')
86
87
88 @register_config('object_detection_geo_data')
89 class ObjectDetectionGeoDataConfig(ObjectDetectionDataConfig, GeoDataConfig):
90 def scene_to_dataset(
91 self,
92 scene: Scene,
93 transform: Optional[A.BasicTransform] = None,
94 bbox_params: Optional[A.BboxParams] = DEFAULT_BBOX_PARAMS
95 ) -> Union[ObjectDetectionSlidingWindowGeoDataset,
96 ObjectDetectionRandomWindowGeoDataset]:
97 if isinstance(self.window_opts, dict):
98 opts = self.window_opts[scene.id]
99 else:
100 opts = self.window_opts
101
102 if opts.method == GeoDataWindowMethod.sliding:
103 ds = ObjectDetectionSlidingWindowGeoDataset(
104 scene,
105 size=opts.size,
106 stride=opts.stride,
107 padding=opts.padding,
108 transform=transform)
109 elif opts.method == GeoDataWindowMethod.random:
110 ds = ObjectDetectionRandomWindowGeoDataset(
111 scene,
112 size_lims=opts.size_lims,
113 h_lims=opts.h_lims,
114 w_lims=opts.w_lims,
115 out_size=opts.size,
116 padding=opts.padding,
117 max_windows=opts.max_windows,
118 max_sample_attempts=opts.max_sample_attempts,
119 bbox_params=bbox_params,
120 ioa_thresh=opts.ioa_thresh,
121 clip=opts.clip,
122 neg_ratio=opts.neg_ratio,
123 neg_ioa_thresh=opts.neg_ioa_thresh,
124 efficient_aoi_sampling=opts.efficient_aoi_sampling,
125 transform=transform)
126 else:
127 raise NotImplementedError()
128 return ds
129
130
131 @register_config('object_detection_model')
132 class ObjectDetectionModelConfig(ModelConfig):
133 backbone: Backbone = Field(
134 Backbone.resnet50,
135 description=
136 ('The torchvision.models backbone to use, which must be in the resnet* '
137 'family.'))
138
139 @validator('backbone')
140 def only_valid_backbones(cls, v):
141 if v not in [
142 Backbone.resnet18, Backbone.resnet34, Backbone.resnet50,
143 Backbone.resnet101, Backbone.resnet152
144 ]:
145 raise ValueError(
146 'The backbone for Faster-RCNN must be in the resnet* '
147 'family.')
148 return v
149
150 def build_default_model(self, num_classes: int, in_channels: int,
151 img_sz: int) -> FasterRCNN:
152 """Returns a FasterRCNN model.
153
154 Note that the model returned will have (num_classes + 2) output
155 classes. +1 for the null class (zeroth index), and another +1
156 (last index) for backward compatibility with earlier Raster Vision
157 versions.
158
159 Returns:
160 FasterRCNN: a FasterRCNN model.
161 """
162 pretrained = self.pretrained
163 backbone_arch = self.get_backbone_str()
164 backbone = resnet_fpn_backbone(backbone_arch, pretrained)
165
166 # default values from FasterRCNN constructor
167 image_mean = [0.485, 0.456, 0.406]
168 image_std = [0.229, 0.224, 0.225]
169
170 if in_channels != 3:
171 extra_channels = in_channels - backbone.body['conv1'].in_channels
172
173 # adjust channels
174 backbone.body['conv1'] = adjust_conv_channels(
175 old_conv=backbone.body['conv1'],
176 in_channels=in_channels,
177 pretrained=pretrained)
178
179 # adjust stats
180 if extra_channels < 0:
181 image_mean = image_mean[:extra_channels]
182 image_std = image_std[:extra_channels]
183 else:
184 # arbitrarily set mean and stds of the new channels to
185 # something similar to the values of the other 3 channels
186 image_mean = image_mean + [.45] * extra_channels
187 image_std = image_std + [.225] * extra_channels
188
189 model = FasterRCNN(
190 backbone=backbone,
191 # +1 because torchvision detection models reserve 0 for the null
192 # class, another +1 for backward compatibility with earlier Raster
193 # Vision versions
194 num_classes=num_classes + 1 + 1,
195 # TODO we shouldn't need to pass the image size here
196 min_size=img_sz,
197 max_size=img_sz,
198 image_mean=image_mean,
199 image_std=image_std)
200 return model
201
202
203 @register_config('object_detection_learner')
204 class ObjectDetectionLearnerConfig(LearnerConfig):
205 data: Union[ObjectDetectionImageDataConfig, ObjectDetectionGeoDataConfig]
206 model: Optional[ObjectDetectionModelConfig]
207
208 def build(self,
209 tmp_dir=None,
210 model_weights_path=None,
211 model_def_path=None,
212 loss_def_path=None,
213 training=True):
214 from rastervision.pytorch_learner.object_detection_learner import (
215 ObjectDetectionLearner)
216 return ObjectDetectionLearner(
217 self,
218 tmp_dir=tmp_dir,
219 model_weights_path=model_weights_path,
220 model_def_path=model_def_path,
221 loss_def_path=loss_def_path,
222 training=training)
223
224 @validator('solver')
225 def validate_solver_config(cls, v: 'SolverConfig') -> 'SolverConfig':
226 if v.ignore_class_index is not None:
227 raise ConfigError(
228 'ignore_last_class is not supported for Object Detection.')
229 if v.class_loss_weights is not None:
230 raise ConfigError(
231 'class_loss_weights is currently not supported for '
232 'Object Detection.')
233 if v.external_loss_def is not None:
234 raise ConfigError(
235 'external_loss_def is currently not supported for '
236 'Object Detection. Raster Vision expects object '
237 'detection models to behave like TorchVision object detection '
238 'models, and these models compute losses internally. So, if '
239 'you want to use a custom loss function, you can create a '
240 'custom model that implements that loss function and use that '
241 'model via external_model_def. See cowc_potsdam.py for an '
242 'example of how to use a custom object detection model.')
243 return v
244
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rastervision_pytorch_learner/rastervision/pytorch_learner/object_detection_learner_config.py b/rastervision_pytorch_learner/rastervision/pytorch_learner/object_detection_learner_config.py
--- a/rastervision_pytorch_learner/rastervision/pytorch_learner/object_detection_learner_config.py
+++ b/rastervision_pytorch_learner/rastervision/pytorch_learner/object_detection_learner_config.py
@@ -71,14 +71,14 @@
False,
description='Clip bounding boxes to window limits when retrieving '
'labels for a window.')
- neg_ratio: float = Field(
- 1.0,
+ neg_ratio: Optional[float] = Field(
+ None,
description='The ratio of negative chips (those containing no '
'bounding boxes) to positive chips. This can be useful if the '
'statistics of the background is different in positive chips. For '
'example, in car detection, the positive chips will always contain '
'roads, but no examples of rooftops since cars tend to not be near '
- 'rooftops. Defaults to 1.0.')
+ 'rooftops. Defaults to None.')
neg_ioa_thresh: float = Field(
0.2,
description='A window will be considered negative if its max IoA with '
| {"golden_diff": "diff --git a/rastervision_pytorch_learner/rastervision/pytorch_learner/object_detection_learner_config.py b/rastervision_pytorch_learner/rastervision/pytorch_learner/object_detection_learner_config.py\n--- a/rastervision_pytorch_learner/rastervision/pytorch_learner/object_detection_learner_config.py\n+++ b/rastervision_pytorch_learner/rastervision/pytorch_learner/object_detection_learner_config.py\n@@ -71,14 +71,14 @@\n False,\n description='Clip bounding boxes to window limits when retrieving '\n 'labels for a window.')\n- neg_ratio: float = Field(\n- 1.0,\n+ neg_ratio: Optional[float] = Field(\n+ None,\n description='The ratio of negative chips (those containing no '\n 'bounding boxes) to positive chips. This can be useful if the '\n 'statistics of the background is different in positive chips. For '\n 'example, in car detection, the positive chips will always contain '\n 'roads, but no examples of rooftops since cars tend to not be near '\n- 'rooftops. Defaults to 1.0.')\n+ 'rooftops. Defaults to None.')\n neg_ioa_thresh: float = Field(\n 0.2,\n description='A window will be considered negative if its max IoA with '\n", "issue": "Allow `neg_ratio` to be None in `ObjectDetectionGeoDataWindowConfig`\n## \ud83d\ude80 Feature\r\n\r\nSee the following discussion on Gitter: https://gitter.im/azavea/raster-vision?at=6298bff6ef00bd1dc60fc7ee\r\n\r\n`ObjectDetectionGeoDataWindowConfig` should allow `neg_ratio` to be `None` since `ObjectDetectionRandomWindowGeoDataset` allows it.\r\n\r\n## Motivation\r\n\r\nIt's a feature that already exists. This will allow users to actually make use of it.\r\n\r\n## Pitch\r\n\r\n`ObjectDetectionGeoDataWindowConfig.neg_ratio`'s type hint will need to be changed to `Optional[float]`.\r\n\r\n## Alternatives\r\n\r\nN/A\r\n\r\n## Additional context\r\n\r\nN/A\r\n\nAllow `neg_ratio` to be None in `ObjectDetectionGeoDataWindowConfig`\n## \ud83d\ude80 Feature\r\n\r\nSee the following discussion on Gitter: https://gitter.im/azavea/raster-vision?at=6298bff6ef00bd1dc60fc7ee\r\n\r\n`ObjectDetectionGeoDataWindowConfig` should allow `neg_ratio` to be `None` since `ObjectDetectionRandomWindowGeoDataset` allows it.\r\n\r\n## Motivation\r\n\r\nIt's a feature that already exists. 
This will allow users to actually make use of it.\r\n\r\n## Pitch\r\n\r\n`ObjectDetectionGeoDataWindowConfig.neg_ratio`'s type hint will need to be changed to `Optional[float]`.\r\n\r\n## Alternatives\r\n\r\nN/A\r\n\r\n## Additional context\r\n\r\nN/A\r\n\n", "before_files": [{"content": "from typing import TYPE_CHECKING, Optional, Union\nfrom enum import Enum\nfrom os.path import join\nimport logging\n\nimport albumentations as A\n\nfrom torchvision.models.detection.backbone_utils import resnet_fpn_backbone\n\nfrom rastervision.core.data import Scene\nfrom rastervision.pipeline.config import (Config, register_config, Field,\n validator, ConfigError)\nfrom rastervision.pytorch_learner.learner_config import (\n LearnerConfig, ModelConfig, Backbone, ImageDataConfig, GeoDataConfig,\n GeoDataWindowMethod, GeoDataWindowConfig)\nfrom rastervision.pytorch_learner.dataset import (\n ObjectDetectionImageDataset, ObjectDetectionSlidingWindowGeoDataset,\n ObjectDetectionRandomWindowGeoDataset)\nfrom rastervision.pytorch_learner.utils import adjust_conv_channels\nfrom torchvision.models.detection.faster_rcnn import FasterRCNN\n\nif TYPE_CHECKING:\n from rastervision.pytorch_learner.learner_config import SolverConfig\n\nlog = logging.getLogger(__name__)\n\nDEFAULT_BBOX_PARAMS = A.BboxParams(\n format='albumentations', label_fields=['category_id'])\n\n\nclass ObjectDetectionDataFormat(Enum):\n coco = 'coco'\n\n\ndef objdet_data_config_upgrader(cfg_dict, version):\n if version == 1:\n cfg_dict['type_hint'] = 'object_detection_image_data'\n return cfg_dict\n\n\n@register_config('object_detection_data', upgrader=objdet_data_config_upgrader)\nclass ObjectDetectionDataConfig(Config):\n def get_bbox_params(self):\n return DEFAULT_BBOX_PARAMS\n\n\n@register_config('object_detection_image_data')\nclass ObjectDetectionImageDataConfig(ObjectDetectionDataConfig,\n ImageDataConfig):\n data_format: ObjectDetectionDataFormat = ObjectDetectionDataFormat.coco\n\n def dir_to_dataset(self, data_dir: str, transform: A.BasicTransform\n ) -> ObjectDetectionImageDataset:\n img_dir = join(data_dir, 'img')\n annotation_uri = join(data_dir, 'labels.json')\n ds = ObjectDetectionImageDataset(\n img_dir, annotation_uri, transform=transform)\n return ds\n\n\n@register_config('object_detection_geo_data_window')\nclass ObjectDetectionGeoDataWindowConfig(GeoDataWindowConfig):\n ioa_thresh: float = Field(\n 0.8,\n description='When a box is partially outside of a training chip, it '\n 'is not clear if (a clipped version) of the box should be included in '\n 'the chip. If the IOA (intersection over area) of the box with the '\n 'chip is greater than ioa_thresh, it is included in the chip. '\n 'Defaults to 0.8.')\n clip: bool = Field(\n False,\n description='Clip bounding boxes to window limits when retrieving '\n 'labels for a window.')\n neg_ratio: float = Field(\n 1.0,\n description='The ratio of negative chips (those containing no '\n 'bounding boxes) to positive chips. This can be useful if the '\n 'statistics of the background is different in positive chips. For '\n 'example, in car detection, the positive chips will always contain '\n 'roads, but no examples of rooftops since cars tend to not be near '\n 'rooftops. Defaults to 1.0.')\n neg_ioa_thresh: float = Field(\n 0.2,\n description='A window will be considered negative if its max IoA with '\n 'any bounding box is less than this threshold. 
Defaults to 0.2.')\n\n\n@register_config('object_detection_geo_data')\nclass ObjectDetectionGeoDataConfig(ObjectDetectionDataConfig, GeoDataConfig):\n def scene_to_dataset(\n self,\n scene: Scene,\n transform: Optional[A.BasicTransform] = None,\n bbox_params: Optional[A.BboxParams] = DEFAULT_BBOX_PARAMS\n ) -> Union[ObjectDetectionSlidingWindowGeoDataset,\n ObjectDetectionRandomWindowGeoDataset]:\n if isinstance(self.window_opts, dict):\n opts = self.window_opts[scene.id]\n else:\n opts = self.window_opts\n\n if opts.method == GeoDataWindowMethod.sliding:\n ds = ObjectDetectionSlidingWindowGeoDataset(\n scene,\n size=opts.size,\n stride=opts.stride,\n padding=opts.padding,\n transform=transform)\n elif opts.method == GeoDataWindowMethod.random:\n ds = ObjectDetectionRandomWindowGeoDataset(\n scene,\n size_lims=opts.size_lims,\n h_lims=opts.h_lims,\n w_lims=opts.w_lims,\n out_size=opts.size,\n padding=opts.padding,\n max_windows=opts.max_windows,\n max_sample_attempts=opts.max_sample_attempts,\n bbox_params=bbox_params,\n ioa_thresh=opts.ioa_thresh,\n clip=opts.clip,\n neg_ratio=opts.neg_ratio,\n neg_ioa_thresh=opts.neg_ioa_thresh,\n efficient_aoi_sampling=opts.efficient_aoi_sampling,\n transform=transform)\n else:\n raise NotImplementedError()\n return ds\n\n\n@register_config('object_detection_model')\nclass ObjectDetectionModelConfig(ModelConfig):\n backbone: Backbone = Field(\n Backbone.resnet50,\n description=\n ('The torchvision.models backbone to use, which must be in the resnet* '\n 'family.'))\n\n @validator('backbone')\n def only_valid_backbones(cls, v):\n if v not in [\n Backbone.resnet18, Backbone.resnet34, Backbone.resnet50,\n Backbone.resnet101, Backbone.resnet152\n ]:\n raise ValueError(\n 'The backbone for Faster-RCNN must be in the resnet* '\n 'family.')\n return v\n\n def build_default_model(self, num_classes: int, in_channels: int,\n img_sz: int) -> FasterRCNN:\n \"\"\"Returns a FasterRCNN model.\n\n Note that the model returned will have (num_classes + 2) output\n classes. 
+1 for the null class (zeroth index), and another +1\n (last index) for backward compatibility with earlier Raster Vision\n versions.\n\n Returns:\n FasterRCNN: a FasterRCNN model.\n \"\"\"\n pretrained = self.pretrained\n backbone_arch = self.get_backbone_str()\n backbone = resnet_fpn_backbone(backbone_arch, pretrained)\n\n # default values from FasterRCNN constructor\n image_mean = [0.485, 0.456, 0.406]\n image_std = [0.229, 0.224, 0.225]\n\n if in_channels != 3:\n extra_channels = in_channels - backbone.body['conv1'].in_channels\n\n # adjust channels\n backbone.body['conv1'] = adjust_conv_channels(\n old_conv=backbone.body['conv1'],\n in_channels=in_channels,\n pretrained=pretrained)\n\n # adjust stats\n if extra_channels < 0:\n image_mean = image_mean[:extra_channels]\n image_std = image_std[:extra_channels]\n else:\n # arbitrarily set mean and stds of the new channels to\n # something similar to the values of the other 3 channels\n image_mean = image_mean + [.45] * extra_channels\n image_std = image_std + [.225] * extra_channels\n\n model = FasterRCNN(\n backbone=backbone,\n # +1 because torchvision detection models reserve 0 for the null\n # class, another +1 for backward compatibility with earlier Raster\n # Vision versions\n num_classes=num_classes + 1 + 1,\n # TODO we shouldn't need to pass the image size here\n min_size=img_sz,\n max_size=img_sz,\n image_mean=image_mean,\n image_std=image_std)\n return model\n\n\n@register_config('object_detection_learner')\nclass ObjectDetectionLearnerConfig(LearnerConfig):\n data: Union[ObjectDetectionImageDataConfig, ObjectDetectionGeoDataConfig]\n model: Optional[ObjectDetectionModelConfig]\n\n def build(self,\n tmp_dir=None,\n model_weights_path=None,\n model_def_path=None,\n loss_def_path=None,\n training=True):\n from rastervision.pytorch_learner.object_detection_learner import (\n ObjectDetectionLearner)\n return ObjectDetectionLearner(\n self,\n tmp_dir=tmp_dir,\n model_weights_path=model_weights_path,\n model_def_path=model_def_path,\n loss_def_path=loss_def_path,\n training=training)\n\n @validator('solver')\n def validate_solver_config(cls, v: 'SolverConfig') -> 'SolverConfig':\n if v.ignore_class_index is not None:\n raise ConfigError(\n 'ignore_last_class is not supported for Object Detection.')\n if v.class_loss_weights is not None:\n raise ConfigError(\n 'class_loss_weights is currently not supported for '\n 'Object Detection.')\n if v.external_loss_def is not None:\n raise ConfigError(\n 'external_loss_def is currently not supported for '\n 'Object Detection. Raster Vision expects object '\n 'detection models to behave like TorchVision object detection '\n 'models, and these models compute losses internally. So, if '\n 'you want to use a custom loss function, you can create a '\n 'custom model that implements that loss function and use that '\n 'model via external_model_def. 
See cowc_potsdam.py for an '\n 'example of how to use a custom object detection model.')\n return v\n", "path": "rastervision_pytorch_learner/rastervision/pytorch_learner/object_detection_learner_config.py"}], "after_files": [{"content": "from typing import TYPE_CHECKING, Optional, Union\nfrom enum import Enum\nfrom os.path import join\nimport logging\n\nimport albumentations as A\n\nfrom torchvision.models.detection.backbone_utils import resnet_fpn_backbone\n\nfrom rastervision.core.data import Scene\nfrom rastervision.pipeline.config import (Config, register_config, Field,\n validator, ConfigError)\nfrom rastervision.pytorch_learner.learner_config import (\n LearnerConfig, ModelConfig, Backbone, ImageDataConfig, GeoDataConfig,\n GeoDataWindowMethod, GeoDataWindowConfig)\nfrom rastervision.pytorch_learner.dataset import (\n ObjectDetectionImageDataset, ObjectDetectionSlidingWindowGeoDataset,\n ObjectDetectionRandomWindowGeoDataset)\nfrom rastervision.pytorch_learner.utils import adjust_conv_channels\nfrom torchvision.models.detection.faster_rcnn import FasterRCNN\n\nif TYPE_CHECKING:\n from rastervision.pytorch_learner.learner_config import SolverConfig\n\nlog = logging.getLogger(__name__)\n\nDEFAULT_BBOX_PARAMS = A.BboxParams(\n format='albumentations', label_fields=['category_id'])\n\n\nclass ObjectDetectionDataFormat(Enum):\n coco = 'coco'\n\n\ndef objdet_data_config_upgrader(cfg_dict, version):\n if version == 1:\n cfg_dict['type_hint'] = 'object_detection_image_data'\n return cfg_dict\n\n\n@register_config('object_detection_data', upgrader=objdet_data_config_upgrader)\nclass ObjectDetectionDataConfig(Config):\n def get_bbox_params(self):\n return DEFAULT_BBOX_PARAMS\n\n\n@register_config('object_detection_image_data')\nclass ObjectDetectionImageDataConfig(ObjectDetectionDataConfig,\n ImageDataConfig):\n data_format: ObjectDetectionDataFormat = ObjectDetectionDataFormat.coco\n\n def dir_to_dataset(self, data_dir: str, transform: A.BasicTransform\n ) -> ObjectDetectionImageDataset:\n img_dir = join(data_dir, 'img')\n annotation_uri = join(data_dir, 'labels.json')\n ds = ObjectDetectionImageDataset(\n img_dir, annotation_uri, transform=transform)\n return ds\n\n\n@register_config('object_detection_geo_data_window')\nclass ObjectDetectionGeoDataWindowConfig(GeoDataWindowConfig):\n ioa_thresh: float = Field(\n 0.8,\n description='When a box is partially outside of a training chip, it '\n 'is not clear if (a clipped version) of the box should be included in '\n 'the chip. If the IOA (intersection over area) of the box with the '\n 'chip is greater than ioa_thresh, it is included in the chip. '\n 'Defaults to 0.8.')\n clip: bool = Field(\n False,\n description='Clip bounding boxes to window limits when retrieving '\n 'labels for a window.')\n neg_ratio: Optional[float] = Field(\n None,\n description='The ratio of negative chips (those containing no '\n 'bounding boxes) to positive chips. This can be useful if the '\n 'statistics of the background is different in positive chips. For '\n 'example, in car detection, the positive chips will always contain '\n 'roads, but no examples of rooftops since cars tend to not be near '\n 'rooftops. Defaults to None.')\n neg_ioa_thresh: float = Field(\n 0.2,\n description='A window will be considered negative if its max IoA with '\n 'any bounding box is less than this threshold. 
Defaults to 0.2.')\n\n\n@register_config('object_detection_geo_data')\nclass ObjectDetectionGeoDataConfig(ObjectDetectionDataConfig, GeoDataConfig):\n def scene_to_dataset(\n self,\n scene: Scene,\n transform: Optional[A.BasicTransform] = None,\n bbox_params: Optional[A.BboxParams] = DEFAULT_BBOX_PARAMS\n ) -> Union[ObjectDetectionSlidingWindowGeoDataset,\n ObjectDetectionRandomWindowGeoDataset]:\n if isinstance(self.window_opts, dict):\n opts = self.window_opts[scene.id]\n else:\n opts = self.window_opts\n\n if opts.method == GeoDataWindowMethod.sliding:\n ds = ObjectDetectionSlidingWindowGeoDataset(\n scene,\n size=opts.size,\n stride=opts.stride,\n padding=opts.padding,\n transform=transform)\n elif opts.method == GeoDataWindowMethod.random:\n ds = ObjectDetectionRandomWindowGeoDataset(\n scene,\n size_lims=opts.size_lims,\n h_lims=opts.h_lims,\n w_lims=opts.w_lims,\n out_size=opts.size,\n padding=opts.padding,\n max_windows=opts.max_windows,\n max_sample_attempts=opts.max_sample_attempts,\n bbox_params=bbox_params,\n ioa_thresh=opts.ioa_thresh,\n clip=opts.clip,\n neg_ratio=opts.neg_ratio,\n neg_ioa_thresh=opts.neg_ioa_thresh,\n efficient_aoi_sampling=opts.efficient_aoi_sampling,\n transform=transform)\n else:\n raise NotImplementedError()\n return ds\n\n\n@register_config('object_detection_model')\nclass ObjectDetectionModelConfig(ModelConfig):\n backbone: Backbone = Field(\n Backbone.resnet50,\n description=\n ('The torchvision.models backbone to use, which must be in the resnet* '\n 'family.'))\n\n @validator('backbone')\n def only_valid_backbones(cls, v):\n if v not in [\n Backbone.resnet18, Backbone.resnet34, Backbone.resnet50,\n Backbone.resnet101, Backbone.resnet152\n ]:\n raise ValueError(\n 'The backbone for Faster-RCNN must be in the resnet* '\n 'family.')\n return v\n\n def build_default_model(self, num_classes: int, in_channels: int,\n img_sz: int) -> FasterRCNN:\n \"\"\"Returns a FasterRCNN model.\n\n Note that the model returned will have (num_classes + 2) output\n classes. 
+1 for the null class (zeroth index), and another +1\n (last index) for backward compatibility with earlier Raster Vision\n versions.\n\n Returns:\n FasterRCNN: a FasterRCNN model.\n \"\"\"\n pretrained = self.pretrained\n backbone_arch = self.get_backbone_str()\n backbone = resnet_fpn_backbone(backbone_arch, pretrained)\n\n # default values from FasterRCNN constructor\n image_mean = [0.485, 0.456, 0.406]\n image_std = [0.229, 0.224, 0.225]\n\n if in_channels != 3:\n extra_channels = in_channels - backbone.body['conv1'].in_channels\n\n # adjust channels\n backbone.body['conv1'] = adjust_conv_channels(\n old_conv=backbone.body['conv1'],\n in_channels=in_channels,\n pretrained=pretrained)\n\n # adjust stats\n if extra_channels < 0:\n image_mean = image_mean[:extra_channels]\n image_std = image_std[:extra_channels]\n else:\n # arbitrarily set mean and stds of the new channels to\n # something similar to the values of the other 3 channels\n image_mean = image_mean + [.45] * extra_channels\n image_std = image_std + [.225] * extra_channels\n\n model = FasterRCNN(\n backbone=backbone,\n # +1 because torchvision detection models reserve 0 for the null\n # class, another +1 for backward compatibility with earlier Raster\n # Vision versions\n num_classes=num_classes + 1 + 1,\n # TODO we shouldn't need to pass the image size here\n min_size=img_sz,\n max_size=img_sz,\n image_mean=image_mean,\n image_std=image_std)\n return model\n\n\n@register_config('object_detection_learner')\nclass ObjectDetectionLearnerConfig(LearnerConfig):\n data: Union[ObjectDetectionImageDataConfig, ObjectDetectionGeoDataConfig]\n model: Optional[ObjectDetectionModelConfig]\n\n def build(self,\n tmp_dir=None,\n model_weights_path=None,\n model_def_path=None,\n loss_def_path=None,\n training=True):\n from rastervision.pytorch_learner.object_detection_learner import (\n ObjectDetectionLearner)\n return ObjectDetectionLearner(\n self,\n tmp_dir=tmp_dir,\n model_weights_path=model_weights_path,\n model_def_path=model_def_path,\n loss_def_path=loss_def_path,\n training=training)\n\n @validator('solver')\n def validate_solver_config(cls, v: 'SolverConfig') -> 'SolverConfig':\n if v.ignore_class_index is not None:\n raise ConfigError(\n 'ignore_last_class is not supported for Object Detection.')\n if v.class_loss_weights is not None:\n raise ConfigError(\n 'class_loss_weights is currently not supported for '\n 'Object Detection.')\n if v.external_loss_def is not None:\n raise ConfigError(\n 'external_loss_def is currently not supported for '\n 'Object Detection. Raster Vision expects object '\n 'detection models to behave like TorchVision object detection '\n 'models, and these models compute losses internally. So, if '\n 'you want to use a custom loss function, you can create a '\n 'custom model that implements that loss function and use that '\n 'model via external_model_def. See cowc_potsdam.py for an '\n 'example of how to use a custom object detection model.')\n return v\n", "path": "rastervision_pytorch_learner/rastervision/pytorch_learner/object_detection_learner_config.py"}]} | 3,292 | 300 |
gh_patches_debug_61190 | rasdani/github-patches | git_diff | benoitc__gunicorn-960 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Reloading sometimes gives a TypeError: 'NoneType' object is not callable
I'm running a custom app (subclass of `BaseApplication`) with reload set to True. In some situations, changing a file causes the following traceback:
```
Exception in thread Thread-1 (most likely raised during interpreter shutdown):
Traceback (most recent call last):
File "/usr/maxm/lib/python2.7/threading.py", line 551, in __bootstrap_inner
File "/usr/maxm/lib/python2.7/site-packages/gunicorn/reloader.py", line 52, in run
File "/usr/maxm/lib/python2.7/site-packages/gunicorn/workers/base.py", line 87, in changed
<type 'exceptions.TypeError'>: 'NoneType' object is not callable
```
It's intermittent; I can sometimes reproduce it several times in a row by touching the same file, and then it stops happening. It certainly doesn't seem to interfere with the reloading behavior.
Line 87 is only `raise SystemExit()`. But line 86 is `os.kill(self.pid, signal.SIGQUIT)`, so I think what's happening is that the interpreter has started to tear down the environment and `SystemExit` has become `None`. (See also [this](http://article.gmane.org/gmane.comp.python.general/387087/) mailing list post.)
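For what it's worth, here is a minimal sketch of the pattern I think is at play (hypothetical, not gunicorn's actual reloader; reproduction is timing-dependent because it races interpreter shutdown):

```python
# sketch.py -- a daemon thread that raises SystemExit right as the main
# thread exits; during Python 2.7 interpreter teardown module globals can
# already have been cleared to None, so the raise itself can fail with
# "'NoneType' object is not callable".
import threading
import time


def worker():
    time.sleep(0.01)
    # By the time this runs, the interpreter may already be tearing down,
    # and the name SystemExit may have been replaced with None.
    raise SystemExit()


t = threading.Thread(target=worker)
t.daemon = True
t.start()
time.sleep(0.005)
# main thread falls off the end here, triggering interpreter shutdown
```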
Anything I can do about this?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gunicorn/workers/base.py`
Content:
```
1 # -*- coding: utf-8 -
2 #
3 # This file is part of gunicorn released under the MIT license.
4 # See the NOTICE for more information.
5
6 from datetime import datetime
7 import os
8 import signal
9 import sys
10 from random import randint
11
12
13 from gunicorn import util
14 from gunicorn.workers.workertmp import WorkerTmp
15 from gunicorn.reloader import Reloader
16 from gunicorn.http.errors import (
17 InvalidHeader, InvalidHeaderName, InvalidRequestLine, InvalidRequestMethod,
18 InvalidHTTPVersion, LimitRequestLine, LimitRequestHeaders,
19 )
20 from gunicorn.http.errors import InvalidProxyLine, ForbiddenProxyRequest
21 from gunicorn.http.wsgi import default_environ, Response
22 from gunicorn.six import MAXSIZE
23
24
25 class Worker(object):
26
27 SIGNALS = [getattr(signal, "SIG%s" % x)
28 for x in "ABRT HUP QUIT INT TERM USR1 USR2 WINCH CHLD".split()]
29
30 PIPE = []
31
32 def __init__(self, age, ppid, sockets, app, timeout, cfg, log):
33 """\
34 This is called pre-fork so it shouldn't do anything to the
35 current process. If there's a need to make process wide
36 changes you'll want to do that in ``self.init_process()``.
37 """
38 self.age = age
39 self.ppid = ppid
40 self.sockets = sockets
41 self.app = app
42 self.timeout = timeout
43 self.cfg = cfg
44 self.booted = False
45 self.aborted = False
46
47 self.nr = 0
48 jitter = randint(0, cfg.max_requests_jitter)
49 self.max_requests = cfg.max_requests + jitter or MAXSIZE
50 self.alive = True
51 self.log = log
52 self.tmp = WorkerTmp(cfg)
53
54 def __str__(self):
55 return "<Worker %s>" % self.pid
56
57 @property
58 def pid(self):
59 return os.getpid()
60
61 def notify(self):
62 """\
63 Your worker subclass must arrange to have this method called
64 once every ``self.timeout`` seconds. If you fail in accomplishing
65 this task, the master process will murder your workers.
66 """
67 self.tmp.notify()
68
69 def run(self):
70 """\
71 This is the mainloop of a worker process. You should override
72 this method in a subclass to provide the intended behaviour
73 for your particular evil schemes.
74 """
75 raise NotImplementedError()
76
77 def init_process(self):
78 """\
79 If you override this method in a subclass, the last statement
80 in the function should be to call this method with
81 super(MyWorkerClass, self).init_process() so that the ``run()``
82 loop is initiated.
83 """
84
85 # start the reloader
86 if self.cfg.reload:
87 def changed(fname):
88 self.log.info("Worker reloading: %s modified", fname)
89 os.kill(self.pid, signal.SIGQUIT)
90 raise SystemExit()
91 Reloader(callback=changed).start()
92
93 # set environment' variables
94 if self.cfg.env:
95 for k, v in self.cfg.env.items():
96 os.environ[k] = v
97
98 util.set_owner_process(self.cfg.uid, self.cfg.gid)
99
100 # Reseed the random number generator
101 util.seed()
102
103 # For waking ourselves up
104 self.PIPE = os.pipe()
105 for p in self.PIPE:
106 util.set_non_blocking(p)
107 util.close_on_exec(p)
108
109 # Prevent fd inheritance
110 [util.close_on_exec(s) for s in self.sockets]
111 util.close_on_exec(self.tmp.fileno())
112
113 self.log.close_on_exec()
114
115 self.init_signals()
116
117 self.wsgi = self.app.wsgi()
118
119 self.cfg.post_worker_init(self)
120
121 # Enter main run loop
122 self.booted = True
123 self.run()
124
125 def init_signals(self):
126 # reset signaling
127 [signal.signal(s, signal.SIG_DFL) for s in self.SIGNALS]
128 # init new signaling
129 signal.signal(signal.SIGQUIT, self.handle_quit)
130 signal.signal(signal.SIGTERM, self.handle_exit)
131 signal.signal(signal.SIGINT, self.handle_quit)
132 signal.signal(signal.SIGWINCH, self.handle_winch)
133 signal.signal(signal.SIGUSR1, self.handle_usr1)
134 signal.signal(signal.SIGABRT, self.handle_abort)
135
136 # Don't let SIGTERM and SIGUSR1 disturb active requests
137 # by interrupting system calls
138 if hasattr(signal, 'siginterrupt'): # python >= 2.6
139 signal.siginterrupt(signal.SIGTERM, False)
140 signal.siginterrupt(signal.SIGUSR1, False)
141
142 def handle_usr1(self, sig, frame):
143 self.log.reopen_files()
144
145 def handle_exit(self, sig, frame):
146 self.alive = False
147
148 def handle_quit(self, sig, frame):
149 self.alive = False
150 # worker_int callback
151 self.cfg.worker_int(self)
152 sys.exit(0)
153
154 def handle_abort(self, sig, frame):
155 self.alive = False
156 self.cfg.worker_abort(self)
157 sys.exit(1)
158
159 def handle_error(self, req, client, addr, exc):
160 request_start = datetime.now()
161 addr = addr or ('', -1) # unix socket case
162 if isinstance(exc, (InvalidRequestLine, InvalidRequestMethod,
163 InvalidHTTPVersion, InvalidHeader, InvalidHeaderName,
164 LimitRequestLine, LimitRequestHeaders,
165 InvalidProxyLine, ForbiddenProxyRequest)):
166
167 status_int = 400
168 reason = "Bad Request"
169
170 if isinstance(exc, InvalidRequestLine):
171 mesg = "Invalid Request Line '%s'" % str(exc)
172 elif isinstance(exc, InvalidRequestMethod):
173 mesg = "Invalid Method '%s'" % str(exc)
174 elif isinstance(exc, InvalidHTTPVersion):
175 mesg = "Invalid HTTP Version '%s'" % str(exc)
176 elif isinstance(exc, (InvalidHeaderName, InvalidHeader,)):
177 mesg = "%s" % str(exc)
178 if not req and hasattr(exc, "req"):
179 req = exc.req # for access log
180 elif isinstance(exc, LimitRequestLine):
181 mesg = "%s" % str(exc)
182 elif isinstance(exc, LimitRequestHeaders):
183 mesg = "Error parsing headers: '%s'" % str(exc)
184 elif isinstance(exc, InvalidProxyLine):
185 mesg = "'%s'" % str(exc)
186 elif isinstance(exc, ForbiddenProxyRequest):
187 reason = "Forbidden"
188 mesg = "Request forbidden"
189 status_int = 403
190
191 msg = "Invalid request from ip={ip}: {error}"
192 self.log.debug(msg.format(ip=addr[0], error=str(exc)))
193 else:
194 self.log.exception("Error handling request")
195
196 status_int = 500
197 reason = "Internal Server Error"
198 mesg = ""
199
200 if req is not None:
201 request_time = datetime.now() - request_start
202 environ = default_environ(req, client, self.cfg)
203 environ['REMOTE_ADDR'] = addr[0]
204 environ['REMOTE_PORT'] = str(addr[1])
205 resp = Response(req, client, self.cfg)
206 resp.status = "%s %s" % (status_int, reason)
207 resp.response_length = len(mesg)
208 self.log.access(resp, req, environ, request_time)
209
210 try:
211 util.write_error(client, status_int, reason, mesg)
212 except:
213 self.log.debug("Failed to send error message.")
214
215 def handle_winch(self, sig, fname):
216 # Ignore SIGWINCH in worker. Fixes a crash on OpenBSD.
217 return
218
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gunicorn/workers/base.py b/gunicorn/workers/base.py
--- a/gunicorn/workers/base.py
+++ b/gunicorn/workers/base.py
@@ -87,7 +87,6 @@
def changed(fname):
self.log.info("Worker reloading: %s modified", fname)
os.kill(self.pid, signal.SIGQUIT)
- raise SystemExit()
Reloader(callback=changed).start()
# set environment' variables
| {"golden_diff": "diff --git a/gunicorn/workers/base.py b/gunicorn/workers/base.py\n--- a/gunicorn/workers/base.py\n+++ b/gunicorn/workers/base.py\n@@ -87,7 +87,6 @@\n def changed(fname):\n self.log.info(\"Worker reloading: %s modified\", fname)\n os.kill(self.pid, signal.SIGQUIT)\n- raise SystemExit()\n Reloader(callback=changed).start()\n \n # set environment' variables\n", "issue": "Reloading sometimes gives a TypeError: 'NoneType' object is not callable\nI'm running a custom app (subclass of `BaseApplication`) with reload set to True. In some situations, changing a file causes the following traceback:\n\n```\nException in thread Thread-1 (most likely raised during interpreter shutdown):\nTraceback (most recent call last):\n File \"/usr/maxm/lib/python2.7/threading.py\", line 551, in __bootstrap_inner\n File \"/usr/maxm/lib/python2.7/site-packages/gunicorn/reloader.py\", line 52, in run\n File \"/usr/maxm/lib/python2.7/site-packages/gunicorn/workers/base.py\", line 87, in changed\n<type 'exceptions.TypeError'>: 'NoneType' object is not callable\n```\n\nIt's intermittent; I can sometimes reproduce it several times in a row by touching the same file, and then it stops happening. It certainly doesn't seem to interfere with the reloading behavior.\n\nLine 87 is only `raise SystemExit()`. But line 86 is `os.kill(self.pid, signal.SIGQUIT)`, so I think what's happening is that the interpreter has started to tear down the environment and `SystemExit` has become `None`. (See also [this](http://article.gmane.org/gmane.comp.python.general/387087/) mailing list post.) \n\nAnything I can do about this?\n\n", "before_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nfrom datetime import datetime\nimport os\nimport signal\nimport sys\nfrom random import randint\n\n\nfrom gunicorn import util\nfrom gunicorn.workers.workertmp import WorkerTmp\nfrom gunicorn.reloader import Reloader\nfrom gunicorn.http.errors import (\n InvalidHeader, InvalidHeaderName, InvalidRequestLine, InvalidRequestMethod,\n InvalidHTTPVersion, LimitRequestLine, LimitRequestHeaders,\n)\nfrom gunicorn.http.errors import InvalidProxyLine, ForbiddenProxyRequest\nfrom gunicorn.http.wsgi import default_environ, Response\nfrom gunicorn.six import MAXSIZE\n\n\nclass Worker(object):\n\n SIGNALS = [getattr(signal, \"SIG%s\" % x)\n for x in \"ABRT HUP QUIT INT TERM USR1 USR2 WINCH CHLD\".split()]\n\n PIPE = []\n\n def __init__(self, age, ppid, sockets, app, timeout, cfg, log):\n \"\"\"\\\n This is called pre-fork so it shouldn't do anything to the\n current process. If there's a need to make process wide\n changes you'll want to do that in ``self.init_process()``.\n \"\"\"\n self.age = age\n self.ppid = ppid\n self.sockets = sockets\n self.app = app\n self.timeout = timeout\n self.cfg = cfg\n self.booted = False\n self.aborted = False\n\n self.nr = 0\n jitter = randint(0, cfg.max_requests_jitter)\n self.max_requests = cfg.max_requests + jitter or MAXSIZE\n self.alive = True\n self.log = log\n self.tmp = WorkerTmp(cfg)\n\n def __str__(self):\n return \"<Worker %s>\" % self.pid\n\n @property\n def pid(self):\n return os.getpid()\n\n def notify(self):\n \"\"\"\\\n Your worker subclass must arrange to have this method called\n once every ``self.timeout`` seconds. 
If you fail in accomplishing\n this task, the master process will murder your workers.\n \"\"\"\n self.tmp.notify()\n\n def run(self):\n \"\"\"\\\n This is the mainloop of a worker process. You should override\n this method in a subclass to provide the intended behaviour\n for your particular evil schemes.\n \"\"\"\n raise NotImplementedError()\n\n def init_process(self):\n \"\"\"\\\n If you override this method in a subclass, the last statement\n in the function should be to call this method with\n super(MyWorkerClass, self).init_process() so that the ``run()``\n loop is initiated.\n \"\"\"\n\n # start the reloader\n if self.cfg.reload:\n def changed(fname):\n self.log.info(\"Worker reloading: %s modified\", fname)\n os.kill(self.pid, signal.SIGQUIT)\n raise SystemExit()\n Reloader(callback=changed).start()\n\n # set environment' variables\n if self.cfg.env:\n for k, v in self.cfg.env.items():\n os.environ[k] = v\n\n util.set_owner_process(self.cfg.uid, self.cfg.gid)\n\n # Reseed the random number generator\n util.seed()\n\n # For waking ourselves up\n self.PIPE = os.pipe()\n for p in self.PIPE:\n util.set_non_blocking(p)\n util.close_on_exec(p)\n\n # Prevent fd inheritance\n [util.close_on_exec(s) for s in self.sockets]\n util.close_on_exec(self.tmp.fileno())\n\n self.log.close_on_exec()\n\n self.init_signals()\n\n self.wsgi = self.app.wsgi()\n\n self.cfg.post_worker_init(self)\n\n # Enter main run loop\n self.booted = True\n self.run()\n\n def init_signals(self):\n # reset signaling\n [signal.signal(s, signal.SIG_DFL) for s in self.SIGNALS]\n # init new signaling\n signal.signal(signal.SIGQUIT, self.handle_quit)\n signal.signal(signal.SIGTERM, self.handle_exit)\n signal.signal(signal.SIGINT, self.handle_quit)\n signal.signal(signal.SIGWINCH, self.handle_winch)\n signal.signal(signal.SIGUSR1, self.handle_usr1)\n signal.signal(signal.SIGABRT, self.handle_abort)\n\n # Don't let SIGTERM and SIGUSR1 disturb active requests\n # by interrupting system calls\n if hasattr(signal, 'siginterrupt'): # python >= 2.6\n signal.siginterrupt(signal.SIGTERM, False)\n signal.siginterrupt(signal.SIGUSR1, False)\n\n def handle_usr1(self, sig, frame):\n self.log.reopen_files()\n\n def handle_exit(self, sig, frame):\n self.alive = False\n\n def handle_quit(self, sig, frame):\n self.alive = False\n # worker_int callback\n self.cfg.worker_int(self)\n sys.exit(0)\n\n def handle_abort(self, sig, frame):\n self.alive = False\n self.cfg.worker_abort(self)\n sys.exit(1)\n\n def handle_error(self, req, client, addr, exc):\n request_start = datetime.now()\n addr = addr or ('', -1) # unix socket case\n if isinstance(exc, (InvalidRequestLine, InvalidRequestMethod,\n InvalidHTTPVersion, InvalidHeader, InvalidHeaderName,\n LimitRequestLine, LimitRequestHeaders,\n InvalidProxyLine, ForbiddenProxyRequest)):\n\n status_int = 400\n reason = \"Bad Request\"\n\n if isinstance(exc, InvalidRequestLine):\n mesg = \"Invalid Request Line '%s'\" % str(exc)\n elif isinstance(exc, InvalidRequestMethod):\n mesg = \"Invalid Method '%s'\" % str(exc)\n elif isinstance(exc, InvalidHTTPVersion):\n mesg = \"Invalid HTTP Version '%s'\" % str(exc)\n elif isinstance(exc, (InvalidHeaderName, InvalidHeader,)):\n mesg = \"%s\" % str(exc)\n if not req and hasattr(exc, \"req\"):\n req = exc.req # for access log\n elif isinstance(exc, LimitRequestLine):\n mesg = \"%s\" % str(exc)\n elif isinstance(exc, LimitRequestHeaders):\n mesg = \"Error parsing headers: '%s'\" % str(exc)\n elif isinstance(exc, InvalidProxyLine):\n mesg = \"'%s'\" % str(exc)\n elif 
isinstance(exc, ForbiddenProxyRequest):\n reason = \"Forbidden\"\n mesg = \"Request forbidden\"\n status_int = 403\n\n msg = \"Invalid request from ip={ip}: {error}\"\n self.log.debug(msg.format(ip=addr[0], error=str(exc)))\n else:\n self.log.exception(\"Error handling request\")\n\n status_int = 500\n reason = \"Internal Server Error\"\n mesg = \"\"\n\n if req is not None:\n request_time = datetime.now() - request_start\n environ = default_environ(req, client, self.cfg)\n environ['REMOTE_ADDR'] = addr[0]\n environ['REMOTE_PORT'] = str(addr[1])\n resp = Response(req, client, self.cfg)\n resp.status = \"%s %s\" % (status_int, reason)\n resp.response_length = len(mesg)\n self.log.access(resp, req, environ, request_time)\n\n try:\n util.write_error(client, status_int, reason, mesg)\n except:\n self.log.debug(\"Failed to send error message.\")\n\n def handle_winch(self, sig, fname):\n # Ignore SIGWINCH in worker. Fixes a crash on OpenBSD.\n return\n", "path": "gunicorn/workers/base.py"}], "after_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nfrom datetime import datetime\nimport os\nimport signal\nimport sys\nfrom random import randint\n\n\nfrom gunicorn import util\nfrom gunicorn.workers.workertmp import WorkerTmp\nfrom gunicorn.reloader import Reloader\nfrom gunicorn.http.errors import (\n InvalidHeader, InvalidHeaderName, InvalidRequestLine, InvalidRequestMethod,\n InvalidHTTPVersion, LimitRequestLine, LimitRequestHeaders,\n)\nfrom gunicorn.http.errors import InvalidProxyLine, ForbiddenProxyRequest\nfrom gunicorn.http.wsgi import default_environ, Response\nfrom gunicorn.six import MAXSIZE\n\n\nclass Worker(object):\n\n SIGNALS = [getattr(signal, \"SIG%s\" % x)\n for x in \"ABRT HUP QUIT INT TERM USR1 USR2 WINCH CHLD\".split()]\n\n PIPE = []\n\n def __init__(self, age, ppid, sockets, app, timeout, cfg, log):\n \"\"\"\\\n This is called pre-fork so it shouldn't do anything to the\n current process. If there's a need to make process wide\n changes you'll want to do that in ``self.init_process()``.\n \"\"\"\n self.age = age\n self.ppid = ppid\n self.sockets = sockets\n self.app = app\n self.timeout = timeout\n self.cfg = cfg\n self.booted = False\n self.aborted = False\n\n self.nr = 0\n jitter = randint(0, cfg.max_requests_jitter)\n self.max_requests = cfg.max_requests + jitter or MAXSIZE\n self.alive = True\n self.log = log\n self.tmp = WorkerTmp(cfg)\n\n def __str__(self):\n return \"<Worker %s>\" % self.pid\n\n @property\n def pid(self):\n return os.getpid()\n\n def notify(self):\n \"\"\"\\\n Your worker subclass must arrange to have this method called\n once every ``self.timeout`` seconds. If you fail in accomplishing\n this task, the master process will murder your workers.\n \"\"\"\n self.tmp.notify()\n\n def run(self):\n \"\"\"\\\n This is the mainloop of a worker process. 
You should override\n this method in a subclass to provide the intended behaviour\n for your particular evil schemes.\n \"\"\"\n raise NotImplementedError()\n\n def init_process(self):\n \"\"\"\\\n If you override this method in a subclass, the last statement\n in the function should be to call this method with\n super(MyWorkerClass, self).init_process() so that the ``run()``\n loop is initiated.\n \"\"\"\n\n # start the reloader\n if self.cfg.reload:\n def changed(fname):\n self.log.info(\"Worker reloading: %s modified\", fname)\n os.kill(self.pid, signal.SIGQUIT)\n Reloader(callback=changed).start()\n\n # set environment' variables\n if self.cfg.env:\n for k, v in self.cfg.env.items():\n os.environ[k] = v\n\n util.set_owner_process(self.cfg.uid, self.cfg.gid)\n\n # Reseed the random number generator\n util.seed()\n\n # For waking ourselves up\n self.PIPE = os.pipe()\n for p in self.PIPE:\n util.set_non_blocking(p)\n util.close_on_exec(p)\n\n # Prevent fd inheritance\n [util.close_on_exec(s) for s in self.sockets]\n util.close_on_exec(self.tmp.fileno())\n\n self.log.close_on_exec()\n\n self.init_signals()\n\n self.wsgi = self.app.wsgi()\n\n self.cfg.post_worker_init(self)\n\n # Enter main run loop\n self.booted = True\n self.run()\n\n def init_signals(self):\n # reset signaling\n [signal.signal(s, signal.SIG_DFL) for s in self.SIGNALS]\n # init new signaling\n signal.signal(signal.SIGQUIT, self.handle_quit)\n signal.signal(signal.SIGTERM, self.handle_exit)\n signal.signal(signal.SIGINT, self.handle_quit)\n signal.signal(signal.SIGWINCH, self.handle_winch)\n signal.signal(signal.SIGUSR1, self.handle_usr1)\n signal.signal(signal.SIGABRT, self.handle_abort)\n\n # Don't let SIGTERM and SIGUSR1 disturb active requests\n # by interrupting system calls\n if hasattr(signal, 'siginterrupt'): # python >= 2.6\n signal.siginterrupt(signal.SIGTERM, False)\n signal.siginterrupt(signal.SIGUSR1, False)\n\n def handle_usr1(self, sig, frame):\n self.log.reopen_files()\n\n def handle_exit(self, sig, frame):\n self.alive = False\n\n def handle_quit(self, sig, frame):\n self.alive = False\n # worker_int callback\n self.cfg.worker_int(self)\n sys.exit(0)\n\n def handle_abort(self, sig, frame):\n self.alive = False\n self.cfg.worker_abort(self)\n sys.exit(1)\n\n def handle_error(self, req, client, addr, exc):\n request_start = datetime.now()\n addr = addr or ('', -1) # unix socket case\n if isinstance(exc, (InvalidRequestLine, InvalidRequestMethod,\n InvalidHTTPVersion, InvalidHeader, InvalidHeaderName,\n LimitRequestLine, LimitRequestHeaders,\n InvalidProxyLine, ForbiddenProxyRequest)):\n\n status_int = 400\n reason = \"Bad Request\"\n\n if isinstance(exc, InvalidRequestLine):\n mesg = \"Invalid Request Line '%s'\" % str(exc)\n elif isinstance(exc, InvalidRequestMethod):\n mesg = \"Invalid Method '%s'\" % str(exc)\n elif isinstance(exc, InvalidHTTPVersion):\n mesg = \"Invalid HTTP Version '%s'\" % str(exc)\n elif isinstance(exc, (InvalidHeaderName, InvalidHeader,)):\n mesg = \"%s\" % str(exc)\n if not req and hasattr(exc, \"req\"):\n req = exc.req # for access log\n elif isinstance(exc, LimitRequestLine):\n mesg = \"%s\" % str(exc)\n elif isinstance(exc, LimitRequestHeaders):\n mesg = \"Error parsing headers: '%s'\" % str(exc)\n elif isinstance(exc, InvalidProxyLine):\n mesg = \"'%s'\" % str(exc)\n elif isinstance(exc, ForbiddenProxyRequest):\n reason = \"Forbidden\"\n mesg = \"Request forbidden\"\n status_int = 403\n\n msg = \"Invalid request from ip={ip}: {error}\"\n self.log.debug(msg.format(ip=addr[0], 
error=str(exc)))\n else:\n self.log.exception(\"Error handling request\")\n\n status_int = 500\n reason = \"Internal Server Error\"\n mesg = \"\"\n\n if req is not None:\n request_time = datetime.now() - request_start\n environ = default_environ(req, client, self.cfg)\n environ['REMOTE_ADDR'] = addr[0]\n environ['REMOTE_PORT'] = str(addr[1])\n resp = Response(req, client, self.cfg)\n resp.status = \"%s %s\" % (status_int, reason)\n resp.response_length = len(mesg)\n self.log.access(resp, req, environ, request_time)\n\n try:\n util.write_error(client, status_int, reason, mesg)\n except:\n self.log.debug(\"Failed to send error message.\")\n\n def handle_winch(self, sig, fname):\n # Ignore SIGWINCH in worker. Fixes a crash on OpenBSD.\n return\n", "path": "gunicorn/workers/base.py"}]} | 2,764 | 100 |
gh_patches_debug_32772 | rasdani/github-patches | git_diff | pwndbg__pwndbg-1094 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add support for telescope to show lower addresses instead of higher addresses
I often need to see a few 8 byte values above (and including) a particular address. For example, if I have a heap chunk at `0x557bb502e2e0`, this is how telescope currently works:
```
pwndbg> telescope 0x557bb502e2e0
00:0000│ 0x557bb502e2e0 ◂— 0x0
01:0008│ 0x557bb502e2e8 ◂— 0x21 /* '!' */
02:0010│ 0x557bb502e2f0 ◂— 0x20 /* ' ' */
03:0018│ 0x557bb502e2f8 ◂— 0x1
04:0020│ 0x557bb502e300 —▸ 0x557bb502e310 ◂— 'BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB'
05:0028│ 0x557bb502e308 ◂— 0x31 /* '1' */
06:0030│ 0x557bb502e310 ◂— 'BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB'
07:0038│ 0x557bb502e318 ◂— 'BBBBBBBBBBBBBBBBBBBBBBBB'
```
Now I may have expected that an overflow should have overwritten the first few pointers of this heap chunk, but for some reason it didn't, and I suspect I'm a few bytes off, maybe some small multiple of 8. I'd like to check right above this chunk (lower addresses), so currently I can do something like this (or similarly with `x/xg`):
```
pwndbg> telescope 0x557bb502e2e0-0x8*4 5
00:0000│ rax rdi 0x557bb502e2c0 ◂— 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
... ↓ 3 skipped
04:0020│ 0x557bb502e2e0 ◂— 0x0
```
This works, but I think it's a verbose and inconvenient way to do it. If I want to look farther back, I need to change both the `4` and the `5` to make it do the right thing, as opposed to changing just one number.
It'd be nice to either have a flag `telescope -r` or a new command `rtelescope` that takes an address and an optional count and prints the values at lower memory addresses. The example above would just be `telescope -r 0x557bb502e2e0 5`, which is much easier.
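For example (just a sketch of the arithmetic I have in mind, not an actual implementation), the reverse variant only needs to shift the start address down by `(count - 1) * ptrsize` so that the given address ends up as the last row:

```python
# hypothetical helper illustrating the proposed `telescope -r` behaviour
def reverse_start(address, count, ptrsize=8):
    """Start address so that `count` values end at (and include) `address`."""
    return address - (count - 1) * ptrsize


# telescope -r 0x557bb502e2e0 5 would then dump from:
print(hex(reverse_start(0x557BB502E2E0, 5)))  # 0x557bb502e2c0
```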
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwndbg/commands/telescope.py`
Content:
```
1 """
2 Prints out pointer chains starting at some address in memory.
3
4 Generally used to print out the stack or register values.
5 """
6
7 import argparse
8 import collections
9 import math
10
11 import pwndbg.arch
12 import pwndbg.chain
13 import pwndbg.color.telescope as T
14 import pwndbg.color.theme as theme
15 import pwndbg.commands
16 import pwndbg.config
17 import pwndbg.memory
18 import pwndbg.regs
19 import pwndbg.typeinfo
20
21 telescope_lines = pwndbg.config.Parameter('telescope-lines', 8, 'number of lines to printed by the telescope command')
22 skip_repeating_values = pwndbg.config.Parameter('telescope-skip-repeating-val', True,
23 'whether to skip repeating values of the telescope command')
24 skip_repeating_values_minimum = pwndbg.config.Parameter('telescope-skip-repeating-val-minimum', 3,
25 'minimum amount of repeated values before skipping lines')
26
27 offset_separator = theme.Parameter('telescope-offset-separator', '│', 'offset separator of the telescope command')
28 offset_delimiter = theme.Parameter('telescope-offset-delimiter', ':', 'offset delimiter of the telescope command')
29 repeating_marker = theme.Parameter('telescope-repeating-marker', '... ↓',
30 'repeating values marker of the telescope command')
31
32
33 parser = argparse.ArgumentParser(description="""
34 Recursively dereferences pointers starting at the specified address
35 ($sp by default)
36 """)
37 parser.add_argument("address", nargs="?", default=None, type=int, help="The address to telescope at.")
38 parser.add_argument("count", nargs="?", default=telescope_lines, type=int, help="The number of lines to show.")
39 @pwndbg.commands.ArgparsedCommand(parser)
40 @pwndbg.commands.OnlyWhenRunning
41 def telescope(address=None, count=telescope_lines, to_string=False):
42 """
43 Recursively dereferences pointers starting at the specified address
44 ($sp by default)
45 """
46 ptrsize = pwndbg.typeinfo.ptrsize
47 if telescope.repeat:
48 address = telescope.last_address + ptrsize
49 telescope.offset += 1
50 else:
51 telescope.offset = 0
52
53 address = int(address if address else pwndbg.regs.sp) & pwndbg.arch.ptrmask
54 count = max(int(count), 1) & pwndbg.arch.ptrmask
55 delimiter = T.delimiter(offset_delimiter)
56 separator = T.separator(offset_separator)
57
58 # Allow invocation of "telescope 20" to dump 20 bytes at the stack pointer
59 if address < pwndbg.memory.MMAP_MIN_ADDR and not pwndbg.memory.peek(address):
60 count = address
61 address = pwndbg.regs.sp
62
63 # Allow invocation of "telescope a b" to dump all bytes from A to B
64 if int(address) <= int(count):
65 # adjust count if it is an address. use ceil division as count is number of
66 # ptrsize values and we don't want to strip out a value if dest is unaligned
67 count -= address
68 count = max(math.ceil(count / ptrsize), 1)
69
70 reg_values = collections.defaultdict(lambda: [])
71 for reg in pwndbg.regs.common:
72 reg_values[pwndbg.regs[reg]].append(reg)
73 # address = pwndbg.memory.poi(pwndbg.typeinfo.ppvoid, address)
74
75 start = address
76 stop = address + (count*ptrsize)
77 step = ptrsize
78
79 # Find all registers which show up in the trace
80 regs = {}
81 for i in range(start, stop, step):
82 values = list(reg_values[i])
83
84 for width in range(1, pwndbg.arch.ptrsize):
85 values.extend('%s-%i' % (r,width) for r in reg_values[i+width])
86
87 regs[i] = ' '.join(values)
88
89 # Find the longest set of register information
90 if regs:
91 longest_regs = max(map(len, regs.values()))
92 else:
93 longest_regs = 0
94
95 # Print everything out
96 result = []
97 last = None
98 collapse_buffer = []
99 skipped_padding = 2 + len(offset_delimiter) + 4 + len(offset_separator) + 1 + longest_regs + 1 - len(repeating_marker)
100
101 # Collapse repeating values exceeding minimum delta.
102 def collapse_repeating_values():
103 # The first line was already printed, hence increment by 1
104 if collapse_buffer and len(collapse_buffer) + 1 >= skip_repeating_values_minimum:
105 result.append(T.repeating_marker('%s%s%i skipped' % (repeating_marker, ' ' * skipped_padding, len(collapse_buffer))))
106 else:
107 result.extend(collapse_buffer)
108 collapse_buffer.clear()
109
110 for i, addr in enumerate(range(start, stop, step)):
111 if not pwndbg.memory.peek(addr):
112 collapse_repeating_values()
113 result.append("<Could not read memory at %#x>" % addr)
114 break
115
116 line = ' '.join((T.offset("%02x%s%04x%s" % (i + telescope.offset, delimiter,
117 addr - start + (telescope.offset * ptrsize), separator)),
118 T.register(regs[addr].ljust(longest_regs)),
119 pwndbg.chain.format(addr)))
120
121 # Buffer repeating values.
122 if skip_repeating_values:
123 value = pwndbg.memory.pvoid(addr)
124 if last == value:
125 collapse_buffer.append(line)
126 continue
127 collapse_repeating_values()
128 last = value
129
130 result.append(line)
131
132 collapse_repeating_values()
133 telescope.offset += i
134 telescope.last_address = addr
135
136 if not to_string:
137 print('\n'.join(result))
138
139 return result
140
141
142 parser = argparse.ArgumentParser(description='dereferences on stack data with specified count and offset.')
143 parser.add_argument('count', nargs='?', default=8, type=int,
144 help='number of element to dump')
145 parser.add_argument('offset', nargs='?', default=0, type=int,
146 help='Element offset from $sp (support negative offset)')
147
148
149 @pwndbg.commands.ArgparsedCommand(parser)
150 @pwndbg.commands.OnlyWhenRunning
151 def stack(count, offset):
152 ptrsize = pwndbg.typeinfo.ptrsize
153 telescope.repeat = stack.repeat
154 telescope(address=pwndbg.regs.sp + offset * ptrsize, count=count)
155
156
157 telescope.last_address = 0
158 telescope.offset = 0
159
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pwndbg/commands/telescope.py b/pwndbg/commands/telescope.py
--- a/pwndbg/commands/telescope.py
+++ b/pwndbg/commands/telescope.py
@@ -36,14 +36,18 @@
""")
parser.add_argument("address", nargs="?", default=None, type=int, help="The address to telescope at.")
parser.add_argument("count", nargs="?", default=telescope_lines, type=int, help="The number of lines to show.")
+parser.add_argument("-r", "--reverse", dest="reverse", action='store_true', default=False,
+ help='Show <count> previous addresses instead of next ones')
+
+
@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWhenRunning
-def telescope(address=None, count=telescope_lines, to_string=False):
+def telescope(address=None, count=telescope_lines, to_string=False, reverse=False):
"""
Recursively dereferences pointers starting at the specified address
($sp by default)
"""
- ptrsize = pwndbg.typeinfo.ptrsize
+ ptrsize = pwndbg.typeinfo.ptrsize
if telescope.repeat:
address = telescope.last_address + ptrsize
telescope.offset += 1
@@ -51,10 +55,14 @@
telescope.offset = 0
address = int(address if address else pwndbg.regs.sp) & pwndbg.arch.ptrmask
- count = max(int(count), 1) & pwndbg.arch.ptrmask
+ count = max(int(count), 1) & pwndbg.arch.ptrmask
delimiter = T.delimiter(offset_delimiter)
separator = T.separator(offset_separator)
+ # Allow invocation of telescope -r to dump previous addresses
+ if reverse:
+ address -= (count - 1) * ptrsize
+
# Allow invocation of "telescope 20" to dump 20 bytes at the stack pointer
if address < pwndbg.memory.MMAP_MIN_ADDR and not pwndbg.memory.peek(address):
count = address
| {"golden_diff": "diff --git a/pwndbg/commands/telescope.py b/pwndbg/commands/telescope.py\n--- a/pwndbg/commands/telescope.py\n+++ b/pwndbg/commands/telescope.py\n@@ -36,14 +36,18 @@\n \"\"\")\n parser.add_argument(\"address\", nargs=\"?\", default=None, type=int, help=\"The address to telescope at.\")\n parser.add_argument(\"count\", nargs=\"?\", default=telescope_lines, type=int, help=\"The number of lines to show.\")\n+parser.add_argument(\"-r\", \"--reverse\", dest=\"reverse\", action='store_true', default=False,\n+ help='Show <count> previous addresses instead of next ones')\n+\n+\n @pwndbg.commands.ArgparsedCommand(parser)\n @pwndbg.commands.OnlyWhenRunning\n-def telescope(address=None, count=telescope_lines, to_string=False):\n+def telescope(address=None, count=telescope_lines, to_string=False, reverse=False):\n \"\"\"\n Recursively dereferences pointers starting at the specified address\n ($sp by default)\n \"\"\"\n- ptrsize = pwndbg.typeinfo.ptrsize\n+ ptrsize = pwndbg.typeinfo.ptrsize\n if telescope.repeat:\n address = telescope.last_address + ptrsize\n telescope.offset += 1\n@@ -51,10 +55,14 @@\n telescope.offset = 0\n \n address = int(address if address else pwndbg.regs.sp) & pwndbg.arch.ptrmask\n- count = max(int(count), 1) & pwndbg.arch.ptrmask\n+ count = max(int(count), 1) & pwndbg.arch.ptrmask\n delimiter = T.delimiter(offset_delimiter)\n separator = T.separator(offset_separator)\n \n+ # Allow invocation of telescope -r to dump previous addresses\n+ if reverse:\n+ address -= (count - 1) * ptrsize\n+\n # Allow invocation of \"telescope 20\" to dump 20 bytes at the stack pointer\n if address < pwndbg.memory.MMAP_MIN_ADDR and not pwndbg.memory.peek(address):\n count = address\n", "issue": "Add support for telescope to show lower addresses instead of higher addresses\nI often need to see a few 8 byte values above (and including) a particular address. For example, if I have a heap chunk at `0x557bb502e2e0`, this is how telescope currently works:\r\n```\r\npwndbg> telescope 0x557bb502e2e0\r\n00:0000\u2502 0x557bb502e2e0 \u25c2\u2014 0x0\r\n01:0008\u2502 0x557bb502e2e8 \u25c2\u2014 0x21 /* '!' */\r\n02:0010\u2502 0x557bb502e2f0 \u25c2\u2014 0x20 /* ' ' */\r\n03:0018\u2502 0x557bb502e2f8 \u25c2\u2014 0x1\r\n04:0020\u2502 0x557bb502e300 \u2014\u25b8 0x557bb502e310 \u25c2\u2014 'BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB'\r\n05:0028\u2502 0x557bb502e308 \u25c2\u2014 0x31 /* '1' */\r\n06:0030\u2502 0x557bb502e310 \u25c2\u2014 'BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB'\r\n07:0038\u2502 0x557bb502e318 \u25c2\u2014 'BBBBBBBBBBBBBBBBBBBBBBBB'\r\n```\r\nNow I may have expected that an overflow should have overwritten the first few pointers of this heap chunk, but for some reason it didn't, and I suspect I'm a few bytes off, maybe some small multiple of 8. I'd like to check right above this chunk (lower addresses), so currently I can do something like this (or similarly with `x/xg`):\r\n```\r\npwndbg> telescope 0x557bb502e2e0-0x8*4 5\r\n00:0000\u2502 rax rdi 0x557bb502e2c0 \u25c2\u2014 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'\r\n... \u2193 3 skipped\r\n04:0020\u2502 0x557bb502e2e0 \u25c2\u2014 0x0\r\n```\r\n\r\nThis works, but I think it's a verbose and inconvenient way to do it. If I want to look farther back, I need to change both the `4` and the `5` to make it do the right thing, as opposed to changing just one number.\r\n\r\nIt'd be nice to either have a flag `telescope -r` or a new command `rtelescope` that take an address and an optional count and prints the values at lower memory addresses. 
The example above would just be `telescope -r 0x557bb502e2e0 5`, which is much easier.\n", "before_files": [{"content": "\"\"\"\nPrints out pointer chains starting at some address in memory.\n\nGenerally used to print out the stack or register values.\n\"\"\"\n\nimport argparse\nimport collections\nimport math\n\nimport pwndbg.arch\nimport pwndbg.chain\nimport pwndbg.color.telescope as T\nimport pwndbg.color.theme as theme\nimport pwndbg.commands\nimport pwndbg.config\nimport pwndbg.memory\nimport pwndbg.regs\nimport pwndbg.typeinfo\n\ntelescope_lines = pwndbg.config.Parameter('telescope-lines', 8, 'number of lines to printed by the telescope command')\nskip_repeating_values = pwndbg.config.Parameter('telescope-skip-repeating-val', True,\n 'whether to skip repeating values of the telescope command')\nskip_repeating_values_minimum = pwndbg.config.Parameter('telescope-skip-repeating-val-minimum', 3,\n 'minimum amount of repeated values before skipping lines')\n\noffset_separator = theme.Parameter('telescope-offset-separator', '\u2502', 'offset separator of the telescope command')\noffset_delimiter = theme.Parameter('telescope-offset-delimiter', ':', 'offset delimiter of the telescope command')\nrepeating_marker = theme.Parameter('telescope-repeating-marker', '... \u2193',\n 'repeating values marker of the telescope command')\n\n\nparser = argparse.ArgumentParser(description=\"\"\"\n Recursively dereferences pointers starting at the specified address\n ($sp by default)\n \"\"\")\nparser.add_argument(\"address\", nargs=\"?\", default=None, type=int, help=\"The address to telescope at.\")\nparser.add_argument(\"count\", nargs=\"?\", default=telescope_lines, type=int, help=\"The number of lines to show.\")\[email protected](parser)\[email protected]\ndef telescope(address=None, count=telescope_lines, to_string=False):\n \"\"\"\n Recursively dereferences pointers starting at the specified address\n ($sp by default)\n \"\"\"\n ptrsize = pwndbg.typeinfo.ptrsize\n if telescope.repeat:\n address = telescope.last_address + ptrsize\n telescope.offset += 1\n else:\n telescope.offset = 0\n\n address = int(address if address else pwndbg.regs.sp) & pwndbg.arch.ptrmask\n count = max(int(count), 1) & pwndbg.arch.ptrmask\n delimiter = T.delimiter(offset_delimiter)\n separator = T.separator(offset_separator)\n\n # Allow invocation of \"telescope 20\" to dump 20 bytes at the stack pointer\n if address < pwndbg.memory.MMAP_MIN_ADDR and not pwndbg.memory.peek(address):\n count = address\n address = pwndbg.regs.sp\n\n # Allow invocation of \"telescope a b\" to dump all bytes from A to B\n if int(address) <= int(count):\n # adjust count if it is an address. 
use ceil division as count is number of\n # ptrsize values and we don't want to strip out a value if dest is unaligned\n count -= address\n count = max(math.ceil(count / ptrsize), 1)\n\n reg_values = collections.defaultdict(lambda: [])\n for reg in pwndbg.regs.common:\n reg_values[pwndbg.regs[reg]].append(reg)\n # address = pwndbg.memory.poi(pwndbg.typeinfo.ppvoid, address)\n\n start = address\n stop = address + (count*ptrsize)\n step = ptrsize\n\n # Find all registers which show up in the trace\n regs = {}\n for i in range(start, stop, step):\n values = list(reg_values[i])\n\n for width in range(1, pwndbg.arch.ptrsize):\n values.extend('%s-%i' % (r,width) for r in reg_values[i+width])\n\n regs[i] = ' '.join(values)\n\n # Find the longest set of register information\n if regs:\n longest_regs = max(map(len, regs.values()))\n else:\n longest_regs = 0\n\n # Print everything out\n result = []\n last = None\n collapse_buffer = []\n skipped_padding = 2 + len(offset_delimiter) + 4 + len(offset_separator) + 1 + longest_regs + 1 - len(repeating_marker)\n\n # Collapse repeating values exceeding minimum delta.\n def collapse_repeating_values():\n # The first line was already printed, hence increment by 1\n if collapse_buffer and len(collapse_buffer) + 1 >= skip_repeating_values_minimum:\n result.append(T.repeating_marker('%s%s%i skipped' % (repeating_marker, ' ' * skipped_padding, len(collapse_buffer))))\n else:\n result.extend(collapse_buffer)\n collapse_buffer.clear()\n\n for i, addr in enumerate(range(start, stop, step)):\n if not pwndbg.memory.peek(addr):\n collapse_repeating_values()\n result.append(\"<Could not read memory at %#x>\" % addr)\n break\n\n line = ' '.join((T.offset(\"%02x%s%04x%s\" % (i + telescope.offset, delimiter,\n addr - start + (telescope.offset * ptrsize), separator)),\n T.register(regs[addr].ljust(longest_regs)),\n pwndbg.chain.format(addr)))\n\n # Buffer repeating values.\n if skip_repeating_values:\n value = pwndbg.memory.pvoid(addr)\n if last == value:\n collapse_buffer.append(line)\n continue\n collapse_repeating_values()\n last = value\n\n result.append(line)\n\n collapse_repeating_values()\n telescope.offset += i\n telescope.last_address = addr\n\n if not to_string:\n print('\\n'.join(result))\n\n return result\n\n\nparser = argparse.ArgumentParser(description='dereferences on stack data with specified count and offset.')\nparser.add_argument('count', nargs='?', default=8, type=int,\n help='number of element to dump')\nparser.add_argument('offset', nargs='?', default=0, type=int,\n help='Element offset from $sp (support negative offset)')\n\n\[email protected](parser)\[email protected]\ndef stack(count, offset):\n ptrsize = pwndbg.typeinfo.ptrsize\n telescope.repeat = stack.repeat\n telescope(address=pwndbg.regs.sp + offset * ptrsize, count=count)\n\n\ntelescope.last_address = 0\ntelescope.offset = 0\n", "path": "pwndbg/commands/telescope.py"}], "after_files": [{"content": "\"\"\"\nPrints out pointer chains starting at some address in memory.\n\nGenerally used to print out the stack or register values.\n\"\"\"\n\nimport argparse\nimport collections\nimport math\n\nimport pwndbg.arch\nimport pwndbg.chain\nimport pwndbg.color.telescope as T\nimport pwndbg.color.theme as theme\nimport pwndbg.commands\nimport pwndbg.config\nimport pwndbg.memory\nimport pwndbg.regs\nimport pwndbg.typeinfo\n\ntelescope_lines = pwndbg.config.Parameter('telescope-lines', 8, 'number of lines to printed by the telescope command')\nskip_repeating_values = 
pwndbg.config.Parameter('telescope-skip-repeating-val', True,\n 'whether to skip repeating values of the telescope command')\nskip_repeating_values_minimum = pwndbg.config.Parameter('telescope-skip-repeating-val-minimum', 3,\n 'minimum amount of repeated values before skipping lines')\n\noffset_separator = theme.Parameter('telescope-offset-separator', '\u2502', 'offset separator of the telescope command')\noffset_delimiter = theme.Parameter('telescope-offset-delimiter', ':', 'offset delimiter of the telescope command')\nrepeating_marker = theme.Parameter('telescope-repeating-marker', '... \u2193',\n 'repeating values marker of the telescope command')\n\n\nparser = argparse.ArgumentParser(description=\"\"\"\n Recursively dereferences pointers starting at the specified address\n ($sp by default)\n \"\"\")\nparser.add_argument(\"address\", nargs=\"?\", default=None, type=int, help=\"The address to telescope at.\")\nparser.add_argument(\"count\", nargs=\"?\", default=telescope_lines, type=int, help=\"The number of lines to show.\")\nparser.add_argument(\"-r\", \"--reverse\", dest=\"reverse\", action='store_true', default=False,\n help='Show <count> previous addresses instead of next ones')\n\n\[email protected](parser)\[email protected]\ndef telescope(address=None, count=telescope_lines, to_string=False, reverse=False):\n \"\"\"\n Recursively dereferences pointers starting at the specified address\n ($sp by default)\n \"\"\"\n ptrsize = pwndbg.typeinfo.ptrsize\n if telescope.repeat:\n address = telescope.last_address + ptrsize\n telescope.offset += 1\n else:\n telescope.offset = 0\n\n address = int(address if address else pwndbg.regs.sp) & pwndbg.arch.ptrmask\n count = max(int(count), 1) & pwndbg.arch.ptrmask\n delimiter = T.delimiter(offset_delimiter)\n separator = T.separator(offset_separator)\n\n # Allow invocation of telescope -r to dump previous addresses\n if reverse:\n address -= (count - 1) * ptrsize\n\n # Allow invocation of \"telescope 20\" to dump 20 bytes at the stack pointer\n if address < pwndbg.memory.MMAP_MIN_ADDR and not pwndbg.memory.peek(address):\n count = address\n address = pwndbg.regs.sp\n\n # Allow invocation of \"telescope a b\" to dump all bytes from A to B\n if int(address) <= int(count):\n # adjust count if it is an address. 
use ceil division as count is number of\n # ptrsize values and we don't want to strip out a value if dest is unaligned\n count -= address\n count = max(math.ceil(count / ptrsize), 1)\n\n reg_values = collections.defaultdict(lambda: [])\n for reg in pwndbg.regs.common:\n reg_values[pwndbg.regs[reg]].append(reg)\n # address = pwndbg.memory.poi(pwndbg.typeinfo.ppvoid, address)\n\n start = address\n stop = address + (count*ptrsize)\n step = ptrsize\n\n # Find all registers which show up in the trace\n regs = {}\n for i in range(start, stop, step):\n values = list(reg_values[i])\n\n for width in range(1, pwndbg.arch.ptrsize):\n values.extend('%s-%i' % (r,width) for r in reg_values[i+width])\n\n regs[i] = ' '.join(values)\n\n # Find the longest set of register information\n if regs:\n longest_regs = max(map(len, regs.values()))\n else:\n longest_regs = 0\n\n # Print everything out\n result = []\n last = None\n collapse_buffer = []\n skipped_padding = 2 + len(offset_delimiter) + 4 + len(offset_separator) + 1 + longest_regs + 1 - len(repeating_marker)\n\n # Collapse repeating values exceeding minimum delta.\n def collapse_repeating_values():\n # The first line was already printed, hence increment by 1\n if collapse_buffer and len(collapse_buffer) + 1 >= skip_repeating_values_minimum:\n result.append(T.repeating_marker('%s%s%i skipped' % (repeating_marker, ' ' * skipped_padding, len(collapse_buffer))))\n else:\n result.extend(collapse_buffer)\n collapse_buffer.clear()\n\n for i, addr in enumerate(range(start, stop, step)):\n if not pwndbg.memory.peek(addr):\n collapse_repeating_values()\n result.append(\"<Could not read memory at %#x>\" % addr)\n break\n\n line = ' '.join((T.offset(\"%02x%s%04x%s\" % (i + telescope.offset, delimiter,\n addr - start + (telescope.offset * ptrsize), separator)),\n T.register(regs[addr].ljust(longest_regs)),\n pwndbg.chain.format(addr)))\n\n # Buffer repeating values.\n if skip_repeating_values:\n value = pwndbg.memory.pvoid(addr)\n if last == value:\n collapse_buffer.append(line)\n continue\n collapse_repeating_values()\n last = value\n\n result.append(line)\n\n collapse_repeating_values()\n telescope.offset += i\n telescope.last_address = addr\n\n if not to_string:\n print('\\n'.join(result))\n\n return result\n\n\nparser = argparse.ArgumentParser(description='dereferences on stack data with specified count and offset.')\nparser.add_argument('count', nargs='?', default=8, type=int,\n help='number of element to dump')\nparser.add_argument('offset', nargs='?', default=0, type=int,\n help='Element offset from $sp (support negative offset)')\n\n\[email protected](parser)\[email protected]\ndef stack(count, offset):\n ptrsize = pwndbg.typeinfo.ptrsize\n telescope.repeat = stack.repeat\n telescope(address=pwndbg.regs.sp + offset * ptrsize, count=count)\n\n\ntelescope.last_address = 0\ntelescope.offset = 0\n", "path": "pwndbg/commands/telescope.py"}]} | 2,706 | 465 |
gh_patches_debug_11484 | rasdani/github-patches | git_diff | lightly-ai__lightly-1536 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tutorial 3: small typo "strcture" -> "structure"
In [this section](https://docs.lightly.ai/self-supervised-learning/tutorials/package/tutorial_simclr_clothing.html#setup-data-augmentations-and-loaders), the first paragraph has the following typo:
"strcture" -> "structure"
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/source/tutorials_source/package/tutorial_simclr_clothing.py`
Content:
```
1 """
2 .. _lightly-simclr-tutorial-3:
3
4 Tutorial 3: Train SimCLR on Clothing
5 ==============================================
6
7 In this tutorial, we will train a SimCLR model using lightly. The model,
8 augmentations and training procedure is from
9 `A Simple Framework for Contrastive Learning of Visual Representations <https://arxiv.org/abs/2002.05709>`_.
10
11 The paper explores a rather simple training procedure for contrastive learning.
12 Since we use the typical contrastive learning loss based on NCE the method
13 greatly benefits from having larger batch sizes. In this example, we use a batch
14 size of 256 and paired with the input resolution per image of 64x64 pixels and
15 a resnet-18 model this example requires 16GB of GPU memory.
16
17 We use the
18 `clothing dataset from Alex Grigorev <https://github.com/alexeygrigorev/clothing-dataset>`_
19 for this tutorial.
20
21 In this tutorial you will learn:
22
23 - How to create a SimCLR model
24
25 - How to generate image representations
26
27 - How different augmentations impact the learned representations
28
29 """
30
31 # %%
32 # Imports
33 # -------
34 #
35 # Import the Python frameworks we need for this tutorial.
36 import os
37
38 import matplotlib.pyplot as plt
39 import numpy as np
40 import pytorch_lightning as pl
41 import torch
42 import torch.nn as nn
43 import torchvision
44 from PIL import Image
45 from sklearn.neighbors import NearestNeighbors
46 from sklearn.preprocessing import normalize
47
48 from lightly.data import LightlyDataset
49 from lightly.transforms import SimCLRTransform, utils
50
51 # %%
52 # Configuration
53 # -------------
54 #
55 # We set some configuration parameters for our experiment.
56 # Feel free to change them and analyze the effect.
57 #
58 # The default configuration with a batch size of 256 and input resolution of 128
59 # requires 6GB of GPU memory.
60 num_workers = 8
61 batch_size = 256
62 seed = 1
63 max_epochs = 20
64 input_size = 128
65 num_ftrs = 32
66
67 # %%
68 # Let's set the seed for our experiments
69 pl.seed_everything(seed)
70
71 # %%
72 # Make sure `path_to_data` points to the downloaded clothing dataset.
73 # You can download it using
74 # `git clone https://github.com/alexeygrigorev/clothing-dataset.git`
75 path_to_data = "/datasets/clothing-dataset/images"
76
77
78 # %%
79 # Setup data augmentations and loaders
80 # ------------------------------------
81 #
82 # The images from the dataset have been taken from above when the clothing was
83 # on a table, bed or floor. Therefore, we can make use of additional augmentations
84 # such as vertical flip or random rotation (90 degrees).
85 # By adding these augmentations we learn our model invariance regarding the
86 # orientation of the clothing piece. E.g. we don't care if a shirt is upside down
87 # but more about the strcture which make it a shirt.
88 #
89 # You can learn more about the different augmentations and learned invariances
90 # here: :ref:`lightly-advanced`.
91 transform = SimCLRTransform(input_size=input_size, vf_prob=0.5, rr_prob=0.5)
92
93 # We create a torchvision transformation for embedding the dataset after
94 # training
95 test_transform = torchvision.transforms.Compose(
96 [
97 torchvision.transforms.Resize((input_size, input_size)),
98 torchvision.transforms.ToTensor(),
99 torchvision.transforms.Normalize(
100 mean=utils.IMAGENET_NORMALIZE["mean"],
101 std=utils.IMAGENET_NORMALIZE["std"],
102 ),
103 ]
104 )
105
106 dataset_train_simclr = LightlyDataset(input_dir=path_to_data, transform=transform)
107
108 dataset_test = LightlyDataset(input_dir=path_to_data, transform=test_transform)
109
110 dataloader_train_simclr = torch.utils.data.DataLoader(
111 dataset_train_simclr,
112 batch_size=batch_size,
113 shuffle=True,
114 drop_last=True,
115 num_workers=num_workers,
116 )
117
118 dataloader_test = torch.utils.data.DataLoader(
119 dataset_test,
120 batch_size=batch_size,
121 shuffle=False,
122 drop_last=False,
123 num_workers=num_workers,
124 )
125
126 # %%
127 # Create the SimCLR Model
128 # -----------------------
129 # Now we create the SimCLR model. We implement it as a PyTorch Lightning Module
130 # and use a ResNet-18 backbone from Torchvision. Lightly provides implementations
131 # of the SimCLR projection head and loss function in the `SimCLRProjectionHead`
132 # and `NTXentLoss` classes. We can simply import them and combine the building
133 # blocks in the module.
134
135 from lightly.loss import NTXentLoss
136 from lightly.models.modules.heads import SimCLRProjectionHead
137
138
139 class SimCLRModel(pl.LightningModule):
140 def __init__(self):
141 super().__init__()
142
143 # create a ResNet backbone and remove the classification head
144 resnet = torchvision.models.resnet18()
145 self.backbone = nn.Sequential(*list(resnet.children())[:-1])
146
147 hidden_dim = resnet.fc.in_features
148 self.projection_head = SimCLRProjectionHead(hidden_dim, hidden_dim, 128)
149
150 self.criterion = NTXentLoss()
151
152 def forward(self, x):
153 h = self.backbone(x).flatten(start_dim=1)
154 z = self.projection_head(h)
155 return z
156
157 def training_step(self, batch, batch_idx):
158 (x0, x1), _, _ = batch
159 z0 = self.forward(x0)
160 z1 = self.forward(x1)
161 loss = self.criterion(z0, z1)
162 self.log("train_loss_ssl", loss)
163 return loss
164
165 def configure_optimizers(self):
166 optim = torch.optim.SGD(
167 self.parameters(), lr=6e-2, momentum=0.9, weight_decay=5e-4
168 )
169 scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)
170 return [optim], [scheduler]
171
172
173 # %%
174 # Train the module using the PyTorch Lightning Trainer on a single GPU.
175
176 model = SimCLRModel()
177 trainer = pl.Trainer(max_epochs=max_epochs, devices=1, accelerator="gpu")
178 trainer.fit(model, dataloader_train_simclr)
179
180 # %%
181 # Next we create a helper function to generate embeddings
182 # from our test images using the model we just trained.
183 # Note that only the backbone is needed to generate embeddings,
184 # the projection head is only required for the training.
185 # Make sure to put the model into eval mode for this part!
186
187
188 def generate_embeddings(model, dataloader):
189 """Generates representations for all images in the dataloader with
190 the given model
191 """
192
193 embeddings = []
194 filenames = []
195 with torch.no_grad():
196 for img, _, fnames in dataloader:
197 img = img.to(model.device)
198 emb = model.backbone(img).flatten(start_dim=1)
199 embeddings.append(emb)
200 filenames.extend(fnames)
201
202 embeddings = torch.cat(embeddings, 0)
203 embeddings = normalize(embeddings)
204 return embeddings, filenames
205
206
207 model.eval()
208 embeddings, filenames = generate_embeddings(model, dataloader_test)
209
210 # %%
211 # Visualize Nearest Neighbors
212 # ----------------------------
213 # Let's look at the trained embedding and visualize the nearest neighbors for
214 # a few random samples.
215 #
216 # We create some helper functions to simplify the work
217
218
219 def get_image_as_np_array(filename: str):
220 """Returns an image as an numpy array"""
221 img = Image.open(filename)
222 return np.asarray(img)
223
224
225 def plot_knn_examples(embeddings, filenames, n_neighbors=3, num_examples=6):
226 """Plots multiple rows of random images with their nearest neighbors"""
227 # lets look at the nearest neighbors for some samples
228 # we use the sklearn library
229 nbrs = NearestNeighbors(n_neighbors=n_neighbors).fit(embeddings)
230 distances, indices = nbrs.kneighbors(embeddings)
231
232 # get 5 random samples
233 samples_idx = np.random.choice(len(indices), size=num_examples, replace=False)
234
235 # loop through our randomly picked samples
236 for idx in samples_idx:
237 fig = plt.figure()
238 # loop through their nearest neighbors
239 for plot_x_offset, neighbor_idx in enumerate(indices[idx]):
240 # add the subplot
241 ax = fig.add_subplot(1, len(indices[idx]), plot_x_offset + 1)
242 # get the correponding filename for the current index
243 fname = os.path.join(path_to_data, filenames[neighbor_idx])
244 # plot the image
245 plt.imshow(get_image_as_np_array(fname))
246 # set the title to the distance of the neighbor
247 ax.set_title(f"d={distances[idx][plot_x_offset]:.3f}")
248 # let's disable the axis
249 plt.axis("off")
250
251
252 # %%
253 # Let's do the plot of the images. The leftmost image is the query image whereas
254 # the ones next to it on the same row are the nearest neighbors.
255 # In the title we see the distance of the neigbor.
256 plot_knn_examples(embeddings, filenames)
257
258 # %%
259 # Color Invariance
260 # ---------------------
261 # Let's train again without color augmentation. This will force our model to
262 # respect the colors in the images.
263
264 # Set color jitter and gray scale probability to 0
265 new_transform = SimCLRTransform(
266 input_size=input_size, vf_prob=0.5, rr_prob=0.5, cj_prob=0.0, random_gray_scale=0.0
267 )
268
269 # let's update the transform on the training dataset
270 dataset_train_simclr.transform = new_transform
271
272 # then train a new model
273 model = SimCLRModel()
274 trainer = pl.Trainer(max_epochs=max_epochs, devices=1, accelerator="gpu")
275 trainer.fit(model, dataloader_train_simclr)
276
277 # and generate again embeddings from the test set
278 model.eval()
279 embeddings, filenames = generate_embeddings(model, dataloader_test)
280
281 # %%
282 # other example
283 plot_knn_examples(embeddings, filenames)
284
285 # %%
286 # What's next?
287
288 # You could use the pre-trained model and train a classifier on top.
289 pretrained_resnet_backbone = model.backbone
290
291 # you can also store the backbone and use it in another code
292 state_dict = {"resnet18_parameters": pretrained_resnet_backbone.state_dict()}
293 torch.save(state_dict, "model.pth")
294
295 # %%
296 # THIS COULD BE IN A NEW FILE (e.g. inference.py)
297 #
298 # Make sure you place the `model.pth` file in the same folder as this code
299
300 # load the model in a new file for inference
301 resnet18_new = torchvision.models.resnet18()
302
303 # note that we need to create exactly the same backbone in order to load the weights
304 backbone_new = nn.Sequential(*list(resnet18_new.children())[:-1])
305
306 ckpt = torch.load("model.pth")
307 backbone_new.load_state_dict(ckpt["resnet18_parameters"])
308
309 # %%
310 # Next Steps
311 # ------------
312 #
313 # Interested in exploring other self-supervised models? Check out our other
314 # tutorials:
315 #
316 # - :ref:`lightly-moco-tutorial-2`
317 # - :ref:`lightly-simsiam-tutorial-4`
318 # - :ref:`lightly-custom-augmentation-5`
319 # - :ref:`lightly-detectron-tutorial-6`
320 #
321
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/source/tutorials_source/package/tutorial_simclr_clothing.py b/docs/source/tutorials_source/package/tutorial_simclr_clothing.py
--- a/docs/source/tutorials_source/package/tutorial_simclr_clothing.py
+++ b/docs/source/tutorials_source/package/tutorial_simclr_clothing.py
@@ -84,7 +84,7 @@
# such as vertical flip or random rotation (90 degrees).
# By adding these augmentations we learn our model invariance regarding the
# orientation of the clothing piece. E.g. we don't care if a shirt is upside down
-# but more about the strcture which make it a shirt.
+# but more about the structure which make it a shirt.
#
# You can learn more about the different augmentations and learned invariances
# here: :ref:`lightly-advanced`.
| {"golden_diff": "diff --git a/docs/source/tutorials_source/package/tutorial_simclr_clothing.py b/docs/source/tutorials_source/package/tutorial_simclr_clothing.py\n--- a/docs/source/tutorials_source/package/tutorial_simclr_clothing.py\n+++ b/docs/source/tutorials_source/package/tutorial_simclr_clothing.py\n@@ -84,7 +84,7 @@\n # such as vertical flip or random rotation (90 degrees).\n # By adding these augmentations we learn our model invariance regarding the\n # orientation of the clothing piece. E.g. we don't care if a shirt is upside down\n-# but more about the strcture which make it a shirt.\n+# but more about the structure which make it a shirt.\n #\n # You can learn more about the different augmentations and learned invariances\n # here: :ref:`lightly-advanced`.\n", "issue": "Tutorial 3: small typo \"strcture\" -> \"structure\"\nIn [this section](https://docs.lightly.ai/self-supervised-learning/tutorials/package/tutorial_simclr_clothing.html#setup-data-augmentations-and-loaders), the first paragraph has the following typo:\r\n\r\n\"strcture\" -> \"structure\"\r\n\n", "before_files": [{"content": "\"\"\"\n.. _lightly-simclr-tutorial-3:\n\nTutorial 3: Train SimCLR on Clothing\n==============================================\n\nIn this tutorial, we will train a SimCLR model using lightly. The model,\naugmentations and training procedure is from \n`A Simple Framework for Contrastive Learning of Visual Representations <https://arxiv.org/abs/2002.05709>`_.\n\nThe paper explores a rather simple training procedure for contrastive learning.\nSince we use the typical contrastive learning loss based on NCE the method\ngreatly benefits from having larger batch sizes. In this example, we use a batch\nsize of 256 and paired with the input resolution per image of 64x64 pixels and\na resnet-18 model this example requires 16GB of GPU memory.\n\nWe use the \n`clothing dataset from Alex Grigorev <https://github.com/alexeygrigorev/clothing-dataset>`_ \nfor this tutorial.\n\nIn this tutorial you will learn:\n\n- How to create a SimCLR model\n\n- How to generate image representations\n\n- How different augmentations impact the learned representations\n\n\"\"\"\n\n# %%\n# Imports\n# -------\n#\n# Import the Python frameworks we need for this tutorial.\nimport os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pytorch_lightning as pl\nimport torch\nimport torch.nn as nn\nimport torchvision\nfrom PIL import Image\nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.preprocessing import normalize\n\nfrom lightly.data import LightlyDataset\nfrom lightly.transforms import SimCLRTransform, utils\n\n# %%\n# Configuration\n# -------------\n#\n# We set some configuration parameters for our experiment.\n# Feel free to change them and analyze the effect.\n#\n# The default configuration with a batch size of 256 and input resolution of 128\n# requires 6GB of GPU memory.\nnum_workers = 8\nbatch_size = 256\nseed = 1\nmax_epochs = 20\ninput_size = 128\nnum_ftrs = 32\n\n# %%\n# Let's set the seed for our experiments\npl.seed_everything(seed)\n\n# %%\n# Make sure `path_to_data` points to the downloaded clothing dataset.\n# You can download it using\n# `git clone https://github.com/alexeygrigorev/clothing-dataset.git`\npath_to_data = \"/datasets/clothing-dataset/images\"\n\n\n# %%\n# Setup data augmentations and loaders\n# ------------------------------------\n#\n# The images from the dataset have been taken from above when the clothing was\n# on a table, bed or floor. 
Therefore, we can make use of additional augmentations\n# such as vertical flip or random rotation (90 degrees).\n# By adding these augmentations we learn our model invariance regarding the\n# orientation of the clothing piece. E.g. we don't care if a shirt is upside down\n# but more about the strcture which make it a shirt.\n#\n# You can learn more about the different augmentations and learned invariances\n# here: :ref:`lightly-advanced`.\ntransform = SimCLRTransform(input_size=input_size, vf_prob=0.5, rr_prob=0.5)\n\n# We create a torchvision transformation for embedding the dataset after\n# training\ntest_transform = torchvision.transforms.Compose(\n [\n torchvision.transforms.Resize((input_size, input_size)),\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(\n mean=utils.IMAGENET_NORMALIZE[\"mean\"],\n std=utils.IMAGENET_NORMALIZE[\"std\"],\n ),\n ]\n)\n\ndataset_train_simclr = LightlyDataset(input_dir=path_to_data, transform=transform)\n\ndataset_test = LightlyDataset(input_dir=path_to_data, transform=test_transform)\n\ndataloader_train_simclr = torch.utils.data.DataLoader(\n dataset_train_simclr,\n batch_size=batch_size,\n shuffle=True,\n drop_last=True,\n num_workers=num_workers,\n)\n\ndataloader_test = torch.utils.data.DataLoader(\n dataset_test,\n batch_size=batch_size,\n shuffle=False,\n drop_last=False,\n num_workers=num_workers,\n)\n\n# %%\n# Create the SimCLR Model\n# -----------------------\n# Now we create the SimCLR model. We implement it as a PyTorch Lightning Module\n# and use a ResNet-18 backbone from Torchvision. Lightly provides implementations\n# of the SimCLR projection head and loss function in the `SimCLRProjectionHead`\n# and `NTXentLoss` classes. We can simply import them and combine the building\n# blocks in the module.\n\nfrom lightly.loss import NTXentLoss\nfrom lightly.models.modules.heads import SimCLRProjectionHead\n\n\nclass SimCLRModel(pl.LightningModule):\n def __init__(self):\n super().__init__()\n\n # create a ResNet backbone and remove the classification head\n resnet = torchvision.models.resnet18()\n self.backbone = nn.Sequential(*list(resnet.children())[:-1])\n\n hidden_dim = resnet.fc.in_features\n self.projection_head = SimCLRProjectionHead(hidden_dim, hidden_dim, 128)\n\n self.criterion = NTXentLoss()\n\n def forward(self, x):\n h = self.backbone(x).flatten(start_dim=1)\n z = self.projection_head(h)\n return z\n\n def training_step(self, batch, batch_idx):\n (x0, x1), _, _ = batch\n z0 = self.forward(x0)\n z1 = self.forward(x1)\n loss = self.criterion(z0, z1)\n self.log(\"train_loss_ssl\", loss)\n return loss\n\n def configure_optimizers(self):\n optim = torch.optim.SGD(\n self.parameters(), lr=6e-2, momentum=0.9, weight_decay=5e-4\n )\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)\n return [optim], [scheduler]\n\n\n# %%\n# Train the module using the PyTorch Lightning Trainer on a single GPU.\n\nmodel = SimCLRModel()\ntrainer = pl.Trainer(max_epochs=max_epochs, devices=1, accelerator=\"gpu\")\ntrainer.fit(model, dataloader_train_simclr)\n\n# %%\n# Next we create a helper function to generate embeddings\n# from our test images using the model we just trained.\n# Note that only the backbone is needed to generate embeddings,\n# the projection head is only required for the training.\n# Make sure to put the model into eval mode for this part!\n\n\ndef generate_embeddings(model, dataloader):\n \"\"\"Generates representations for all images in the dataloader with\n the given model\n \"\"\"\n\n 
embeddings = []\n filenames = []\n with torch.no_grad():\n for img, _, fnames in dataloader:\n img = img.to(model.device)\n emb = model.backbone(img).flatten(start_dim=1)\n embeddings.append(emb)\n filenames.extend(fnames)\n\n embeddings = torch.cat(embeddings, 0)\n embeddings = normalize(embeddings)\n return embeddings, filenames\n\n\nmodel.eval()\nembeddings, filenames = generate_embeddings(model, dataloader_test)\n\n# %%\n# Visualize Nearest Neighbors\n# ----------------------------\n# Let's look at the trained embedding and visualize the nearest neighbors for\n# a few random samples.\n#\n# We create some helper functions to simplify the work\n\n\ndef get_image_as_np_array(filename: str):\n \"\"\"Returns an image as an numpy array\"\"\"\n img = Image.open(filename)\n return np.asarray(img)\n\n\ndef plot_knn_examples(embeddings, filenames, n_neighbors=3, num_examples=6):\n \"\"\"Plots multiple rows of random images with their nearest neighbors\"\"\"\n # lets look at the nearest neighbors for some samples\n # we use the sklearn library\n nbrs = NearestNeighbors(n_neighbors=n_neighbors).fit(embeddings)\n distances, indices = nbrs.kneighbors(embeddings)\n\n # get 5 random samples\n samples_idx = np.random.choice(len(indices), size=num_examples, replace=False)\n\n # loop through our randomly picked samples\n for idx in samples_idx:\n fig = plt.figure()\n # loop through their nearest neighbors\n for plot_x_offset, neighbor_idx in enumerate(indices[idx]):\n # add the subplot\n ax = fig.add_subplot(1, len(indices[idx]), plot_x_offset + 1)\n # get the correponding filename for the current index\n fname = os.path.join(path_to_data, filenames[neighbor_idx])\n # plot the image\n plt.imshow(get_image_as_np_array(fname))\n # set the title to the distance of the neighbor\n ax.set_title(f\"d={distances[idx][plot_x_offset]:.3f}\")\n # let's disable the axis\n plt.axis(\"off\")\n\n\n# %%\n# Let's do the plot of the images. The leftmost image is the query image whereas\n# the ones next to it on the same row are the nearest neighbors.\n# In the title we see the distance of the neigbor.\nplot_knn_examples(embeddings, filenames)\n\n# %%\n# Color Invariance\n# ---------------------\n# Let's train again without color augmentation. This will force our model to\n# respect the colors in the images.\n\n# Set color jitter and gray scale probability to 0\nnew_transform = SimCLRTransform(\n input_size=input_size, vf_prob=0.5, rr_prob=0.5, cj_prob=0.0, random_gray_scale=0.0\n)\n\n# let's update the transform on the training dataset\ndataset_train_simclr.transform = new_transform\n\n# then train a new model\nmodel = SimCLRModel()\ntrainer = pl.Trainer(max_epochs=max_epochs, devices=1, accelerator=\"gpu\")\ntrainer.fit(model, dataloader_train_simclr)\n\n# and generate again embeddings from the test set\nmodel.eval()\nembeddings, filenames = generate_embeddings(model, dataloader_test)\n\n# %%\n# other example\nplot_knn_examples(embeddings, filenames)\n\n# %%\n# What's next?\n\n# You could use the pre-trained model and train a classifier on top.\npretrained_resnet_backbone = model.backbone\n\n# you can also store the backbone and use it in another code\nstate_dict = {\"resnet18_parameters\": pretrained_resnet_backbone.state_dict()}\ntorch.save(state_dict, \"model.pth\")\n\n# %%\n# THIS COULD BE IN A NEW FILE (e.g. 
inference.py)\n#\n# Make sure you place the `model.pth` file in the same folder as this code\n\n# load the model in a new file for inference\nresnet18_new = torchvision.models.resnet18()\n\n# note that we need to create exactly the same backbone in order to load the weights\nbackbone_new = nn.Sequential(*list(resnet18_new.children())[:-1])\n\nckpt = torch.load(\"model.pth\")\nbackbone_new.load_state_dict(ckpt[\"resnet18_parameters\"])\n\n# %%\n# Next Steps\n# ------------\n#\n# Interested in exploring other self-supervised models? Check out our other\n# tutorials:\n#\n# - :ref:`lightly-moco-tutorial-2`\n# - :ref:`lightly-simsiam-tutorial-4`\n# - :ref:`lightly-custom-augmentation-5`\n# - :ref:`lightly-detectron-tutorial-6`\n#\n", "path": "docs/source/tutorials_source/package/tutorial_simclr_clothing.py"}], "after_files": [{"content": "\"\"\"\n.. _lightly-simclr-tutorial-3:\n\nTutorial 3: Train SimCLR on Clothing\n==============================================\n\nIn this tutorial, we will train a SimCLR model using lightly. The model,\naugmentations and training procedure is from \n`A Simple Framework for Contrastive Learning of Visual Representations <https://arxiv.org/abs/2002.05709>`_.\n\nThe paper explores a rather simple training procedure for contrastive learning.\nSince we use the typical contrastive learning loss based on NCE the method\ngreatly benefits from having larger batch sizes. In this example, we use a batch\nsize of 256 and paired with the input resolution per image of 64x64 pixels and\na resnet-18 model this example requires 16GB of GPU memory.\n\nWe use the \n`clothing dataset from Alex Grigorev <https://github.com/alexeygrigorev/clothing-dataset>`_ \nfor this tutorial.\n\nIn this tutorial you will learn:\n\n- How to create a SimCLR model\n\n- How to generate image representations\n\n- How different augmentations impact the learned representations\n\n\"\"\"\n\n# %%\n# Imports\n# -------\n#\n# Import the Python frameworks we need for this tutorial.\nimport os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pytorch_lightning as pl\nimport torch\nimport torch.nn as nn\nimport torchvision\nfrom PIL import Image\nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.preprocessing import normalize\n\nfrom lightly.data import LightlyDataset\nfrom lightly.transforms import SimCLRTransform, utils\n\n# %%\n# Configuration\n# -------------\n#\n# We set some configuration parameters for our experiment.\n# Feel free to change them and analyze the effect.\n#\n# The default configuration with a batch size of 256 and input resolution of 128\n# requires 6GB of GPU memory.\nnum_workers = 8\nbatch_size = 256\nseed = 1\nmax_epochs = 20\ninput_size = 128\nnum_ftrs = 32\n\n# %%\n# Let's set the seed for our experiments\npl.seed_everything(seed)\n\n# %%\n# Make sure `path_to_data` points to the downloaded clothing dataset.\n# You can download it using\n# `git clone https://github.com/alexeygrigorev/clothing-dataset.git`\npath_to_data = \"/datasets/clothing-dataset/images\"\n\n\n# %%\n# Setup data augmentations and loaders\n# ------------------------------------\n#\n# The images from the dataset have been taken from above when the clothing was\n# on a table, bed or floor. Therefore, we can make use of additional augmentations\n# such as vertical flip or random rotation (90 degrees).\n# By adding these augmentations we learn our model invariance regarding the\n# orientation of the clothing piece. E.g. 
we don't care if a shirt is upside down\n# but more about the structure which make it a shirt.\n#\n# You can learn more about the different augmentations and learned invariances\n# here: :ref:`lightly-advanced`.\ntransform = SimCLRTransform(input_size=input_size, vf_prob=0.5, rr_prob=0.5)\n\n# We create a torchvision transformation for embedding the dataset after\n# training\ntest_transform = torchvision.transforms.Compose(\n [\n torchvision.transforms.Resize((input_size, input_size)),\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(\n mean=utils.IMAGENET_NORMALIZE[\"mean\"],\n std=utils.IMAGENET_NORMALIZE[\"std\"],\n ),\n ]\n)\n\ndataset_train_simclr = LightlyDataset(input_dir=path_to_data, transform=transform)\n\ndataset_test = LightlyDataset(input_dir=path_to_data, transform=test_transform)\n\ndataloader_train_simclr = torch.utils.data.DataLoader(\n dataset_train_simclr,\n batch_size=batch_size,\n shuffle=True,\n drop_last=True,\n num_workers=num_workers,\n)\n\ndataloader_test = torch.utils.data.DataLoader(\n dataset_test,\n batch_size=batch_size,\n shuffle=False,\n drop_last=False,\n num_workers=num_workers,\n)\n\n# %%\n# Create the SimCLR Model\n# -----------------------\n# Now we create the SimCLR model. We implement it as a PyTorch Lightning Module\n# and use a ResNet-18 backbone from Torchvision. Lightly provides implementations\n# of the SimCLR projection head and loss function in the `SimCLRProjectionHead`\n# and `NTXentLoss` classes. We can simply import them and combine the building\n# blocks in the module.\n\nfrom lightly.loss import NTXentLoss\nfrom lightly.models.modules.heads import SimCLRProjectionHead\n\n\nclass SimCLRModel(pl.LightningModule):\n def __init__(self):\n super().__init__()\n\n # create a ResNet backbone and remove the classification head\n resnet = torchvision.models.resnet18()\n self.backbone = nn.Sequential(*list(resnet.children())[:-1])\n\n hidden_dim = resnet.fc.in_features\n self.projection_head = SimCLRProjectionHead(hidden_dim, hidden_dim, 128)\n\n self.criterion = NTXentLoss()\n\n def forward(self, x):\n h = self.backbone(x).flatten(start_dim=1)\n z = self.projection_head(h)\n return z\n\n def training_step(self, batch, batch_idx):\n (x0, x1), _, _ = batch\n z0 = self.forward(x0)\n z1 = self.forward(x1)\n loss = self.criterion(z0, z1)\n self.log(\"train_loss_ssl\", loss)\n return loss\n\n def configure_optimizers(self):\n optim = torch.optim.SGD(\n self.parameters(), lr=6e-2, momentum=0.9, weight_decay=5e-4\n )\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)\n return [optim], [scheduler]\n\n\n# %%\n# Train the module using the PyTorch Lightning Trainer on a single GPU.\n\nmodel = SimCLRModel()\ntrainer = pl.Trainer(max_epochs=max_epochs, devices=1, accelerator=\"gpu\")\ntrainer.fit(model, dataloader_train_simclr)\n\n# %%\n# Next we create a helper function to generate embeddings\n# from our test images using the model we just trained.\n# Note that only the backbone is needed to generate embeddings,\n# the projection head is only required for the training.\n# Make sure to put the model into eval mode for this part!\n\n\ndef generate_embeddings(model, dataloader):\n \"\"\"Generates representations for all images in the dataloader with\n the given model\n \"\"\"\n\n embeddings = []\n filenames = []\n with torch.no_grad():\n for img, _, fnames in dataloader:\n img = img.to(model.device)\n emb = model.backbone(img).flatten(start_dim=1)\n embeddings.append(emb)\n filenames.extend(fnames)\n\n embeddings = 
torch.cat(embeddings, 0)\n embeddings = normalize(embeddings)\n return embeddings, filenames\n\n\nmodel.eval()\nembeddings, filenames = generate_embeddings(model, dataloader_test)\n\n# %%\n# Visualize Nearest Neighbors\n# ----------------------------\n# Let's look at the trained embedding and visualize the nearest neighbors for\n# a few random samples.\n#\n# We create some helper functions to simplify the work\n\n\ndef get_image_as_np_array(filename: str):\n \"\"\"Returns an image as an numpy array\"\"\"\n img = Image.open(filename)\n return np.asarray(img)\n\n\ndef plot_knn_examples(embeddings, filenames, n_neighbors=3, num_examples=6):\n \"\"\"Plots multiple rows of random images with their nearest neighbors\"\"\"\n # lets look at the nearest neighbors for some samples\n # we use the sklearn library\n nbrs = NearestNeighbors(n_neighbors=n_neighbors).fit(embeddings)\n distances, indices = nbrs.kneighbors(embeddings)\n\n # get 5 random samples\n samples_idx = np.random.choice(len(indices), size=num_examples, replace=False)\n\n # loop through our randomly picked samples\n for idx in samples_idx:\n fig = plt.figure()\n # loop through their nearest neighbors\n for plot_x_offset, neighbor_idx in enumerate(indices[idx]):\n # add the subplot\n ax = fig.add_subplot(1, len(indices[idx]), plot_x_offset + 1)\n # get the correponding filename for the current index\n fname = os.path.join(path_to_data, filenames[neighbor_idx])\n # plot the image\n plt.imshow(get_image_as_np_array(fname))\n # set the title to the distance of the neighbor\n ax.set_title(f\"d={distances[idx][plot_x_offset]:.3f}\")\n # let's disable the axis\n plt.axis(\"off\")\n\n\n# %%\n# Let's do the plot of the images. The leftmost image is the query image whereas\n# the ones next to it on the same row are the nearest neighbors.\n# In the title we see the distance of the neigbor.\nplot_knn_examples(embeddings, filenames)\n\n# %%\n# Color Invariance\n# ---------------------\n# Let's train again without color augmentation. This will force our model to\n# respect the colors in the images.\n\n# Set color jitter and gray scale probability to 0\nnew_transform = SimCLRTransform(\n input_size=input_size, vf_prob=0.5, rr_prob=0.5, cj_prob=0.0, random_gray_scale=0.0\n)\n\n# let's update the transform on the training dataset\ndataset_train_simclr.transform = new_transform\n\n# then train a new model\nmodel = SimCLRModel()\ntrainer = pl.Trainer(max_epochs=max_epochs, devices=1, accelerator=\"gpu\")\ntrainer.fit(model, dataloader_train_simclr)\n\n# and generate again embeddings from the test set\nmodel.eval()\nembeddings, filenames = generate_embeddings(model, dataloader_test)\n\n# %%\n# other example\nplot_knn_examples(embeddings, filenames)\n\n# %%\n# What's next?\n\n# You could use the pre-trained model and train a classifier on top.\npretrained_resnet_backbone = model.backbone\n\n# you can also store the backbone and use it in another code\nstate_dict = {\"resnet18_parameters\": pretrained_resnet_backbone.state_dict()}\ntorch.save(state_dict, \"model.pth\")\n\n# %%\n# THIS COULD BE IN A NEW FILE (e.g. 
inference.py)\n#\n# Make sure you place the `model.pth` file in the same folder as this code\n\n# load the model in a new file for inference\nresnet18_new = torchvision.models.resnet18()\n\n# note that we need to create exactly the same backbone in order to load the weights\nbackbone_new = nn.Sequential(*list(resnet18_new.children())[:-1])\n\nckpt = torch.load(\"model.pth\")\nbackbone_new.load_state_dict(ckpt[\"resnet18_parameters\"])\n\n# %%\n# Next Steps\n# ------------\n#\n# Interested in exploring other self-supervised models? Check out our other\n# tutorials:\n#\n# - :ref:`lightly-moco-tutorial-2`\n# - :ref:`lightly-simsiam-tutorial-4`\n# - :ref:`lightly-custom-augmentation-5`\n# - :ref:`lightly-detectron-tutorial-6`\n#\n", "path": "docs/source/tutorials_source/package/tutorial_simclr_clothing.py"}]} | 3,634 | 180 |
gh_patches_debug_38592 | rasdani/github-patches | git_diff | enthought__chaco-574 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SelectableOverlayPlotContainer can be removed
This is an _old_ class that has been superseded by the use of overlays.
It does not appear to be being used and can be safely removed for 5.0.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chaco/selectable_overlay_container.py`
Content:
```
1 """ Defines the SelectableOverlayPlotContainer class.
2 """
3
4
5
6 from numpy import array, float64
7
8 # Enthought library imports
9 from traits.api import Bool, Float, Enum
10 from enable.api import ColorTrait
11
12 # Local imports
13 from .plot_containers import OverlayPlotContainer
14
15 class SelectableOverlayPlotContainer(OverlayPlotContainer):
16 """
17 An OverlayPlotContainer that can show a selection region on top of it.
18 """
19
20 #: Screen position of the start of the selection, which can be in the x- or
21 #: y-dimension, depending on **selection_direction**.
22 selection_screen_start = Float(0.0)
23 #: Screen position of the end of the selection, which can be in the x- or
24 #: y-dimension, depending on **selection_direction**.
25 selection_screen_end = Float(0.0)
26 #: Is there an active selection?
27 selection_active = Bool(False)
28 #: The direction of the selection.
29 selection_direction = Enum('v', 'h')
30 #: The color to use to fill the selected region.
31 selection_fill_color = ColorTrait('lightskyblue')
32 #: The color to use to draw the border of the selected region.
33 selection_border_color = ColorTrait('dodgerblue')
34 #: The transparency of the **selection_fill_color**.
35 selection_alpha = Float(0.3)
36
37 def _draw_overlays(self, gc, view_bounds=None, mode='normal'):
38 """ Method for backward compatability with old drawing scheme.
39
40 Overrides BasePlotContainer.
41 """
42 self._draw_selection(gc, view_bounds=view_bounds, mode=mode)
43 return
44
45 def _draw_selection(self, gc, view_bounds=None, mode='normal'):
46 """ Renders a selected subset of a component's data.
47
48 Overrides PlotComponent.
49 """
50 if self.selection_active:
51 if self.selection_direction == 'h':
52 x1 = self.selection_screen_start
53 x2 = self.selection_screen_end
54 y1 = self.y
55 y2 = self.position[1] + self.bounds[1] - 1
56 else:
57 x1 = self.x
58 x2 = self.position[0] + self.bounds[0] - 1
59 y1 = self.selection_screen_start
60 y2 = self.selection_screen_end
61 lowerleft = array((min(x1, x2), min(y1, y2)), float64)
62 upperright = array((max(x1, x2), max(y1, y2)), float64)
63 with gc:
64 gc.translate_ctm(*self.position)
65 gc.set_fill_color(self.selection_fill_color_)
66 gc.set_stroke_color(self.selection_border_color_)
67 gc.set_alpha(self.selection_alpha)
68 gc.rect(lowerleft[0], lowerleft[1], upperright[0], upperright[1])
69 gc.draw_path()
70 return
71
72
73
```
Path: `chaco/api.py`
Content:
```
1 """ Defines the publicly accessible items of the Chaco API.
2 """
3 # This just imports the key datamodel classes into the top-level package
4 # namespace for convenience.
5
6 from .base import NumericalSequenceTrait, PointTrait, ImageTrait, DimensionTrait, \
7 SortOrderTrait, bin_search, reverse_map_1d, right_shift, \
8 left_shift, sort_points, find_runs, arg_find_runs, \
9 point_line_distance
10
11 # Data model
12 from .abstract_data_source import AbstractDataSource
13 from .array_data_source import ArrayDataSource
14 from .grid_data_source import GridDataSource
15 from .image_data import ImageData
16 from .multi_array_data_source import MultiArrayDataSource
17 from .point_data_source import PointDataSource
18 from .abstract_data_range import AbstractDataRange
19 from .base_data_range import BaseDataRange
20 from .data_range_1d import DataRange1D
21 from .data_range_2d import DataRange2D
22
23 # Mappers
24 from .abstract_mapper import AbstractMapper
25 from .base_1d_mapper import Base1DMapper
26 from .grid_mapper import GridMapper
27 from .log_mapper import LogMapper
28 from .linear_mapper import LinearMapper
29 from .color_mapper import ColorMapper, ColorMapTemplate
30 from .discrete_color_mapper import DiscreteColorMapper
31 from .transform_color_mapper import TransformColorMapper
32
33 # Colormaps and color palettes
34 from .default_colormaps import *
35 from .default_colors import *
36
37 # Visual components
38 from .abstract_plot_renderer import AbstractPlotRenderer
39 from .abstract_overlay import AbstractOverlay
40 from .base_plot_container import BasePlotContainer
41 from .base_plot_frame import BasePlotFrame
42 from .cross_plot_frame import CrossPlotFrame
43 from .data_view import DataView
44 from .simple_plot_frame import SimplePlotFrame
45 from .plot_component import PlotComponent
46 from .plot_graphics_context import PlotGraphicsContext, PlotGraphicsContextMixin
47 from .selectable_overlay_container import SelectableOverlayPlotContainer
48 from .plot_containers import OverlayPlotContainer, HPlotContainer, VPlotContainer, \
49 GridPlotContainer
50 GridContainer = GridPlotContainer
51
52 try:
53 from .plot_containers import ConstraintsPlotContainer
54 except ImportError:
55 pass
56
57 from .label import Label
58 from .plot_label import PlotLabel
59 from .legend import Legend
60 from .tooltip import ToolTip
61 from .data_label import DataLabel
62 from .lasso_overlay import LassoOverlay
63 from .color_bar import ColorBar
64 from .text_box_overlay import TextBoxOverlay
65 from .scatter_inspector_overlay import ScatterInspectorOverlay
66
67 # Renderers
68 from .barplot import BarPlot
69 from .base_1d_plot import Base1DPlot
70 from .base_2d_plot import Base2DPlot
71 from .base_xy_plot import BaseXYPlot
72 from .scatterplot import ScatterPlot, render_markers
73 from .image_plot import ImagePlot
74 from .cmap_image_plot import CMapImagePlot
75 from .contour_line_plot import ContourLinePlot
76 from .contour_poly_plot import ContourPolyPlot
77 from .lineplot import LinePlot
78 from .colormapped_scatterplot import ColormappedScatterPlot
79 from .colormapped_selection_overlay import ColormappedSelectionOverlay
80 from .polygon_plot import PolygonPlot
81 from .errorbar_plot import ErrorBarPlot
82 from .filled_line_plot import FilledLinePlot
83 from .quiverplot import QuiverPlot
84 from .candle_plot import CandlePlot
85 from .multi_line_plot import MultiLinePlot
86 from .jitterplot import JitterPlot
87 from .variable_size_scatterplot import VariableSizeScatterPlot
88 from .horizon_plot import BandedMapper, HorizonPlot
89 from .scatterplot_1d import ScatterPlot1D
90 from .line_scatterplot_1d import LineScatterPlot1D
91 from .text_plot_1d import TextPlot1D
92 from .segment_plot import SegmentPlot
93 from .text_plot import TextPlot
94
95 # Plot factories
96 from .plot_factory import create_bar_plot, create_line_plot, create_scatter_plot, \
97 create_polar_plot, add_default_axes, add_default_grids
98
99 from .abstract_plot_data import AbstractPlotData
100 from .array_plot_data import ArrayPlotData
101 from .data_frame_plot_data import DataFramePlotData
102 from .plot import Plot
103 from .toolbar_plot import ToolbarPlot
104
105 # Axis
106 from .axis import PlotAxis, MinorPlotAxis
107 from .label_axis import LabelAxis
108 from .ticks import AbstractTickGenerator, DefaultTickGenerator, auto_ticks, auto_interval, \
109 tick_intervals, log_auto_ticks, auto_bounds, calc_bound
110
111 # Grid
112 from .grid import PlotGrid
113
114 # Tools
115 from .abstract_controller import AbstractController
116
117 # Importing various symbols into the Chaco namespace for backwards
118 # compatibility. New code should directly import from Enable.
119 from enable.base_tool import BaseTool, KeySpec
120 from enable.markers import marker_trait
121
122 #EOF
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chaco/api.py b/chaco/api.py
--- a/chaco/api.py
+++ b/chaco/api.py
@@ -44,7 +44,6 @@
from .simple_plot_frame import SimplePlotFrame
from .plot_component import PlotComponent
from .plot_graphics_context import PlotGraphicsContext, PlotGraphicsContextMixin
-from .selectable_overlay_container import SelectableOverlayPlotContainer
from .plot_containers import OverlayPlotContainer, HPlotContainer, VPlotContainer, \
GridPlotContainer
GridContainer = GridPlotContainer
diff --git a/chaco/selectable_overlay_container.py b/chaco/selectable_overlay_container.py
deleted file mode 100644
--- a/chaco/selectable_overlay_container.py
+++ /dev/null
@@ -1,72 +0,0 @@
-""" Defines the SelectableOverlayPlotContainer class.
-"""
-
-
-
-from numpy import array, float64
-
-# Enthought library imports
-from traits.api import Bool, Float, Enum
-from enable.api import ColorTrait
-
-# Local imports
-from .plot_containers import OverlayPlotContainer
-
-class SelectableOverlayPlotContainer(OverlayPlotContainer):
- """
- An OverlayPlotContainer that can show a selection region on top of it.
- """
-
- #: Screen position of the start of the selection, which can be in the x- or
- #: y-dimension, depending on **selection_direction**.
- selection_screen_start = Float(0.0)
- #: Screen position of the end of the selection, which can be in the x- or
- #: y-dimension, depending on **selection_direction**.
- selection_screen_end = Float(0.0)
- #: Is there an active selection?
- selection_active = Bool(False)
- #: The direction of the selection.
- selection_direction = Enum('v', 'h')
- #: The color to use to fill the selected region.
- selection_fill_color = ColorTrait('lightskyblue')
- #: The color to use to draw the border of the selected region.
- selection_border_color = ColorTrait('dodgerblue')
- #: The transparency of the **selection_fill_color**.
- selection_alpha = Float(0.3)
-
- def _draw_overlays(self, gc, view_bounds=None, mode='normal'):
- """ Method for backward compatability with old drawing scheme.
-
- Overrides BasePlotContainer.
- """
- self._draw_selection(gc, view_bounds=view_bounds, mode=mode)
- return
-
- def _draw_selection(self, gc, view_bounds=None, mode='normal'):
- """ Renders a selected subset of a component's data.
-
- Overrides PlotComponent.
- """
- if self.selection_active:
- if self.selection_direction == 'h':
- x1 = self.selection_screen_start
- x2 = self.selection_screen_end
- y1 = self.y
- y2 = self.position[1] + self.bounds[1] - 1
- else:
- x1 = self.x
- x2 = self.position[0] + self.bounds[0] - 1
- y1 = self.selection_screen_start
- y2 = self.selection_screen_end
- lowerleft = array((min(x1, x2), min(y1, y2)), float64)
- upperright = array((max(x1, x2), max(y1, y2)), float64)
- with gc:
- gc.translate_ctm(*self.position)
- gc.set_fill_color(self.selection_fill_color_)
- gc.set_stroke_color(self.selection_border_color_)
- gc.set_alpha(self.selection_alpha)
- gc.rect(lowerleft[0], lowerleft[1], upperright[0], upperright[1])
- gc.draw_path()
- return
-
-
| {"golden_diff": "diff --git a/chaco/api.py b/chaco/api.py\n--- a/chaco/api.py\n+++ b/chaco/api.py\n@@ -44,7 +44,6 @@\n from .simple_plot_frame import SimplePlotFrame\n from .plot_component import PlotComponent\n from .plot_graphics_context import PlotGraphicsContext, PlotGraphicsContextMixin\n-from .selectable_overlay_container import SelectableOverlayPlotContainer\n from .plot_containers import OverlayPlotContainer, HPlotContainer, VPlotContainer, \\\n GridPlotContainer\n GridContainer = GridPlotContainer\ndiff --git a/chaco/selectable_overlay_container.py b/chaco/selectable_overlay_container.py\ndeleted file mode 100644\n--- a/chaco/selectable_overlay_container.py\n+++ /dev/null\n@@ -1,72 +0,0 @@\n-\"\"\" Defines the SelectableOverlayPlotContainer class.\n-\"\"\"\n-\n-\n-\n-from numpy import array, float64\n-\n-# Enthought library imports\n-from traits.api import Bool, Float, Enum\n-from enable.api import ColorTrait\n-\n-# Local imports\n-from .plot_containers import OverlayPlotContainer\n-\n-class SelectableOverlayPlotContainer(OverlayPlotContainer):\n- \"\"\"\n- An OverlayPlotContainer that can show a selection region on top of it.\n- \"\"\"\n-\n- #: Screen position of the start of the selection, which can be in the x- or\n- #: y-dimension, depending on **selection_direction**.\n- selection_screen_start = Float(0.0)\n- #: Screen position of the end of the selection, which can be in the x- or\n- #: y-dimension, depending on **selection_direction**.\n- selection_screen_end = Float(0.0)\n- #: Is there an active selection?\n- selection_active = Bool(False)\n- #: The direction of the selection.\n- selection_direction = Enum('v', 'h')\n- #: The color to use to fill the selected region.\n- selection_fill_color = ColorTrait('lightskyblue')\n- #: The color to use to draw the border of the selected region.\n- selection_border_color = ColorTrait('dodgerblue')\n- #: The transparency of the **selection_fill_color**.\n- selection_alpha = Float(0.3)\n-\n- def _draw_overlays(self, gc, view_bounds=None, mode='normal'):\n- \"\"\" Method for backward compatability with old drawing scheme.\n-\n- Overrides BasePlotContainer.\n- \"\"\"\n- self._draw_selection(gc, view_bounds=view_bounds, mode=mode)\n- return\n-\n- def _draw_selection(self, gc, view_bounds=None, mode='normal'):\n- \"\"\" Renders a selected subset of a component's data.\n-\n- Overrides PlotComponent.\n- \"\"\"\n- if self.selection_active:\n- if self.selection_direction == 'h':\n- x1 = self.selection_screen_start\n- x2 = self.selection_screen_end\n- y1 = self.y\n- y2 = self.position[1] + self.bounds[1] - 1\n- else:\n- x1 = self.x\n- x2 = self.position[0] + self.bounds[0] - 1\n- y1 = self.selection_screen_start\n- y2 = self.selection_screen_end\n- lowerleft = array((min(x1, x2), min(y1, y2)), float64)\n- upperright = array((max(x1, x2), max(y1, y2)), float64)\n- with gc:\n- gc.translate_ctm(*self.position)\n- gc.set_fill_color(self.selection_fill_color_)\n- gc.set_stroke_color(self.selection_border_color_)\n- gc.set_alpha(self.selection_alpha)\n- gc.rect(lowerleft[0], lowerleft[1], upperright[0], upperright[1])\n- gc.draw_path()\n- return\n-\n-\n", "issue": "SelectableOverlayPlotContainer can be removed\nThis is an _old_ class that has been superceded by the use of overlays.\r\n\r\nIt does not appear to be being used and can be safely removed for 5.0.\n", "before_files": [{"content": "\"\"\" Defines the SelectableOverlayPlotContainer class.\n\"\"\"\n\n\n\nfrom numpy import array, float64\n\n# Enthought library imports\nfrom traits.api import 
Bool, Float, Enum\nfrom enable.api import ColorTrait\n\n# Local imports\nfrom .plot_containers import OverlayPlotContainer\n\nclass SelectableOverlayPlotContainer(OverlayPlotContainer):\n \"\"\"\n An OverlayPlotContainer that can show a selection region on top of it.\n \"\"\"\n\n #: Screen position of the start of the selection, which can be in the x- or\n #: y-dimension, depending on **selection_direction**.\n selection_screen_start = Float(0.0)\n #: Screen position of the end of the selection, which can be in the x- or\n #: y-dimension, depending on **selection_direction**.\n selection_screen_end = Float(0.0)\n #: Is there an active selection?\n selection_active = Bool(False)\n #: The direction of the selection.\n selection_direction = Enum('v', 'h')\n #: The color to use to fill the selected region.\n selection_fill_color = ColorTrait('lightskyblue')\n #: The color to use to draw the border of the selected region.\n selection_border_color = ColorTrait('dodgerblue')\n #: The transparency of the **selection_fill_color**.\n selection_alpha = Float(0.3)\n\n def _draw_overlays(self, gc, view_bounds=None, mode='normal'):\n \"\"\" Method for backward compatability with old drawing scheme.\n\n Overrides BasePlotContainer.\n \"\"\"\n self._draw_selection(gc, view_bounds=view_bounds, mode=mode)\n return\n\n def _draw_selection(self, gc, view_bounds=None, mode='normal'):\n \"\"\" Renders a selected subset of a component's data.\n\n Overrides PlotComponent.\n \"\"\"\n if self.selection_active:\n if self.selection_direction == 'h':\n x1 = self.selection_screen_start\n x2 = self.selection_screen_end\n y1 = self.y\n y2 = self.position[1] + self.bounds[1] - 1\n else:\n x1 = self.x\n x2 = self.position[0] + self.bounds[0] - 1\n y1 = self.selection_screen_start\n y2 = self.selection_screen_end\n lowerleft = array((min(x1, x2), min(y1, y2)), float64)\n upperright = array((max(x1, x2), max(y1, y2)), float64)\n with gc:\n gc.translate_ctm(*self.position)\n gc.set_fill_color(self.selection_fill_color_)\n gc.set_stroke_color(self.selection_border_color_)\n gc.set_alpha(self.selection_alpha)\n gc.rect(lowerleft[0], lowerleft[1], upperright[0], upperright[1])\n gc.draw_path()\n return\n\n\n", "path": "chaco/selectable_overlay_container.py"}, {"content": "\"\"\" Defines the publicly accessible items of the Chaco API.\n\"\"\"\n# This just imports the key datamodel classes into the top-level package\n# namespace for convenience.\n\nfrom .base import NumericalSequenceTrait, PointTrait, ImageTrait, DimensionTrait, \\\n SortOrderTrait, bin_search, reverse_map_1d, right_shift, \\\n left_shift, sort_points, find_runs, arg_find_runs, \\\n point_line_distance\n\n# Data model\nfrom .abstract_data_source import AbstractDataSource\nfrom .array_data_source import ArrayDataSource\nfrom .grid_data_source import GridDataSource\nfrom .image_data import ImageData\nfrom .multi_array_data_source import MultiArrayDataSource\nfrom .point_data_source import PointDataSource\nfrom .abstract_data_range import AbstractDataRange\nfrom .base_data_range import BaseDataRange\nfrom .data_range_1d import DataRange1D\nfrom .data_range_2d import DataRange2D\n\n# Mappers\nfrom .abstract_mapper import AbstractMapper\nfrom .base_1d_mapper import Base1DMapper\nfrom .grid_mapper import GridMapper\nfrom .log_mapper import LogMapper\nfrom .linear_mapper import LinearMapper\nfrom .color_mapper import ColorMapper, ColorMapTemplate\nfrom .discrete_color_mapper import DiscreteColorMapper\nfrom .transform_color_mapper import TransformColorMapper\n\n# 
Colormaps and color palettes\nfrom .default_colormaps import *\nfrom .default_colors import *\n\n# Visual components\nfrom .abstract_plot_renderer import AbstractPlotRenderer\nfrom .abstract_overlay import AbstractOverlay\nfrom .base_plot_container import BasePlotContainer\nfrom .base_plot_frame import BasePlotFrame\nfrom .cross_plot_frame import CrossPlotFrame\nfrom .data_view import DataView\nfrom .simple_plot_frame import SimplePlotFrame\nfrom .plot_component import PlotComponent\nfrom .plot_graphics_context import PlotGraphicsContext, PlotGraphicsContextMixin\nfrom .selectable_overlay_container import SelectableOverlayPlotContainer\nfrom .plot_containers import OverlayPlotContainer, HPlotContainer, VPlotContainer, \\\n GridPlotContainer\nGridContainer = GridPlotContainer\n\ntry:\n from .plot_containers import ConstraintsPlotContainer\nexcept ImportError:\n pass\n\nfrom .label import Label\nfrom .plot_label import PlotLabel\nfrom .legend import Legend\nfrom .tooltip import ToolTip\nfrom .data_label import DataLabel\nfrom .lasso_overlay import LassoOverlay\nfrom .color_bar import ColorBar\nfrom .text_box_overlay import TextBoxOverlay\nfrom .scatter_inspector_overlay import ScatterInspectorOverlay\n\n# Renderers\nfrom .barplot import BarPlot\nfrom .base_1d_plot import Base1DPlot\nfrom .base_2d_plot import Base2DPlot\nfrom .base_xy_plot import BaseXYPlot\nfrom .scatterplot import ScatterPlot, render_markers\nfrom .image_plot import ImagePlot\nfrom .cmap_image_plot import CMapImagePlot\nfrom .contour_line_plot import ContourLinePlot\nfrom .contour_poly_plot import ContourPolyPlot\nfrom .lineplot import LinePlot\nfrom .colormapped_scatterplot import ColormappedScatterPlot\nfrom .colormapped_selection_overlay import ColormappedSelectionOverlay\nfrom .polygon_plot import PolygonPlot\nfrom .errorbar_plot import ErrorBarPlot\nfrom .filled_line_plot import FilledLinePlot\nfrom .quiverplot import QuiverPlot\nfrom .candle_plot import CandlePlot\nfrom .multi_line_plot import MultiLinePlot\nfrom .jitterplot import JitterPlot\nfrom .variable_size_scatterplot import VariableSizeScatterPlot\nfrom .horizon_plot import BandedMapper, HorizonPlot\nfrom .scatterplot_1d import ScatterPlot1D\nfrom .line_scatterplot_1d import LineScatterPlot1D\nfrom .text_plot_1d import TextPlot1D\nfrom .segment_plot import SegmentPlot\nfrom .text_plot import TextPlot\n\n# Plot factories\nfrom .plot_factory import create_bar_plot, create_line_plot, create_scatter_plot, \\\n create_polar_plot, add_default_axes, add_default_grids\n\nfrom .abstract_plot_data import AbstractPlotData\nfrom .array_plot_data import ArrayPlotData\nfrom .data_frame_plot_data import DataFramePlotData\nfrom .plot import Plot\nfrom .toolbar_plot import ToolbarPlot\n\n# Axis\nfrom .axis import PlotAxis, MinorPlotAxis\nfrom .label_axis import LabelAxis\nfrom .ticks import AbstractTickGenerator, DefaultTickGenerator, auto_ticks, auto_interval, \\\n tick_intervals, log_auto_ticks, auto_bounds, calc_bound\n\n# Grid\nfrom .grid import PlotGrid\n\n# Tools\nfrom .abstract_controller import AbstractController\n\n# Importing various symbols into the Chaco namespace for backwards\n# compatibility. 
New code should directly import from Enable.\nfrom enable.base_tool import BaseTool, KeySpec\nfrom enable.markers import marker_trait\n\n#EOF\n", "path": "chaco/api.py"}], "after_files": [{"content": null, "path": "chaco/selectable_overlay_container.py"}, {"content": "\"\"\" Defines the publicly accessible items of the Chaco API.\n\"\"\"\n# This just imports the key datamodel classes into the top-level package\n# namespace for convenience.\n\nfrom .base import NumericalSequenceTrait, PointTrait, ImageTrait, DimensionTrait, \\\n SortOrderTrait, bin_search, reverse_map_1d, right_shift, \\\n left_shift, sort_points, find_runs, arg_find_runs, \\\n point_line_distance\n\n# Data model\nfrom .abstract_data_source import AbstractDataSource\nfrom .array_data_source import ArrayDataSource\nfrom .grid_data_source import GridDataSource\nfrom .image_data import ImageData\nfrom .multi_array_data_source import MultiArrayDataSource\nfrom .point_data_source import PointDataSource\nfrom .abstract_data_range import AbstractDataRange\nfrom .base_data_range import BaseDataRange\nfrom .data_range_1d import DataRange1D\nfrom .data_range_2d import DataRange2D\n\n# Mappers\nfrom .abstract_mapper import AbstractMapper\nfrom .base_1d_mapper import Base1DMapper\nfrom .grid_mapper import GridMapper\nfrom .log_mapper import LogMapper\nfrom .linear_mapper import LinearMapper\nfrom .color_mapper import ColorMapper, ColorMapTemplate\nfrom .discrete_color_mapper import DiscreteColorMapper\nfrom .transform_color_mapper import TransformColorMapper\n\n# Colormaps and color palettes\nfrom .default_colormaps import *\nfrom .default_colors import *\n\n# Visual components\nfrom .abstract_plot_renderer import AbstractPlotRenderer\nfrom .abstract_overlay import AbstractOverlay\nfrom .base_plot_container import BasePlotContainer\nfrom .base_plot_frame import BasePlotFrame\nfrom .cross_plot_frame import CrossPlotFrame\nfrom .data_view import DataView\nfrom .simple_plot_frame import SimplePlotFrame\nfrom .plot_component import PlotComponent\nfrom .plot_graphics_context import PlotGraphicsContext, PlotGraphicsContextMixin\nfrom .plot_containers import OverlayPlotContainer, HPlotContainer, VPlotContainer, \\\n GridPlotContainer\nGridContainer = GridPlotContainer\n\ntry:\n from .plot_containers import ConstraintsPlotContainer\nexcept ImportError:\n pass\n\nfrom .label import Label\nfrom .plot_label import PlotLabel\nfrom .legend import Legend\nfrom .tooltip import ToolTip\nfrom .data_label import DataLabel\nfrom .lasso_overlay import LassoOverlay\nfrom .color_bar import ColorBar\nfrom .text_box_overlay import TextBoxOverlay\nfrom .scatter_inspector_overlay import ScatterInspectorOverlay\n\n# Renderers\nfrom .barplot import BarPlot\nfrom .base_1d_plot import Base1DPlot\nfrom .base_2d_plot import Base2DPlot\nfrom .base_xy_plot import BaseXYPlot\nfrom .scatterplot import ScatterPlot, render_markers\nfrom .image_plot import ImagePlot\nfrom .cmap_image_plot import CMapImagePlot\nfrom .contour_line_plot import ContourLinePlot\nfrom .contour_poly_plot import ContourPolyPlot\nfrom .lineplot import LinePlot\nfrom .colormapped_scatterplot import ColormappedScatterPlot\nfrom .colormapped_selection_overlay import ColormappedSelectionOverlay\nfrom .polygon_plot import PolygonPlot\nfrom .errorbar_plot import ErrorBarPlot\nfrom .filled_line_plot import FilledLinePlot\nfrom .quiverplot import QuiverPlot\nfrom .candle_plot import CandlePlot\nfrom .multi_line_plot import MultiLinePlot\nfrom .jitterplot import JitterPlot\nfrom .variable_size_scatterplot 
import VariableSizeScatterPlot\nfrom .horizon_plot import BandedMapper, HorizonPlot\nfrom .scatterplot_1d import ScatterPlot1D\nfrom .line_scatterplot_1d import LineScatterPlot1D\nfrom .text_plot_1d import TextPlot1D\nfrom .segment_plot import SegmentPlot\nfrom .text_plot import TextPlot\n\n# Plot factories\nfrom .plot_factory import create_bar_plot, create_line_plot, create_scatter_plot, \\\n create_polar_plot, add_default_axes, add_default_grids\n\nfrom .abstract_plot_data import AbstractPlotData\nfrom .array_plot_data import ArrayPlotData\nfrom .data_frame_plot_data import DataFramePlotData\nfrom .plot import Plot\nfrom .toolbar_plot import ToolbarPlot\n\n# Axis\nfrom .axis import PlotAxis, MinorPlotAxis\nfrom .label_axis import LabelAxis\nfrom .ticks import AbstractTickGenerator, DefaultTickGenerator, auto_ticks, auto_interval, \\\n tick_intervals, log_auto_ticks, auto_bounds, calc_bound\n\n# Grid\nfrom .grid import PlotGrid\n\n# Tools\nfrom .abstract_controller import AbstractController\n\n# Importing various symbols into the Chaco namespace for backwards\n# compatibility. New code should directly import from Enable.\nfrom enable.base_tool import BaseTool, KeySpec\nfrom enable.markers import marker_trait\n\n#EOF\n", "path": "chaco/api.py"}]} | 2,386 | 849 |
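The chaco record above removes `SelectableOverlayPlotContainer` because selection regions are now expected to be drawn with overlays. A minimal sketch of what an overlay-based replacement could look like, assuming the usual `AbstractOverlay.overlay` signature and reusing the traits and drawing calls from the deleted class (this is an illustration, not code from the record):

```python
from numpy import array, float64
from traits.api import Bool, Enum, Float
from enable.api import ColorTrait
from chaco.api import AbstractOverlay


class SelectionOverlay(AbstractOverlay):
    """Sketch: draw a filled selection band on top of another component."""

    selection_screen_start = Float(0.0)
    selection_screen_end = Float(0.0)
    selection_active = Bool(False)
    selection_direction = Enum("v", "h")
    selection_fill_color = ColorTrait("lightskyblue")
    selection_alpha = Float(0.3)

    def overlay(self, component, gc, view_bounds=None, mode="normal"):
        if not self.selection_active:
            return
        if self.selection_direction == "h":
            x1, x2 = self.selection_screen_start, self.selection_screen_end
            y1, y2 = component.y, component.y2
        else:
            x1, x2 = component.x, component.x2
            y1, y2 = self.selection_screen_start, self.selection_screen_end
        lowerleft = array((min(x1, x2), min(y1, y2)), float64)
        upperright = array((max(x1, x2), max(y1, y2)), float64)
        with gc:
            gc.set_fill_color(self.selection_fill_color_)
            gc.set_alpha(self.selection_alpha)
            # rect() takes (x, y, width, height)
            gc.rect(lowerleft[0], lowerleft[1],
                    upperright[0] - lowerleft[0], upperright[1] - lowerleft[1])
            gc.fill_path()
```

The key design difference is that the selection drawing no longer lives on the container itself; any container can gain the behaviour by appending an instance to its `overlays` list.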
gh_patches_debug_23077 | rasdani/github-patches | git_diff | PrefectHQ__prefect-1667 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Core docs language
https://docs.prefect.io/core/
In a few places the Core docs mention that Cloud is coming soon, and I believe the language should be updated.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/prefect/engine/cache_validators.py`
Content:
```
1 """
2 Cache validators are functions that determine if a task's output cache
3 is still valid, or whether that task should be re-run; they are provided at
4 Task creation via the `cache_validator` keyword argument (for more information
5 on instantiating Tasks see the [Task documentation](../core/task.html)).
6
7 Task caches are created at Task runtime if and only if the `cache_for` keyword
8 argument is provided to the Task, that specifies how long the output cache will be valid for
9 after its creation. Cache validators come into play when a cached Task is re-run,
10 and are used to determine whether to re-run the Task or use the cache.
11
12 Note that _all_ validators take into account cache expiration.
13
14 A cache validator returns `True` if the cache is still valid, and `False` otherwise.
15 """
16 from typing import Any, Callable, Dict, Iterable
17
18 import pendulum
19
20 import prefect
21
22
23 def never_use(
24 state: "prefect.engine.state.Cached",
25 inputs: Dict[str, Any],
26 parameters: Dict[str, Any],
27 ) -> bool:
28 """
29 Never uses the cache.
30
31 Args:
32 - state (State): a `Success` state from the last successful Task run that contains the cache
33 - inputs (dict): a `dict` of inputs that were available on the last
34 successful run of the cached Task
35 - parameters (dict): a `dict` of parameters that were available on the
36 last successful run of the cached Task
37
38 Returns:
39 - boolean specifying whether or not the cache should be used
40 """
41 return False
42
43
44 def duration_only(
45 state: "prefect.engine.state.Cached",
46 inputs: Dict[str, Any],
47 parameters: Dict[str, Any],
48 ) -> bool:
49 """
50 Validates the cache based only on cache expiration.
51
52 Args:
53 - state (State): a `Success` state from the last successful Task run that contains the cache
54 - inputs (dict): a `dict` of inputs that were available on the last
55 successful run of the cached Task
56 - parameters (dict): a `dict` of parameters that were available on the
57 last successful run of the cached Task
58
59 Returns:
60 - boolean specifying whether or not the cache should be used
61 """
62 if state.cached_result_expiration is None:
63 return True
64 elif state.cached_result_expiration > pendulum.now("utc"):
65 return True
66 else:
67 return False
68
69
70 def all_inputs(
71 state: "prefect.engine.state.Cached",
72 inputs: Dict[str, Any],
73 parameters: Dict[str, Any],
74 ) -> bool:
75 """
76 Validates the cache based on cache expiration _and_ all inputs that were provided
77 on the last successful run.
78
79 Args:
80 - state (State): a `Success` state from the last successful Task run that contains the cache
81 - inputs (dict): a `dict` of inputs that were available on the last
82 successful run of the cached Task
83 - parameters (dict): a `dict` of parameters that were available on the
84 last successful run of the cached Task
85
86 Returns:
87 - boolean specifying whether or not the cache should be used
88 """
89 if duration_only(state, inputs, parameters) is False:
90 return False
91 elif {key: res.value for key, res in (state.cached_inputs or {}).items()} == inputs:
92 return True
93 else:
94 return False
95
96
97 def all_parameters(
98 state: "prefect.engine.state.Cached",
99 inputs: Dict[str, Any],
100 parameters: Dict[str, Any],
101 ) -> bool:
102 """
103 Validates the cache based on cache expiration _and_ all parameters that were provided
104 on the last successful run.
105
106 Args:
107 - state (State): a `Success` state from the last successful Task run that contains the cache
108 - inputs (dict): a `dict` of inputs that were available on the last
109 successful run of the cached Task
110 - parameters (dict): a `dict` of parameters that were available on the
111 last successful run of the cached Task
112
113 Returns:
114 - boolean specifying whether or not the cache should be used
115 """
116 if duration_only(state, inputs, parameters) is False:
117 return False
118 elif state.cached_parameters == parameters:
119 return True
120 else:
121 return False
122
123
124 def partial_parameters_only(validate_on: Iterable[str] = None,) -> Callable:
125 """
126 Validates the cache based on cache expiration _and_ a subset of parameters (determined by the
127 `validate_on` keyword) that were provided on the last successful run.
128
129 Args:
130 - validate_on (list): a `list` of strings specifying the parameter names
131 to validate against
132
133 Returns:
134 - Callable: the actual validation function specifying whether or not the cache should be used
135
136 Example:
137 ```python
138 from datetime import timedelta
139 import pendulum
140 from prefect import Flow, Parameter, task
141 from prefect.engine.cache_validators import partial_parameters_only
142
143 @task(cache_for=timedelta(days=1),
144 cache_validator=partial_parameters_only(validate_on=['nrows']))
145 def daily_db_refresh(nrows, runtime):
146 pass
147
148 with Flow("My Flow") as f:
149 nrows = Parameter("nrows", default=500)
150 runtime = Parameter("runtime")
151 db_state = daily_db_refresh(nrows, runtime)
152
153 state1 = f.run(parameters=dict(nrows=1000, runtime=pendulum.now('utc')))
154
155 ## the second run will use the cache contained within state1.result[db_state]
156 ## even though `runtime` has changed
157 state2 = f.run(parameters=dict(nrows=1000, runtime=pendulum.now('utc')),
158 task_states={result: state1.result[db_state]})
159 ```
160 """
161
162 def _partial_parameters_only(
163 state: "prefect.engine.state.Cached",
164 inputs: Dict[str, Any],
165 parameters: Dict[str, Any],
166 ) -> bool:
167 """
168 The actual cache validation function that will be used.
169
170 Args:
171 - state (State): a `Success` state from the last successful Task run that contains the cache
172 - inputs (dict): a `dict` of inputs that were available on the last
173 successful run of the cached Task
174 - parameters (dict): a `dict` of parameters that were available on the
175 last successful run of the cached Task
176
177 Returns:
178 - boolean specifying whether or not the cache should be used
179 """
180 parameters = parameters or {}
181 if duration_only(state, inputs, parameters) is False:
182 return False
183 elif validate_on is None:
184 return (
185 True
186 ) # if you dont want to validate on anything, then the cache is valid
187 else:
188 cached = state.cached_parameters or {}
189 partial_provided = {
190 key: value for key, value in parameters.items() if key in validate_on
191 }
192 partial_needed = {
193 key: value for key, value in cached.items() if key in validate_on
194 }
195 return partial_provided == partial_needed
196
197 return _partial_parameters_only
198
199
200 def partial_inputs_only(validate_on: Iterable[str] = None,) -> Callable:
201 """
202 Validates the cache based on cache expiration _and_ a subset of inputs (determined by the
203 `validate_on` keyword) that were provided on the last successful run.
204
205 Args:
206 - validate_on (list): a `list` of strings specifying the input names
207 to validate against
208
209 Returns:
210 - Callable: the actual validation function specifying whether or not the cache should be used
211
212 Example:
213 ```python
214 import random
215 from datetime import timedelta
216 from prefect import Flow, task
217 from prefect.engine.cache_validators import partial_inputs_only
218
219 @task(cache_for=timedelta(days=1),
220 cache_validator=partial_inputs_only(validate_on=['x', 'y']))
221 def add(x, y, as_string=False):
222 if as_string:
223 return '{0} + {1}'.format(x, y)
224 return x + y
225
226 @task
227 def rand_bool():
228 return random.random() > 0.5
229
230 with Flow("My Flow") as f:
231 ans = add(1, 2, rand_bool())
232
233 state1 = f.run()
234 ## the second run will use the cache contained within state1.result[ans]
235 ## even though `rand_bool` might change
236 state2 = f.run(task_states={result: state1.result[ans]})
237 ```
238 """
239
240 def _partial_inputs_only(
241 state: "prefect.engine.state.Cached",
242 inputs: Dict[str, Any],
243 parameters: Dict[str, Any],
244 ) -> bool:
245 """
246 The actual cache validation function that will be used.
247
248 Args:
249 - state (State): a `Success` state from the last successful Task run that contains the cache
250 - inputs (dict): a `dict` of inputs that were available on the last
251 successful run of the cached Task
252 - parameters (dict): a `dict` of parameters that were available on the
253 last successful run of the cached Task
254
255 Returns:
256 - boolean specifying whether or not the cache should be used
257 """
258 inputs = inputs or {}
259 if duration_only(state, inputs, parameters) is False:
260 return False
261 elif validate_on is None:
262 return (
263 True
264 ) # if you dont want to validate on anything, then the cache is valid
265 else:
266 cached = {
267 key: res.value for key, res in (state.cached_inputs or {}).items()
268 }
269 partial_provided = {
270 key: value for key, value in inputs.items() if key in validate_on
271 }
272 partial_needed = {
273 key: value for key, value in cached.items() if key in validate_on
274 }
275 return partial_provided == partial_needed
276
277 return _partial_inputs_only
278
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/prefect/engine/cache_validators.py b/src/prefect/engine/cache_validators.py
--- a/src/prefect/engine/cache_validators.py
+++ b/src/prefect/engine/cache_validators.py
@@ -181,9 +181,7 @@
if duration_only(state, inputs, parameters) is False:
return False
elif validate_on is None:
- return (
- True
- ) # if you dont want to validate on anything, then the cache is valid
+ return True # if you dont want to validate on anything, then the cache is valid
else:
cached = state.cached_parameters or {}
partial_provided = {
@@ -259,9 +257,7 @@
if duration_only(state, inputs, parameters) is False:
return False
elif validate_on is None:
- return (
- True
- ) # if you dont want to validate on anything, then the cache is valid
+ return True # if you dont want to validate on anything, then the cache is valid
else:
cached = {
key: res.value for key, res in (state.cached_inputs or {}).items()
| {"golden_diff": "diff --git a/src/prefect/engine/cache_validators.py b/src/prefect/engine/cache_validators.py\n--- a/src/prefect/engine/cache_validators.py\n+++ b/src/prefect/engine/cache_validators.py\n@@ -181,9 +181,7 @@\n if duration_only(state, inputs, parameters) is False:\n return False\n elif validate_on is None:\n- return (\n- True\n- ) # if you dont want to validate on anything, then the cache is valid\n+ return True # if you dont want to validate on anything, then the cache is valid\n else:\n cached = state.cached_parameters or {}\n partial_provided = {\n@@ -259,9 +257,7 @@\n if duration_only(state, inputs, parameters) is False:\n return False\n elif validate_on is None:\n- return (\n- True\n- ) # if you dont want to validate on anything, then the cache is valid\n+ return True # if you dont want to validate on anything, then the cache is valid\n else:\n cached = {\n key: res.value for key, res in (state.cached_inputs or {}).items()\n", "issue": "Core docs language\nhttps://docs.prefect.io/core/\r\n\r\nIn a few places the Core docs mention that Cloud is coming soon, and I believe the language should be updated.\n", "before_files": [{"content": "\"\"\"\nCache validators are functions that determine if a task's output cache\nis still valid, or whether that task should be re-run; they are provided at\nTask creation via the `cache_validator` keyword argument (for more information\non instantiating Tasks see the [Task documentation](../core/task.html)).\n\nTask caches are created at Task runtime if and only if the `cache_for` keyword\nargument is provided to the Task, that specifies how long the output cache will be valid for\nafter its creation. Cache validators come into play when a cached Task is re-run,\nand are used to determine whether to re-run the Task or use the cache.\n\nNote that _all_ validators take into account cache expiration.\n\nA cache validator returns `True` if the cache is still valid, and `False` otherwise.\n\"\"\"\nfrom typing import Any, Callable, Dict, Iterable\n\nimport pendulum\n\nimport prefect\n\n\ndef never_use(\n state: \"prefect.engine.state.Cached\",\n inputs: Dict[str, Any],\n parameters: Dict[str, Any],\n) -> bool:\n \"\"\"\n Never uses the cache.\n\n Args:\n - state (State): a `Success` state from the last successful Task run that contains the cache\n - inputs (dict): a `dict` of inputs that were available on the last\n successful run of the cached Task\n - parameters (dict): a `dict` of parameters that were available on the\n last successful run of the cached Task\n\n Returns:\n - boolean specifying whether or not the cache should be used\n \"\"\"\n return False\n\n\ndef duration_only(\n state: \"prefect.engine.state.Cached\",\n inputs: Dict[str, Any],\n parameters: Dict[str, Any],\n) -> bool:\n \"\"\"\n Validates the cache based only on cache expiration.\n\n Args:\n - state (State): a `Success` state from the last successful Task run that contains the cache\n - inputs (dict): a `dict` of inputs that were available on the last\n successful run of the cached Task\n - parameters (dict): a `dict` of parameters that were available on the\n last successful run of the cached Task\n\n Returns:\n - boolean specifying whether or not the cache should be used\n \"\"\"\n if state.cached_result_expiration is None:\n return True\n elif state.cached_result_expiration > pendulum.now(\"utc\"):\n return True\n else:\n return False\n\n\ndef all_inputs(\n state: \"prefect.engine.state.Cached\",\n inputs: Dict[str, Any],\n parameters: Dict[str, Any],\n) -> bool:\n 
\"\"\"\n Validates the cache based on cache expiration _and_ all inputs that were provided\n on the last successful run.\n\n Args:\n - state (State): a `Success` state from the last successful Task run that contains the cache\n - inputs (dict): a `dict` of inputs that were available on the last\n successful run of the cached Task\n - parameters (dict): a `dict` of parameters that were available on the\n last successful run of the cached Task\n\n Returns:\n - boolean specifying whether or not the cache should be used\n \"\"\"\n if duration_only(state, inputs, parameters) is False:\n return False\n elif {key: res.value for key, res in (state.cached_inputs or {}).items()} == inputs:\n return True\n else:\n return False\n\n\ndef all_parameters(\n state: \"prefect.engine.state.Cached\",\n inputs: Dict[str, Any],\n parameters: Dict[str, Any],\n) -> bool:\n \"\"\"\n Validates the cache based on cache expiration _and_ all parameters that were provided\n on the last successful run.\n\n Args:\n - state (State): a `Success` state from the last successful Task run that contains the cache\n - inputs (dict): a `dict` of inputs that were available on the last\n successful run of the cached Task\n - parameters (dict): a `dict` of parameters that were available on the\n last successful run of the cached Task\n\n Returns:\n - boolean specifying whether or not the cache should be used\n \"\"\"\n if duration_only(state, inputs, parameters) is False:\n return False\n elif state.cached_parameters == parameters:\n return True\n else:\n return False\n\n\ndef partial_parameters_only(validate_on: Iterable[str] = None,) -> Callable:\n \"\"\"\n Validates the cache based on cache expiration _and_ a subset of parameters (determined by the\n `validate_on` keyword) that were provided on the last successful run.\n\n Args:\n - validate_on (list): a `list` of strings specifying the parameter names\n to validate against\n\n Returns:\n - Callable: the actual validation function specifying whether or not the cache should be used\n\n Example:\n ```python\n from datetime import timedelta\n import pendulum\n from prefect import Flow, Parameter, task\n from prefect.engine.cache_validators import partial_parameters_only\n\n @task(cache_for=timedelta(days=1),\n cache_validator=partial_parameters_only(validate_on=['nrows']))\n def daily_db_refresh(nrows, runtime):\n pass\n\n with Flow(\"My Flow\") as f:\n nrows = Parameter(\"nrows\", default=500)\n runtime = Parameter(\"runtime\")\n db_state = daily_db_refresh(nrows, runtime)\n\n state1 = f.run(parameters=dict(nrows=1000, runtime=pendulum.now('utc')))\n\n ## the second run will use the cache contained within state1.result[db_state]\n ## even though `runtime` has changed\n state2 = f.run(parameters=dict(nrows=1000, runtime=pendulum.now('utc')),\n task_states={result: state1.result[db_state]})\n ```\n \"\"\"\n\n def _partial_parameters_only(\n state: \"prefect.engine.state.Cached\",\n inputs: Dict[str, Any],\n parameters: Dict[str, Any],\n ) -> bool:\n \"\"\"\n The actual cache validation function that will be used.\n\n Args:\n - state (State): a `Success` state from the last successful Task run that contains the cache\n - inputs (dict): a `dict` of inputs that were available on the last\n successful run of the cached Task\n - parameters (dict): a `dict` of parameters that were available on the\n last successful run of the cached Task\n\n Returns:\n - boolean specifying whether or not the cache should be used\n \"\"\"\n parameters = parameters or {}\n if duration_only(state, inputs, 
parameters) is False:\n return False\n elif validate_on is None:\n return (\n True\n ) # if you dont want to validate on anything, then the cache is valid\n else:\n cached = state.cached_parameters or {}\n partial_provided = {\n key: value for key, value in parameters.items() if key in validate_on\n }\n partial_needed = {\n key: value for key, value in cached.items() if key in validate_on\n }\n return partial_provided == partial_needed\n\n return _partial_parameters_only\n\n\ndef partial_inputs_only(validate_on: Iterable[str] = None,) -> Callable:\n \"\"\"\n Validates the cache based on cache expiration _and_ a subset of inputs (determined by the\n `validate_on` keyword) that were provided on the last successful run.\n\n Args:\n - validate_on (list): a `list` of strings specifying the input names\n to validate against\n\n Returns:\n - Callable: the actual validation function specifying whether or not the cache should be used\n\n Example:\n ```python\n import random\n from datetime import timedelta\n from prefect import Flow, task\n from prefect.engine.cache_validators import partial_inputs_only\n\n @task(cache_for=timedelta(days=1),\n cache_validator=partial_inputs_only(validate_on=['x', 'y']))\n def add(x, y, as_string=False):\n if as_string:\n return '{0} + {1}'.format(x, y)\n return x + y\n\n @task\n def rand_bool():\n return random.random() > 0.5\n\n with Flow(\"My Flow\") as f:\n ans = add(1, 2, rand_bool())\n\n state1 = f.run()\n ## the second run will use the cache contained within state1.result[ans]\n ## even though `rand_bool` might change\n state2 = f.run(task_states={result: state1.result[ans]})\n ```\n \"\"\"\n\n def _partial_inputs_only(\n state: \"prefect.engine.state.Cached\",\n inputs: Dict[str, Any],\n parameters: Dict[str, Any],\n ) -> bool:\n \"\"\"\n The actual cache validation function that will be used.\n\n Args:\n - state (State): a `Success` state from the last successful Task run that contains the cache\n - inputs (dict): a `dict` of inputs that were available on the last\n successful run of the cached Task\n - parameters (dict): a `dict` of parameters that were available on the\n last successful run of the cached Task\n\n Returns:\n - boolean specifying whether or not the cache should be used\n \"\"\"\n inputs = inputs or {}\n if duration_only(state, inputs, parameters) is False:\n return False\n elif validate_on is None:\n return (\n True\n ) # if you dont want to validate on anything, then the cache is valid\n else:\n cached = {\n key: res.value for key, res in (state.cached_inputs or {}).items()\n }\n partial_provided = {\n key: value for key, value in inputs.items() if key in validate_on\n }\n partial_needed = {\n key: value for key, value in cached.items() if key in validate_on\n }\n return partial_provided == partial_needed\n\n return _partial_inputs_only\n", "path": "src/prefect/engine/cache_validators.py"}], "after_files": [{"content": "\"\"\"\nCache validators are functions that determine if a task's output cache\nis still valid, or whether that task should be re-run; they are provided at\nTask creation via the `cache_validator` keyword argument (for more information\non instantiating Tasks see the [Task documentation](../core/task.html)).\n\nTask caches are created at Task runtime if and only if the `cache_for` keyword\nargument is provided to the Task, that specifies how long the output cache will be valid for\nafter its creation. 
Cache validators come into play when a cached Task is re-run,\nand are used to determine whether to re-run the Task or use the cache.\n\nNote that _all_ validators take into account cache expiration.\n\nA cache validator returns `True` if the cache is still valid, and `False` otherwise.\n\"\"\"\nfrom typing import Any, Callable, Dict, Iterable\n\nimport pendulum\n\nimport prefect\n\n\ndef never_use(\n state: \"prefect.engine.state.Cached\",\n inputs: Dict[str, Any],\n parameters: Dict[str, Any],\n) -> bool:\n \"\"\"\n Never uses the cache.\n\n Args:\n - state (State): a `Success` state from the last successful Task run that contains the cache\n - inputs (dict): a `dict` of inputs that were available on the last\n successful run of the cached Task\n - parameters (dict): a `dict` of parameters that were available on the\n last successful run of the cached Task\n\n Returns:\n - boolean specifying whether or not the cache should be used\n \"\"\"\n return False\n\n\ndef duration_only(\n state: \"prefect.engine.state.Cached\",\n inputs: Dict[str, Any],\n parameters: Dict[str, Any],\n) -> bool:\n \"\"\"\n Validates the cache based only on cache expiration.\n\n Args:\n - state (State): a `Success` state from the last successful Task run that contains the cache\n - inputs (dict): a `dict` of inputs that were available on the last\n successful run of the cached Task\n - parameters (dict): a `dict` of parameters that were available on the\n last successful run of the cached Task\n\n Returns:\n - boolean specifying whether or not the cache should be used\n \"\"\"\n if state.cached_result_expiration is None:\n return True\n elif state.cached_result_expiration > pendulum.now(\"utc\"):\n return True\n else:\n return False\n\n\ndef all_inputs(\n state: \"prefect.engine.state.Cached\",\n inputs: Dict[str, Any],\n parameters: Dict[str, Any],\n) -> bool:\n \"\"\"\n Validates the cache based on cache expiration _and_ all inputs that were provided\n on the last successful run.\n\n Args:\n - state (State): a `Success` state from the last successful Task run that contains the cache\n - inputs (dict): a `dict` of inputs that were available on the last\n successful run of the cached Task\n - parameters (dict): a `dict` of parameters that were available on the\n last successful run of the cached Task\n\n Returns:\n - boolean specifying whether or not the cache should be used\n \"\"\"\n if duration_only(state, inputs, parameters) is False:\n return False\n elif {key: res.value for key, res in (state.cached_inputs or {}).items()} == inputs:\n return True\n else:\n return False\n\n\ndef all_parameters(\n state: \"prefect.engine.state.Cached\",\n inputs: Dict[str, Any],\n parameters: Dict[str, Any],\n) -> bool:\n \"\"\"\n Validates the cache based on cache expiration _and_ all parameters that were provided\n on the last successful run.\n\n Args:\n - state (State): a `Success` state from the last successful Task run that contains the cache\n - inputs (dict): a `dict` of inputs that were available on the last\n successful run of the cached Task\n - parameters (dict): a `dict` of parameters that were available on the\n last successful run of the cached Task\n\n Returns:\n - boolean specifying whether or not the cache should be used\n \"\"\"\n if duration_only(state, inputs, parameters) is False:\n return False\n elif state.cached_parameters == parameters:\n return True\n else:\n return False\n\n\ndef partial_parameters_only(validate_on: Iterable[str] = None,) -> Callable:\n \"\"\"\n Validates the cache based on cache 
expiration _and_ a subset of parameters (determined by the\n `validate_on` keyword) that were provided on the last successful run.\n\n Args:\n - validate_on (list): a `list` of strings specifying the parameter names\n to validate against\n\n Returns:\n - Callable: the actual validation function specifying whether or not the cache should be used\n\n Example:\n ```python\n from datetime import timedelta\n import pendulum\n from prefect import Flow, Parameter, task\n from prefect.engine.cache_validators import partial_parameters_only\n\n @task(cache_for=timedelta(days=1),\n cache_validator=partial_parameters_only(validate_on=['nrows']))\n def daily_db_refresh(nrows, runtime):\n pass\n\n with Flow(\"My Flow\") as f:\n nrows = Parameter(\"nrows\", default=500)\n runtime = Parameter(\"runtime\")\n db_state = daily_db_refresh(nrows, runtime)\n\n state1 = f.run(parameters=dict(nrows=1000, runtime=pendulum.now('utc')))\n\n ## the second run will use the cache contained within state1.result[db_state]\n ## even though `runtime` has changed\n state2 = f.run(parameters=dict(nrows=1000, runtime=pendulum.now('utc')),\n task_states={result: state1.result[db_state]})\n ```\n \"\"\"\n\n def _partial_parameters_only(\n state: \"prefect.engine.state.Cached\",\n inputs: Dict[str, Any],\n parameters: Dict[str, Any],\n ) -> bool:\n \"\"\"\n The actual cache validation function that will be used.\n\n Args:\n - state (State): a `Success` state from the last successful Task run that contains the cache\n - inputs (dict): a `dict` of inputs that were available on the last\n successful run of the cached Task\n - parameters (dict): a `dict` of parameters that were available on the\n last successful run of the cached Task\n\n Returns:\n - boolean specifying whether or not the cache should be used\n \"\"\"\n parameters = parameters or {}\n if duration_only(state, inputs, parameters) is False:\n return False\n elif validate_on is None:\n return True # if you dont want to validate on anything, then the cache is valid\n else:\n cached = state.cached_parameters or {}\n partial_provided = {\n key: value for key, value in parameters.items() if key in validate_on\n }\n partial_needed = {\n key: value for key, value in cached.items() if key in validate_on\n }\n return partial_provided == partial_needed\n\n return _partial_parameters_only\n\n\ndef partial_inputs_only(validate_on: Iterable[str] = None,) -> Callable:\n \"\"\"\n Validates the cache based on cache expiration _and_ a subset of inputs (determined by the\n `validate_on` keyword) that were provided on the last successful run.\n\n Args:\n - validate_on (list): a `list` of strings specifying the input names\n to validate against\n\n Returns:\n - Callable: the actual validation function specifying whether or not the cache should be used\n\n Example:\n ```python\n import random\n from datetime import timedelta\n from prefect import Flow, task\n from prefect.engine.cache_validators import partial_inputs_only\n\n @task(cache_for=timedelta(days=1),\n cache_validator=partial_inputs_only(validate_on=['x', 'y']))\n def add(x, y, as_string=False):\n if as_string:\n return '{0} + {1}'.format(x, y)\n return x + y\n\n @task\n def rand_bool():\n return random.random() > 0.5\n\n with Flow(\"My Flow\") as f:\n ans = add(1, 2, rand_bool())\n\n state1 = f.run()\n ## the second run will use the cache contained within state1.result[ans]\n ## even though `rand_bool` might change\n state2 = f.run(task_states={result: state1.result[ans]})\n ```\n \"\"\"\n\n def _partial_inputs_only(\n state: 
\"prefect.engine.state.Cached\",\n inputs: Dict[str, Any],\n parameters: Dict[str, Any],\n ) -> bool:\n \"\"\"\n The actual cache validation function that will be used.\n\n Args:\n - state (State): a `Success` state from the last successful Task run that contains the cache\n - inputs (dict): a `dict` of inputs that were available on the last\n successful run of the cached Task\n - parameters (dict): a `dict` of parameters that were available on the\n last successful run of the cached Task\n\n Returns:\n - boolean specifying whether or not the cache should be used\n \"\"\"\n inputs = inputs or {}\n if duration_only(state, inputs, parameters) is False:\n return False\n elif validate_on is None:\n return True # if you dont want to validate on anything, then the cache is valid\n else:\n cached = {\n key: res.value for key, res in (state.cached_inputs or {}).items()\n }\n partial_provided = {\n key: value for key, value in inputs.items() if key in validate_on\n }\n partial_needed = {\n key: value for key, value in cached.items() if key in validate_on\n }\n return partial_provided == partial_needed\n\n return _partial_inputs_only\n", "path": "src/prefect/engine/cache_validators.py"}]} | 3,188 | 266 |
gh_patches_debug_8910 | rasdani/github-patches | git_diff | boto__boto-3682 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Circular Import Error with boto.vendored.regions.regions
As part of using boto with CircleCI in the context of a deployment (using CodeDeploy), the following error is seen:
```
File "/usr/local/lib/python2.7/dist-packages/boto/__init__.py", line 28, in <module>
from boto.storage_uri import BucketStorageUri, FileStorageUri
File "/usr/local/lib/python2.7/dist-packages/boto/storage_uri.py", line 27, in <module>
from boto.s3.deletemarker import DeleteMarker
File "/usr/local/lib/python2.7/dist-packages/boto/s3/__init__.py", line 26, in <module>
from boto.regioninfo import RegionInfo, get_regions
File "/usr/local/lib/python2.7/dist-packages/boto/regioninfo.py", line 28, in <module>
from boto.endpoints import BotoEndpointResolver
File "/usr/local/lib/python2.7/dist-packages/boto/endpoints.py", line 13, in <module>
import boto.vendored.regions.regions as _regions
ImportError: No module named regions.regions
```
Trying to perform the above import manually in IPython reveals what I believe is a circular dependency import, which is probably the core problem:
```
In [2]: from boto.vendored.regions import regions as _regions
---------------------------------------------------------------------------
ImportError Traceback (most recent call last)
<ipython-input-2-6327378203c8> in <module>()
----> 1 from boto.vendored.regions import regions as _regions
/Users/adamhadani/.virtualenvs/boto-env/lib/python2.7/site-packages/boto/__init__.py in <module>()
26 #
27 from boto.pyami.config import Config, BotoConfigLocations
---> 28 from boto.storage_uri import BucketStorageUri, FileStorageUri
29 import boto.plugin
30 import datetime
/Users/adamhadani/.virtualenvs/boto-env/lib/python2.7/site-packages/boto/storage_uri.py in <module>()
25 import sys
26 import textwrap
---> 27 from boto.s3.deletemarker import DeleteMarker
28 from boto.exception import BotoClientError
29 from boto.exception import InvalidUriError
/Users/adamhadani/.virtualenvs/boto-env/lib/python2.7/site-packages/boto/s3/__init__.py in <module>()
24 #
25
---> 26 from boto.regioninfo import RegionInfo, get_regions
27 from boto.regioninfo import connect
28
/Users/adamhadani/.virtualenvs/boto-env/lib/python2.7/site-packages/boto/regioninfo.py in <module>()
26 from boto.compat import json
27 from boto.exception import BotoClientError
---> 28 from boto.endpoints import BotoEndpointResolver
29 from boto.endpoints import StaticEndpointBuilder
30
/Users/adamhadani/.virtualenvs/boto-env/lib/python2.7/site-packages/boto/endpoints.py in <module>()
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
---> 13 import boto.vendored.regions.regions as _regions
14
15
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 # Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
4 # Copyright (c) 2010, Eucalyptus Systems, Inc.
5 # All rights reserved.
6 #
7 # Permission is hereby granted, free of charge, to any person obtaining a
8 # copy of this software and associated documentation files (the
9 # "Software"), to deal in the Software without restriction, including
10 # without limitation the rights to use, copy, modify, merge, publish, dis-
11 # tribute, sublicense, and/or sell copies of the Software, and to permit
12 # persons to whom the Software is furnished to do so, subject to the fol-
13 # lowing conditions:
14 #
15 # The above copyright notice and this permission notice shall be included
16 # in all copies or substantial portions of the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
20 # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
21 # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
22 # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 # IN THE SOFTWARE.
25
26 from __future__ import print_function
27
28 try:
29 from setuptools import setup
30 extra = dict(test_suite="tests.test.suite", include_package_data=True)
31 except ImportError:
32 from distutils.core import setup
33 extra = {}
34
35 import sys
36
37 from boto import __version__
38
39 if sys.version_info <= (2, 5):
40 error = "ERROR: boto requires Python Version 2.6 or above...exiting."
41 print(error, file=sys.stderr)
42 sys.exit(1)
43
44 def readme():
45 with open("README.rst") as f:
46 return f.read()
47
48 setup(name = "boto",
49 version = __version__,
50 description = "Amazon Web Services Library",
51 long_description = readme(),
52 author = "Mitch Garnaat",
53 author_email = "[email protected]",
54 scripts = ["bin/sdbadmin", "bin/elbadmin", "bin/cfadmin",
55 "bin/s3put", "bin/fetch_file", "bin/launch_instance",
56 "bin/list_instances", "bin/taskadmin", "bin/kill_instance",
57 "bin/bundle_image", "bin/pyami_sendmail", "bin/lss3",
58 "bin/cq", "bin/route53", "bin/cwutil", "bin/instance_events",
59 "bin/asadmin", "bin/glacier", "bin/mturk",
60 "bin/dynamodb_dump", "bin/dynamodb_load"],
61 url = "https://github.com/boto/boto/",
62 packages = ["boto", "boto.sqs", "boto.s3", "boto.gs", "boto.file",
63 "boto.ec2", "boto.ec2.cloudwatch", "boto.ec2.autoscale",
64 "boto.ec2.elb", "boto.sdb", "boto.cacerts",
65 "boto.sdb.db", "boto.sdb.db.manager",
66 "boto.mturk", "boto.pyami",
67 "boto.pyami.installers", "boto.pyami.installers.ubuntu",
68 "boto.mashups", "boto.contrib", "boto.manage",
69 "boto.services", "boto.cloudfront",
70 "boto.roboto", "boto.rds", "boto.vpc", "boto.fps",
71 "boto.fps", "boto.emr", "boto.emr", "boto.sns",
72 "boto.ecs", "boto.iam", "boto.route53", "boto.ses",
73 "boto.cloudformation", "boto.sts", "boto.dynamodb",
74 "boto.swf", "boto.mws", "boto.cloudsearch", "boto.glacier",
75 "boto.beanstalk", "boto.datapipeline", "boto.elasticache",
76 "boto.elastictranscoder", "boto.opsworks", "boto.redshift",
77 "boto.dynamodb2", "boto.support", "boto.cloudtrail",
78 "boto.directconnect", "boto.kinesis", "boto.rds2",
79 "boto.cloudsearch2", "boto.logs", "boto.vendored",
80 "boto.route53.domains", "boto.cognito",
81 "boto.cognito.identity", "boto.cognito.sync",
82 "boto.cloudsearchdomain", "boto.kms",
83 "boto.awslambda", "boto.codedeploy", "boto.configservice",
84 "boto.cloudhsm", "boto.ec2containerservice",
85 "boto.machinelearning"],
86 package_data = {
87 "boto.cacerts": ["cacerts.txt"],
88 "boto": ["endpoints.json"],
89 },
90 license = "MIT",
91 platforms = "Posix; MacOS X; Windows",
92 classifiers = ["Development Status :: 5 - Production/Stable",
93 "Intended Audience :: Developers",
94 "License :: OSI Approved :: MIT License",
95 "Operating System :: OS Independent",
96 "Topic :: Internet",
97 "Programming Language :: Python :: 2",
98 "Programming Language :: Python :: 2.6",
99 "Programming Language :: Python :: 2.7",
100 "Programming Language :: Python :: 3",
101 "Programming Language :: Python :: 3.3",
102 "Programming Language :: Python :: 3.4"],
103 **extra
104 )
105
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -82,7 +82,7 @@
"boto.cloudsearchdomain", "boto.kms",
"boto.awslambda", "boto.codedeploy", "boto.configservice",
"boto.cloudhsm", "boto.ec2containerservice",
- "boto.machinelearning"],
+ "boto.machinelearning", "boto.vendored.regions"],
package_data = {
"boto.cacerts": ["cacerts.txt"],
"boto": ["endpoints.json"],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -82,7 +82,7 @@\n \"boto.cloudsearchdomain\", \"boto.kms\",\n \"boto.awslambda\", \"boto.codedeploy\", \"boto.configservice\",\n \"boto.cloudhsm\", \"boto.ec2containerservice\",\n- \"boto.machinelearning\"],\n+ \"boto.machinelearning\", \"boto.vendored.regions\"],\n package_data = {\n \"boto.cacerts\": [\"cacerts.txt\"],\n \"boto\": [\"endpoints.json\"],\n", "issue": "Circular Import Error with boto.vendored.regions.regions\nas part of using boto with CircleCI in context of deployment (using CodeDeploy) seeing the following error:\r\n\r\n``` \r\n File \"/usr/local/lib/python2.7/dist-packages/boto/__init__.py\", line 28, in <module>\r\n from boto.storage_uri import BucketStorageUri, FileStorageUri\r\n File \"/usr/local/lib/python2.7/dist-packages/boto/storage_uri.py\", line 27, in <module>\r\n from boto.s3.deletemarker import DeleteMarker\r\n File \"/usr/local/lib/python2.7/dist-packages/boto/s3/__init__.py\", line 26, in <module>\r\n from boto.regioninfo import RegionInfo, get_regions\r\n File \"/usr/local/lib/python2.7/dist-packages/boto/regioninfo.py\", line 28, in <module>\r\n from boto.endpoints import BotoEndpointResolver\r\n File \"/usr/local/lib/python2.7/dist-packages/boto/endpoints.py\", line 13, in <module>\r\n import boto.vendored.regions.regions as _regions\r\nImportError: No module named regions.regions\r\n```\r\n\r\nTrying to perform the above import in IPython manually reveals what I believe is a circular dependency import which is probably the core problem:\r\n\r\n```\r\nIn [2]: from boto.vendored.regions import regions as _regions\r\n---------------------------------------------------------------------------\r\nImportError Traceback (most recent call last)\r\n<ipython-input-2-6327378203c8> in <module>()\r\n----> 1 from boto.vendored.regions import regions as _regions\r\n\r\n/Users/adamhadani/.virtualenvs/boto-env/lib/python2.7/site-packages/boto/__init__.py in <module>()\r\n 26 #\r\n 27 from boto.pyami.config import Config, BotoConfigLocations\r\n---> 28 from boto.storage_uri import BucketStorageUri, FileStorageUri\r\n 29 import boto.plugin\r\n 30 import datetime\r\n\r\n/Users/adamhadani/.virtualenvs/boto-env/lib/python2.7/site-packages/boto/storage_uri.py in <module>()\r\n 25 import sys\r\n 26 import textwrap\r\n---> 27 from boto.s3.deletemarker import DeleteMarker\r\n 28 from boto.exception import BotoClientError\r\n 29 from boto.exception import InvalidUriError\r\n\r\n/Users/adamhadani/.virtualenvs/boto-env/lib/python2.7/site-packages/boto/s3/__init__.py in <module>()\r\n 24 #\r\n 25\r\n---> 26 from boto.regioninfo import RegionInfo, get_regions\r\n 27 from boto.regioninfo import connect\r\n 28\r\n\r\n/Users/adamhadani/.virtualenvs/boto-env/lib/python2.7/site-packages/boto/regioninfo.py in <module>()\r\n 26 from boto.compat import json\r\n 27 from boto.exception import BotoClientError\r\n---> 28 from boto.endpoints import BotoEndpointResolver\r\n 29 from boto.endpoints import StaticEndpointBuilder\r\n 30\r\n\r\n/Users/adamhadani/.virtualenvs/boto-env/lib/python2.7/site-packages/boto/endpoints.py in <module>()\r\n 11 # ANY KIND, either express or implied. 
See the License for the specific\r\n 12 # language governing permissions and limitations under the License.\r\n---> 13 import boto.vendored.regions.regions as _regions\r\n 14\r\n 15\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2010, Eucalyptus Systems, Inc.\n# All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish, dis-\n# tribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the fol-\n# lowing conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-\n# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\n# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n\nfrom __future__ import print_function\n\ntry:\n from setuptools import setup\n extra = dict(test_suite=\"tests.test.suite\", include_package_data=True)\nexcept ImportError:\n from distutils.core import setup\n extra = {}\n\nimport sys\n\nfrom boto import __version__\n\nif sys.version_info <= (2, 5):\n error = \"ERROR: boto requires Python Version 2.6 or above...exiting.\"\n print(error, file=sys.stderr)\n sys.exit(1)\n\ndef readme():\n with open(\"README.rst\") as f:\n return f.read()\n\nsetup(name = \"boto\",\n version = __version__,\n description = \"Amazon Web Services Library\",\n long_description = readme(),\n author = \"Mitch Garnaat\",\n author_email = \"[email protected]\",\n scripts = [\"bin/sdbadmin\", \"bin/elbadmin\", \"bin/cfadmin\",\n \"bin/s3put\", \"bin/fetch_file\", \"bin/launch_instance\",\n \"bin/list_instances\", \"bin/taskadmin\", \"bin/kill_instance\",\n \"bin/bundle_image\", \"bin/pyami_sendmail\", \"bin/lss3\",\n \"bin/cq\", \"bin/route53\", \"bin/cwutil\", \"bin/instance_events\",\n \"bin/asadmin\", \"bin/glacier\", \"bin/mturk\",\n \"bin/dynamodb_dump\", \"bin/dynamodb_load\"],\n url = \"https://github.com/boto/boto/\",\n packages = [\"boto\", \"boto.sqs\", \"boto.s3\", \"boto.gs\", \"boto.file\",\n \"boto.ec2\", \"boto.ec2.cloudwatch\", \"boto.ec2.autoscale\",\n \"boto.ec2.elb\", \"boto.sdb\", \"boto.cacerts\",\n \"boto.sdb.db\", \"boto.sdb.db.manager\",\n \"boto.mturk\", \"boto.pyami\",\n \"boto.pyami.installers\", \"boto.pyami.installers.ubuntu\",\n \"boto.mashups\", \"boto.contrib\", \"boto.manage\",\n \"boto.services\", \"boto.cloudfront\",\n \"boto.roboto\", \"boto.rds\", \"boto.vpc\", \"boto.fps\",\n \"boto.fps\", \"boto.emr\", \"boto.emr\", \"boto.sns\",\n \"boto.ecs\", \"boto.iam\", \"boto.route53\", \"boto.ses\",\n \"boto.cloudformation\", \"boto.sts\", \"boto.dynamodb\",\n \"boto.swf\", \"boto.mws\", \"boto.cloudsearch\", \"boto.glacier\",\n \"boto.beanstalk\", \"boto.datapipeline\", \"boto.elasticache\",\n \"boto.elastictranscoder\", \"boto.opsworks\", \"boto.redshift\",\n \"boto.dynamodb2\", \"boto.support\", 
\"boto.cloudtrail\",\n \"boto.directconnect\", \"boto.kinesis\", \"boto.rds2\",\n \"boto.cloudsearch2\", \"boto.logs\", \"boto.vendored\",\n \"boto.route53.domains\", \"boto.cognito\",\n \"boto.cognito.identity\", \"boto.cognito.sync\",\n \"boto.cloudsearchdomain\", \"boto.kms\",\n \"boto.awslambda\", \"boto.codedeploy\", \"boto.configservice\",\n \"boto.cloudhsm\", \"boto.ec2containerservice\",\n \"boto.machinelearning\"],\n package_data = {\n \"boto.cacerts\": [\"cacerts.txt\"],\n \"boto\": [\"endpoints.json\"],\n },\n license = \"MIT\",\n platforms = \"Posix; MacOS X; Windows\",\n classifiers = [\"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\"],\n **extra\n )\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2010, Eucalyptus Systems, Inc.\n# All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish, dis-\n# tribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the fol-\n# lowing conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-\n# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT\n# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n\nfrom __future__ import print_function\n\ntry:\n from setuptools import setup\n extra = dict(test_suite=\"tests.test.suite\", include_package_data=True)\nexcept ImportError:\n from distutils.core import setup\n extra = {}\n\nimport sys\n\nfrom boto import __version__\n\nif sys.version_info <= (2, 5):\n error = \"ERROR: boto requires Python Version 2.6 or above...exiting.\"\n print(error, file=sys.stderr)\n sys.exit(1)\n\ndef readme():\n with open(\"README.rst\") as f:\n return f.read()\n\nsetup(name = \"boto\",\n version = __version__,\n description = \"Amazon Web Services Library\",\n long_description = readme(),\n author = \"Mitch Garnaat\",\n author_email = \"[email protected]\",\n scripts = [\"bin/sdbadmin\", \"bin/elbadmin\", \"bin/cfadmin\",\n \"bin/s3put\", \"bin/fetch_file\", \"bin/launch_instance\",\n \"bin/list_instances\", \"bin/taskadmin\", \"bin/kill_instance\",\n \"bin/bundle_image\", \"bin/pyami_sendmail\", \"bin/lss3\",\n \"bin/cq\", \"bin/route53\", \"bin/cwutil\", \"bin/instance_events\",\n \"bin/asadmin\", \"bin/glacier\", \"bin/mturk\",\n \"bin/dynamodb_dump\", \"bin/dynamodb_load\"],\n url = \"https://github.com/boto/boto/\",\n packages = [\"boto\", \"boto.sqs\", \"boto.s3\", \"boto.gs\", \"boto.file\",\n \"boto.ec2\", \"boto.ec2.cloudwatch\", \"boto.ec2.autoscale\",\n \"boto.ec2.elb\", \"boto.sdb\", \"boto.cacerts\",\n \"boto.sdb.db\", \"boto.sdb.db.manager\",\n \"boto.mturk\", \"boto.pyami\",\n \"boto.pyami.installers\", \"boto.pyami.installers.ubuntu\",\n \"boto.mashups\", \"boto.contrib\", \"boto.manage\",\n \"boto.services\", \"boto.cloudfront\",\n \"boto.roboto\", \"boto.rds\", \"boto.vpc\", \"boto.fps\",\n \"boto.fps\", \"boto.emr\", \"boto.emr\", \"boto.sns\",\n \"boto.ecs\", \"boto.iam\", \"boto.route53\", \"boto.ses\",\n \"boto.cloudformation\", \"boto.sts\", \"boto.dynamodb\",\n \"boto.swf\", \"boto.mws\", \"boto.cloudsearch\", \"boto.glacier\",\n \"boto.beanstalk\", \"boto.datapipeline\", \"boto.elasticache\",\n \"boto.elastictranscoder\", \"boto.opsworks\", \"boto.redshift\",\n \"boto.dynamodb2\", \"boto.support\", \"boto.cloudtrail\",\n \"boto.directconnect\", \"boto.kinesis\", \"boto.rds2\",\n \"boto.cloudsearch2\", \"boto.logs\", \"boto.vendored\",\n \"boto.route53.domains\", \"boto.cognito\",\n \"boto.cognito.identity\", \"boto.cognito.sync\",\n \"boto.cloudsearchdomain\", \"boto.kms\",\n \"boto.awslambda\", \"boto.codedeploy\", \"boto.configservice\",\n \"boto.cloudhsm\", \"boto.ec2containerservice\",\n \"boto.machinelearning\", \"boto.vendored.regions\"],\n package_data = {\n \"boto.cacerts\": [\"cacerts.txt\"],\n \"boto\": [\"endpoints.json\"],\n },\n license = \"MIT\",\n platforms = \"Posix; MacOS X; Windows\",\n classifiers = [\"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\"],\n **extra\n )\n", "path": "setup.py"}]} | 2,480 | 137 |
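
The fix adds the missing `boto.vendored.regions` entry to the explicit `packages` list in `setup.py`. As a hedged aside, this class of omission is what `setuptools.find_packages()` exists to avoid; the sketch below only illustrates that alternative approach and is not how boto's `setup.py` is actually written.

```python
from setuptools import setup, find_packages

setup(
    name="boto",
    # find_packages() walks the source tree and returns every directory that
    # contains an __init__.py, so a newly vendored sub-package such as
    # boto.vendored.regions is picked up without editing setup.py by hand.
    packages=find_packages(exclude=["tests", "tests.*"]),
)
```
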
gh_patches_debug_9392 | rasdani/github-patches | git_diff | litestar-org__litestar-842 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SQLAlchemy Session backend uses wrong column type in `SessionModelMixin`
**Describe the bug**
The SQLAlchemy session backend uses `BLOB` as the datatype in the `SessionModelMixin`, which is not compatible with all databases.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlite/middleware/session/sqlalchemy_backend.py`
Content:
```
1 from abc import ABC, abstractmethod
2 from datetime import datetime, timedelta
3 from typing import TYPE_CHECKING, Any, Generic, Optional, Type, TypeVar, Union, cast
4
5 import anyio.to_thread
6 import sqlalchemy as sa
7 from pydantic import validator
8 from sqlalchemy.ext.asyncio import AsyncSession as AsyncSASession
9 from sqlalchemy.ext.hybrid import hybrid_property
10 from sqlalchemy.orm import Mapped
11 from sqlalchemy.orm import Session as SASession
12 from sqlalchemy.orm import declarative_mixin, registry
13
14 from starlite.middleware.session.base import ServerSideBackend, ServerSideSessionConfig
15 from starlite.plugins.sql_alchemy import SQLAlchemyPlugin
16
17 if TYPE_CHECKING:
18 from sqlalchemy.sql import Select
19 from sqlalchemy.sql.elements import BooleanClauseList
20
21 from starlite.plugins.sql_alchemy import SQLAlchemyConfig as SQLAlchemyPluginConfig
22
23
24 AnySASession = Union[SASession, AsyncSASession]
25 AnySASessionT = TypeVar("AnySASessionT", bound=AnySASession)
26 SessionModelT = TypeVar("SessionModelT", bound="SessionModelMixin")
27
28
29 @declarative_mixin
30 class SessionModelMixin:
31 """Mixin for session storage."""
32
33 session_id: Mapped[str] = sa.Column(sa.String, nullable=False, unique=True, index=True) # pyright: ignore
34 data: Mapped[bytes] = sa.Column(sa.BLOB, nullable=False) # pyright: ignore
35 expires: Mapped[datetime] = sa.Column(sa.DateTime, nullable=False) # pyright: ignore
36
37 @hybrid_property
38 def expired(self) -> bool: # pyright: ignore
39 """Boolean indicating if the session has expired."""
40 return datetime.utcnow().replace(tzinfo=None) > self.expires
41
42 @expired.expression # type: ignore[no-redef]
43 def expired(cls) -> "BooleanClauseList": # pylint: disable=no-self-argument
44 """SQL-Expression to check if the session has expired."""
45 return datetime.utcnow().replace(tzinfo=None) > cls.expires # pyright: ignore
46
47
48 class SessionModel(SessionModelMixin):
49 """Session storage model."""
50
51 __tablename__ = "session"
52 id: Mapped[int] = sa.Column(sa.Integer, primary_key=True) # pyright: ignore
53
54
55 def create_session_model(base: Type[Any], table_name: str = "session") -> Type[SessionModelMixin]:
56 """Dynamically generate a session storage model and register it with the declarative base.
57
58 Args:
59 base: SQLAlchemy declarative base
60 table_name: Alternative table name
61
62 Returns:
63 A mapped model subclassing `base` and `SessionModelMixin`
64 """
65
66 class Model(base, SessionModelMixin): # type: ignore[valid-type,misc]
67 __tablename__ = table_name
68 id: Mapped[int] = sa.Column(sa.Integer, primary_key=True) # pyright: ignore
69
70 return Model
71
72
73 def register_session_model(base: Union[registry, Any], model: Type[SessionModelT]) -> Type[SessionModelT]:
74 """Map and register a pre-existing model subclassing `SessionModelMixin` with a declarative base or registry.
75
76 Args:
77 base: Either a `orm.registry` or `DeclarativeBase`
78 model: SQLAlchemy model to register
79
80 Returns:
81 A mapped model subclassing `SessionModelMixin`, and registered in `registry`
82 """
83 registry_ = base.registry if not isinstance(base, registry) else base
84 return cast("Type[SessionModelT]", registry_.map_declaratively(model))
85
86
87 class BaseSQLAlchemyBackend(Generic[AnySASessionT], ServerSideBackend["SQLAlchemyBackendConfig"], ABC):
88 """Session backend to store data in a database with SQLAlchemy. Works with both sync and async engines.
89
90 Notes:
91 - Requires `sqlalchemy` which needs to be installed separately, and a configured
92 [SQLAlchemyPlugin][starlite.plugins.sql_alchemy.SQLAlchemyPlugin].
93 """
94
95 __slots__ = ("_model", "_session_maker")
96
97 def __init__(self, config: "SQLAlchemyBackendConfig") -> None:
98 """Initialize `BaseSQLAlchemyBackend`.
99
100 Args:
101 config: An instance of `SQLAlchemyBackendConfig`
102 """
103 super().__init__(config=config)
104 self._model = config.model
105 self._session_maker = cast("SQLAlchemyPluginConfig", config.plugin._config).session_maker
106
107 def _create_sa_session(self) -> AnySASessionT:
108 return cast("AnySASessionT", self._session_maker())
109
110 def _select_session_obj(self, session_id: str) -> "Select":
111 return sa.select(self._model).where(self._model.session_id == session_id)
112
113 def _update_session_expiry(self, session_obj: SessionModelMixin) -> None:
114 session_obj.expires = datetime.utcnow().replace(tzinfo=None) + timedelta(seconds=self.config.max_age)
115
116 @abstractmethod
117 async def delete_expired(self) -> None:
118 """Delete all expired sessions from the database."""
119
120
121 class AsyncSQLAlchemyBackend(BaseSQLAlchemyBackend[AsyncSASession]):
122 """Asynchronous SQLAlchemy backend."""
123
124 async def _get_session_obj(self, *, sa_session: AsyncSASession, session_id: str) -> Optional[SessionModelMixin]:
125 result = await sa_session.scalars(self._select_session_obj(session_id))
126 return result.one_or_none()
127
128 async def get(self, session_id: str) -> Optional[bytes]:
129 """Retrieve data associated with `session_id`.
130
131 Args:
132 session_id: The session-ID
133
134 Returns:
135 The session data, if existing, otherwise `None`.
136 """
137 async with self._create_sa_session() as sa_session:
138 session_obj = await self._get_session_obj(sa_session=sa_session, session_id=session_id)
139 if session_obj:
140 if not session_obj.expired: # type: ignore[truthy-function]
141 self._update_session_expiry(session_obj) # type: ignore[unreachable]
142 await sa_session.commit()
143 return session_obj.data
144 await sa_session.delete(session_obj)
145 await sa_session.commit()
146 return None
147
148 async def set(self, session_id: str, data: bytes) -> None:
149 """Store `data` under the `session_id` for later retrieval.
150
151 If there is already data associated with `session_id`, replace
152 it with `data` and reset its expiry time
153
154 Args:
155 session_id: The session-ID.
156 data: Serialized session data
157
158 Returns:
159 None
160 """
161 async with self._create_sa_session() as sa_session:
162 session_obj = await self._get_session_obj(sa_session=sa_session, session_id=session_id)
163
164 if not session_obj:
165 session_obj = self._model(session_id=session_id) # type: ignore[call-arg]
166 sa_session.add(session_obj)
167 session_obj.data = data
168 self._update_session_expiry(session_obj)
169 await sa_session.commit()
170
171 async def delete(self, session_id: str) -> None:
172 """Delete the data associated with `session_id`. Fails silently if no such session-ID exists.
173
174 Args:
175 session_id: The session-ID
176
177 Returns:
178 None
179 """
180 async with self._create_sa_session() as sa_session:
181 await sa_session.execute(sa.delete(self._model).where(self._model.session_id == session_id))
182 await sa_session.commit()
183
184 async def delete_all(self) -> None:
185 """Delete all session data.
186
187 Returns:
188 None
189 """
190 async with self._create_sa_session() as sa_session:
191 await sa_session.execute(sa.delete(self._model))
192 await sa_session.commit()
193
194 async def delete_expired(self) -> None:
195 """Delete all expired session from the database.
196
197 Returns:
198 None
199 """
200 async with self._create_sa_session() as sa_session:
201 await sa_session.execute(sa.delete(self._model).where(self._model.expired))
202
203
204 class SQLAlchemyBackend(BaseSQLAlchemyBackend[SASession]):
205 """Synchronous SQLAlchemy backend."""
206
207 def _get_session_obj(self, *, sa_session: SASession, session_id: str) -> Optional[SessionModelMixin]:
208 return sa_session.scalars(self._select_session_obj(session_id)).one_or_none()
209
210 def _get_sync(self, session_id: str) -> Optional[bytes]:
211 sa_session = self._create_sa_session()
212 session_obj = self._get_session_obj(sa_session=sa_session, session_id=session_id)
213
214 if session_obj:
215 if not session_obj.expired: # type: ignore[truthy-function]
216 self._update_session_expiry(session_obj) # type: ignore[unreachable]
217 sa_session.commit()
218 return session_obj.data
219 sa_session.delete(session_obj)
220 sa_session.commit()
221 return None
222
223 async def get(self, session_id: str) -> Optional[bytes]:
224 """Retrieve data associated with `session_id`.
225
226 Args:
227 session_id: The session-ID
228
229 Returns:
230 The session data, if existing, otherwise `None`.
231 """
232 return await anyio.to_thread.run_sync(self._get_sync, session_id)
233
234 def _set_sync(self, session_id: str, data: bytes) -> None:
235 sa_session = self._create_sa_session()
236 session_obj = self._get_session_obj(sa_session=sa_session, session_id=session_id)
237
238 if not session_obj:
239 session_obj = self._model(session_id=session_id) # type: ignore[call-arg]
240 sa_session.add(session_obj)
241 session_obj.data = data
242 self._update_session_expiry(session_obj)
243 sa_session.commit()
244
245 async def set(self, session_id: str, data: bytes) -> None:
246 """Store `data` under the `session_id` for later retrieval.
247
248 If there is already data associated with `session_id`, replace
249 it with `data` and reset its expiry time
250
251 Args:
252 session_id: The session-ID
253 data: Serialized session data
254
255 Returns:
256 None
257 """
258 return await anyio.to_thread.run_sync(self._set_sync, session_id, data)
259
260 def _delete_sync(self, session_id: str) -> None:
261 sa_session = self._create_sa_session()
262 sa_session.execute(sa.delete(self._model).where(self._model.session_id == session_id))
263 sa_session.commit()
264
265 async def delete(self, session_id: str) -> None:
266 """Delete the data associated with `session_id`. Fails silently if no such session-ID exists.
267
268 Args:
269 session_id: The session-ID
270
271 Returns:
272 None
273 """
274 return await anyio.to_thread.run_sync(self._delete_sync, session_id)
275
276 def _delete_all_sync(self) -> None:
277 sa_session = self._create_sa_session()
278
279 sa_session.execute(sa.delete(self._model))
280 sa_session.commit()
281
282 async def delete_all(self) -> None:
283 """Delete all session data.
284
285 Returns:
286 None
287 """
288 await anyio.to_thread.run_sync(self._delete_all_sync)
289
290 def _delete_expired_sync(self) -> None:
291 sa_session = self._create_sa_session()
292 sa_session.execute(sa.delete(self._model).where(self._model.expired))
293
294 async def delete_expired(self) -> None:
295 """Delete all expired session from the database.
296
297 Returns:
298 None
299 """
300 await anyio.to_thread.run_sync(self._delete_expired_sync)
301
302
303 class SQLAlchemyBackendConfig(ServerSideSessionConfig):
304 """Configuration for `SQLAlchemyBackend` and `AsyncSQLAlchemyBackend`"""
305
306 model: Type[SessionModelMixin]
307 plugin: SQLAlchemyPlugin
308
309 @validator("plugin", always=True)
310 def validate_plugin_config(cls, value: SQLAlchemyPlugin) -> SQLAlchemyPlugin: # pylint: disable=no-self-argument)
311 """Check if the SQLAlchemyPlugin is configured."""
312 if not (value._config and value._config.session_maker): # pylint: disable=protected-access
313 raise ValueError("Plugin needs to be configured")
314 return value
315
316 @property
317 def _backend_class(self) -> Type[Union[SQLAlchemyBackend, AsyncSQLAlchemyBackend]]: # type: ignore[override]
318 """Return either `SQLAlchemyBackend` or `AsyncSQLAlchemyBackend`, depending on the engine type configured in the
319 `SQLAlchemyPlugin`
320 """
321 if cast("SQLAlchemyPluginConfig", self.plugin._config).use_async_engine: # pylint: disable=protected-access
322 return AsyncSQLAlchemyBackend
323 return SQLAlchemyBackend
324
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/starlite/middleware/session/sqlalchemy_backend.py b/starlite/middleware/session/sqlalchemy_backend.py
--- a/starlite/middleware/session/sqlalchemy_backend.py
+++ b/starlite/middleware/session/sqlalchemy_backend.py
@@ -31,7 +31,7 @@
"""Mixin for session storage."""
session_id: Mapped[str] = sa.Column(sa.String, nullable=False, unique=True, index=True) # pyright: ignore
- data: Mapped[bytes] = sa.Column(sa.BLOB, nullable=False) # pyright: ignore
+ data: Mapped[bytes] = sa.Column(sa.LargeBinary, nullable=False) # pyright: ignore
expires: Mapped[datetime] = sa.Column(sa.DateTime, nullable=False) # pyright: ignore
@hybrid_property
| {"golden_diff": "diff --git a/starlite/middleware/session/sqlalchemy_backend.py b/starlite/middleware/session/sqlalchemy_backend.py\n--- a/starlite/middleware/session/sqlalchemy_backend.py\n+++ b/starlite/middleware/session/sqlalchemy_backend.py\n@@ -31,7 +31,7 @@\n \"\"\"Mixin for session storage.\"\"\"\n \n session_id: Mapped[str] = sa.Column(sa.String, nullable=False, unique=True, index=True) # pyright: ignore\n- data: Mapped[bytes] = sa.Column(sa.BLOB, nullable=False) # pyright: ignore\n+ data: Mapped[bytes] = sa.Column(sa.LargeBinary, nullable=False) # pyright: ignore\n expires: Mapped[datetime] = sa.Column(sa.DateTime, nullable=False) # pyright: ignore\n \n @hybrid_property\n", "issue": "SQLAlchemy Session backend uses wrong column type in `SessionModelMixin`\n**Describe the bug**\r\nThe SQLAlchem session backend uses `BLOB` as the datatype in the `SessionModelMixin`, which is not compatible with all databases. \r\n\r\n\n", "before_files": [{"content": "from abc import ABC, abstractmethod\nfrom datetime import datetime, timedelta\nfrom typing import TYPE_CHECKING, Any, Generic, Optional, Type, TypeVar, Union, cast\n\nimport anyio.to_thread\nimport sqlalchemy as sa\nfrom pydantic import validator\nfrom sqlalchemy.ext.asyncio import AsyncSession as AsyncSASession\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom sqlalchemy.orm import Mapped\nfrom sqlalchemy.orm import Session as SASession\nfrom sqlalchemy.orm import declarative_mixin, registry\n\nfrom starlite.middleware.session.base import ServerSideBackend, ServerSideSessionConfig\nfrom starlite.plugins.sql_alchemy import SQLAlchemyPlugin\n\nif TYPE_CHECKING:\n from sqlalchemy.sql import Select\n from sqlalchemy.sql.elements import BooleanClauseList\n\n from starlite.plugins.sql_alchemy import SQLAlchemyConfig as SQLAlchemyPluginConfig\n\n\nAnySASession = Union[SASession, AsyncSASession]\nAnySASessionT = TypeVar(\"AnySASessionT\", bound=AnySASession)\nSessionModelT = TypeVar(\"SessionModelT\", bound=\"SessionModelMixin\")\n\n\n@declarative_mixin\nclass SessionModelMixin:\n \"\"\"Mixin for session storage.\"\"\"\n\n session_id: Mapped[str] = sa.Column(sa.String, nullable=False, unique=True, index=True) # pyright: ignore\n data: Mapped[bytes] = sa.Column(sa.BLOB, nullable=False) # pyright: ignore\n expires: Mapped[datetime] = sa.Column(sa.DateTime, nullable=False) # pyright: ignore\n\n @hybrid_property\n def expired(self) -> bool: # pyright: ignore\n \"\"\"Boolean indicating if the session has expired.\"\"\"\n return datetime.utcnow().replace(tzinfo=None) > self.expires\n\n @expired.expression # type: ignore[no-redef]\n def expired(cls) -> \"BooleanClauseList\": # pylint: disable=no-self-argument\n \"\"\"SQL-Expression to check if the session has expired.\"\"\"\n return datetime.utcnow().replace(tzinfo=None) > cls.expires # pyright: ignore\n\n\nclass SessionModel(SessionModelMixin):\n \"\"\"Session storage model.\"\"\"\n\n __tablename__ = \"session\"\n id: Mapped[int] = sa.Column(sa.Integer, primary_key=True) # pyright: ignore\n\n\ndef create_session_model(base: Type[Any], table_name: str = \"session\") -> Type[SessionModelMixin]:\n \"\"\"Dynamically generate a session storage model and register it with the declarative base.\n\n Args:\n base: SQLAlchemy declarative base\n table_name: Alternative table name\n\n Returns:\n A mapped model subclassing `base` and `SessionModelMixin`\n \"\"\"\n\n class Model(base, SessionModelMixin): # type: ignore[valid-type,misc]\n __tablename__ = table_name\n id: Mapped[int] = 
sa.Column(sa.Integer, primary_key=True) # pyright: ignore\n\n return Model\n\n\ndef register_session_model(base: Union[registry, Any], model: Type[SessionModelT]) -> Type[SessionModelT]:\n \"\"\"Map and register a pre-existing model subclassing `SessionModelMixin` with a declarative base or registry.\n\n Args:\n base: Either a `orm.registry` or `DeclarativeBase`\n model: SQLAlchemy model to register\n\n Returns:\n A mapped model subclassing `SessionModelMixin`, and registered in `registry`\n \"\"\"\n registry_ = base.registry if not isinstance(base, registry) else base\n return cast(\"Type[SessionModelT]\", registry_.map_declaratively(model))\n\n\nclass BaseSQLAlchemyBackend(Generic[AnySASessionT], ServerSideBackend[\"SQLAlchemyBackendConfig\"], ABC):\n \"\"\"Session backend to store data in a database with SQLAlchemy. Works with both sync and async engines.\n\n Notes:\n - Requires `sqlalchemy` which needs to be installed separately, and a configured\n [SQLAlchemyPlugin][starlite.plugins.sql_alchemy.SQLAlchemyPlugin].\n \"\"\"\n\n __slots__ = (\"_model\", \"_session_maker\")\n\n def __init__(self, config: \"SQLAlchemyBackendConfig\") -> None:\n \"\"\"Initialize `BaseSQLAlchemyBackend`.\n\n Args:\n config: An instance of `SQLAlchemyBackendConfig`\n \"\"\"\n super().__init__(config=config)\n self._model = config.model\n self._session_maker = cast(\"SQLAlchemyPluginConfig\", config.plugin._config).session_maker\n\n def _create_sa_session(self) -> AnySASessionT:\n return cast(\"AnySASessionT\", self._session_maker())\n\n def _select_session_obj(self, session_id: str) -> \"Select\":\n return sa.select(self._model).where(self._model.session_id == session_id)\n\n def _update_session_expiry(self, session_obj: SessionModelMixin) -> None:\n session_obj.expires = datetime.utcnow().replace(tzinfo=None) + timedelta(seconds=self.config.max_age)\n\n @abstractmethod\n async def delete_expired(self) -> None:\n \"\"\"Delete all expired sessions from the database.\"\"\"\n\n\nclass AsyncSQLAlchemyBackend(BaseSQLAlchemyBackend[AsyncSASession]):\n \"\"\"Asynchronous SQLAlchemy backend.\"\"\"\n\n async def _get_session_obj(self, *, sa_session: AsyncSASession, session_id: str) -> Optional[SessionModelMixin]:\n result = await sa_session.scalars(self._select_session_obj(session_id))\n return result.one_or_none()\n\n async def get(self, session_id: str) -> Optional[bytes]:\n \"\"\"Retrieve data associated with `session_id`.\n\n Args:\n session_id: The session-ID\n\n Returns:\n The session data, if existing, otherwise `None`.\n \"\"\"\n async with self._create_sa_session() as sa_session:\n session_obj = await self._get_session_obj(sa_session=sa_session, session_id=session_id)\n if session_obj:\n if not session_obj.expired: # type: ignore[truthy-function]\n self._update_session_expiry(session_obj) # type: ignore[unreachable]\n await sa_session.commit()\n return session_obj.data\n await sa_session.delete(session_obj)\n await sa_session.commit()\n return None\n\n async def set(self, session_id: str, data: bytes) -> None:\n \"\"\"Store `data` under the `session_id` for later retrieval.\n\n If there is already data associated with `session_id`, replace\n it with `data` and reset its expiry time\n\n Args:\n session_id: The session-ID.\n data: Serialized session data\n\n Returns:\n None\n \"\"\"\n async with self._create_sa_session() as sa_session:\n session_obj = await self._get_session_obj(sa_session=sa_session, session_id=session_id)\n\n if not session_obj:\n session_obj = self._model(session_id=session_id) # type: 
ignore[call-arg]\n sa_session.add(session_obj)\n session_obj.data = data\n self._update_session_expiry(session_obj)\n await sa_session.commit()\n\n async def delete(self, session_id: str) -> None:\n \"\"\"Delete the data associated with `session_id`. Fails silently if no such session-ID exists.\n\n Args:\n session_id: The session-ID\n\n Returns:\n None\n \"\"\"\n async with self._create_sa_session() as sa_session:\n await sa_session.execute(sa.delete(self._model).where(self._model.session_id == session_id))\n await sa_session.commit()\n\n async def delete_all(self) -> None:\n \"\"\"Delete all session data.\n\n Returns:\n None\n \"\"\"\n async with self._create_sa_session() as sa_session:\n await sa_session.execute(sa.delete(self._model))\n await sa_session.commit()\n\n async def delete_expired(self) -> None:\n \"\"\"Delete all expired session from the database.\n\n Returns:\n None\n \"\"\"\n async with self._create_sa_session() as sa_session:\n await sa_session.execute(sa.delete(self._model).where(self._model.expired))\n\n\nclass SQLAlchemyBackend(BaseSQLAlchemyBackend[SASession]):\n \"\"\"Synchronous SQLAlchemy backend.\"\"\"\n\n def _get_session_obj(self, *, sa_session: SASession, session_id: str) -> Optional[SessionModelMixin]:\n return sa_session.scalars(self._select_session_obj(session_id)).one_or_none()\n\n def _get_sync(self, session_id: str) -> Optional[bytes]:\n sa_session = self._create_sa_session()\n session_obj = self._get_session_obj(sa_session=sa_session, session_id=session_id)\n\n if session_obj:\n if not session_obj.expired: # type: ignore[truthy-function]\n self._update_session_expiry(session_obj) # type: ignore[unreachable]\n sa_session.commit()\n return session_obj.data\n sa_session.delete(session_obj)\n sa_session.commit()\n return None\n\n async def get(self, session_id: str) -> Optional[bytes]:\n \"\"\"Retrieve data associated with `session_id`.\n\n Args:\n session_id: The session-ID\n\n Returns:\n The session data, if existing, otherwise `None`.\n \"\"\"\n return await anyio.to_thread.run_sync(self._get_sync, session_id)\n\n def _set_sync(self, session_id: str, data: bytes) -> None:\n sa_session = self._create_sa_session()\n session_obj = self._get_session_obj(sa_session=sa_session, session_id=session_id)\n\n if not session_obj:\n session_obj = self._model(session_id=session_id) # type: ignore[call-arg]\n sa_session.add(session_obj)\n session_obj.data = data\n self._update_session_expiry(session_obj)\n sa_session.commit()\n\n async def set(self, session_id: str, data: bytes) -> None:\n \"\"\"Store `data` under the `session_id` for later retrieval.\n\n If there is already data associated with `session_id`, replace\n it with `data` and reset its expiry time\n\n Args:\n session_id: The session-ID\n data: Serialized session data\n\n Returns:\n None\n \"\"\"\n return await anyio.to_thread.run_sync(self._set_sync, session_id, data)\n\n def _delete_sync(self, session_id: str) -> None:\n sa_session = self._create_sa_session()\n sa_session.execute(sa.delete(self._model).where(self._model.session_id == session_id))\n sa_session.commit()\n\n async def delete(self, session_id: str) -> None:\n \"\"\"Delete the data associated with `session_id`. 
Fails silently if no such session-ID exists.\n\n Args:\n session_id: The session-ID\n\n Returns:\n None\n \"\"\"\n return await anyio.to_thread.run_sync(self._delete_sync, session_id)\n\n def _delete_all_sync(self) -> None:\n sa_session = self._create_sa_session()\n\n sa_session.execute(sa.delete(self._model))\n sa_session.commit()\n\n async def delete_all(self) -> None:\n \"\"\"Delete all session data.\n\n Returns:\n None\n \"\"\"\n await anyio.to_thread.run_sync(self._delete_all_sync)\n\n def _delete_expired_sync(self) -> None:\n sa_session = self._create_sa_session()\n sa_session.execute(sa.delete(self._model).where(self._model.expired))\n\n async def delete_expired(self) -> None:\n \"\"\"Delete all expired session from the database.\n\n Returns:\n None\n \"\"\"\n await anyio.to_thread.run_sync(self._delete_expired_sync)\n\n\nclass SQLAlchemyBackendConfig(ServerSideSessionConfig):\n \"\"\"Configuration for `SQLAlchemyBackend` and `AsyncSQLAlchemyBackend`\"\"\"\n\n model: Type[SessionModelMixin]\n plugin: SQLAlchemyPlugin\n\n @validator(\"plugin\", always=True)\n def validate_plugin_config(cls, value: SQLAlchemyPlugin) -> SQLAlchemyPlugin: # pylint: disable=no-self-argument)\n \"\"\"Check if the SQLAlchemyPlugin is configured.\"\"\"\n if not (value._config and value._config.session_maker): # pylint: disable=protected-access\n raise ValueError(\"Plugin needs to be configured\")\n return value\n\n @property\n def _backend_class(self) -> Type[Union[SQLAlchemyBackend, AsyncSQLAlchemyBackend]]: # type: ignore[override]\n \"\"\"Return either `SQLAlchemyBackend` or `AsyncSQLAlchemyBackend`, depending on the engine type configured in the\n `SQLAlchemyPlugin`\n \"\"\"\n if cast(\"SQLAlchemyPluginConfig\", self.plugin._config).use_async_engine: # pylint: disable=protected-access\n return AsyncSQLAlchemyBackend\n return SQLAlchemyBackend\n", "path": "starlite/middleware/session/sqlalchemy_backend.py"}], "after_files": [{"content": "from abc import ABC, abstractmethod\nfrom datetime import datetime, timedelta\nfrom typing import TYPE_CHECKING, Any, Generic, Optional, Type, TypeVar, Union, cast\n\nimport anyio.to_thread\nimport sqlalchemy as sa\nfrom pydantic import validator\nfrom sqlalchemy.ext.asyncio import AsyncSession as AsyncSASession\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom sqlalchemy.orm import Mapped\nfrom sqlalchemy.orm import Session as SASession\nfrom sqlalchemy.orm import declarative_mixin, registry\n\nfrom starlite.middleware.session.base import ServerSideBackend, ServerSideSessionConfig\nfrom starlite.plugins.sql_alchemy import SQLAlchemyPlugin\n\nif TYPE_CHECKING:\n from sqlalchemy.sql import Select\n from sqlalchemy.sql.elements import BooleanClauseList\n\n from starlite.plugins.sql_alchemy import SQLAlchemyConfig as SQLAlchemyPluginConfig\n\n\nAnySASession = Union[SASession, AsyncSASession]\nAnySASessionT = TypeVar(\"AnySASessionT\", bound=AnySASession)\nSessionModelT = TypeVar(\"SessionModelT\", bound=\"SessionModelMixin\")\n\n\n@declarative_mixin\nclass SessionModelMixin:\n \"\"\"Mixin for session storage.\"\"\"\n\n session_id: Mapped[str] = sa.Column(sa.String, nullable=False, unique=True, index=True) # pyright: ignore\n data: Mapped[bytes] = sa.Column(sa.LargeBinary, nullable=False) # pyright: ignore\n expires: Mapped[datetime] = sa.Column(sa.DateTime, nullable=False) # pyright: ignore\n\n @hybrid_property\n def expired(self) -> bool: # pyright: ignore\n \"\"\"Boolean indicating if the session has expired.\"\"\"\n return datetime.utcnow().replace(tzinfo=None) > 
self.expires\n\n @expired.expression # type: ignore[no-redef]\n def expired(cls) -> \"BooleanClauseList\": # pylint: disable=no-self-argument\n \"\"\"SQL-Expression to check if the session has expired.\"\"\"\n return datetime.utcnow().replace(tzinfo=None) > cls.expires # pyright: ignore\n\n\nclass SessionModel(SessionModelMixin):\n \"\"\"Session storage model.\"\"\"\n\n __tablename__ = \"session\"\n id: Mapped[int] = sa.Column(sa.Integer, primary_key=True) # pyright: ignore\n\n\ndef create_session_model(base: Type[Any], table_name: str = \"session\") -> Type[SessionModelMixin]:\n \"\"\"Dynamically generate a session storage model and register it with the declarative base.\n\n Args:\n base: SQLAlchemy declarative base\n table_name: Alternative table name\n\n Returns:\n A mapped model subclassing `base` and `SessionModelMixin`\n \"\"\"\n\n class Model(base, SessionModelMixin): # type: ignore[valid-type,misc]\n __tablename__ = table_name\n id: Mapped[int] = sa.Column(sa.Integer, primary_key=True) # pyright: ignore\n\n return Model\n\n\ndef register_session_model(base: Union[registry, Any], model: Type[SessionModelT]) -> Type[SessionModelT]:\n \"\"\"Map and register a pre-existing model subclassing `SessionModelMixin` with a declarative base or registry.\n\n Args:\n base: Either a `orm.registry` or `DeclarativeBase`\n model: SQLAlchemy model to register\n\n Returns:\n A mapped model subclassing `SessionModelMixin`, and registered in `registry`\n \"\"\"\n registry_ = base.registry if not isinstance(base, registry) else base\n return cast(\"Type[SessionModelT]\", registry_.map_declaratively(model))\n\n\nclass BaseSQLAlchemyBackend(Generic[AnySASessionT], ServerSideBackend[\"SQLAlchemyBackendConfig\"], ABC):\n \"\"\"Session backend to store data in a database with SQLAlchemy. 
Works with both sync and async engines.\n\n Notes:\n - Requires `sqlalchemy` which needs to be installed separately, and a configured\n [SQLAlchemyPlugin][starlite.plugins.sql_alchemy.SQLAlchemyPlugin].\n \"\"\"\n\n __slots__ = (\"_model\", \"_session_maker\")\n\n def __init__(self, config: \"SQLAlchemyBackendConfig\") -> None:\n \"\"\"Initialize `BaseSQLAlchemyBackend`.\n\n Args:\n config: An instance of `SQLAlchemyBackendConfig`\n \"\"\"\n super().__init__(config=config)\n self._model = config.model\n self._session_maker = cast(\"SQLAlchemyPluginConfig\", config.plugin._config).session_maker\n\n def _create_sa_session(self) -> AnySASessionT:\n return cast(\"AnySASessionT\", self._session_maker())\n\n def _select_session_obj(self, session_id: str) -> \"Select\":\n return sa.select(self._model).where(self._model.session_id == session_id)\n\n def _update_session_expiry(self, session_obj: SessionModelMixin) -> None:\n session_obj.expires = datetime.utcnow().replace(tzinfo=None) + timedelta(seconds=self.config.max_age)\n\n @abstractmethod\n async def delete_expired(self) -> None:\n \"\"\"Delete all expired sessions from the database.\"\"\"\n\n\nclass AsyncSQLAlchemyBackend(BaseSQLAlchemyBackend[AsyncSASession]):\n \"\"\"Asynchronous SQLAlchemy backend.\"\"\"\n\n async def _get_session_obj(self, *, sa_session: AsyncSASession, session_id: str) -> Optional[SessionModelMixin]:\n result = await sa_session.scalars(self._select_session_obj(session_id))\n return result.one_or_none()\n\n async def get(self, session_id: str) -> Optional[bytes]:\n \"\"\"Retrieve data associated with `session_id`.\n\n Args:\n session_id: The session-ID\n\n Returns:\n The session data, if existing, otherwise `None`.\n \"\"\"\n async with self._create_sa_session() as sa_session:\n session_obj = await self._get_session_obj(sa_session=sa_session, session_id=session_id)\n if session_obj:\n if not session_obj.expired: # type: ignore[truthy-function]\n self._update_session_expiry(session_obj) # type: ignore[unreachable]\n await sa_session.commit()\n return session_obj.data\n await sa_session.delete(session_obj)\n await sa_session.commit()\n return None\n\n async def set(self, session_id: str, data: bytes) -> None:\n \"\"\"Store `data` under the `session_id` for later retrieval.\n\n If there is already data associated with `session_id`, replace\n it with `data` and reset its expiry time\n\n Args:\n session_id: The session-ID.\n data: Serialized session data\n\n Returns:\n None\n \"\"\"\n async with self._create_sa_session() as sa_session:\n session_obj = await self._get_session_obj(sa_session=sa_session, session_id=session_id)\n\n if not session_obj:\n session_obj = self._model(session_id=session_id) # type: ignore[call-arg]\n sa_session.add(session_obj)\n session_obj.data = data\n self._update_session_expiry(session_obj)\n await sa_session.commit()\n\n async def delete(self, session_id: str) -> None:\n \"\"\"Delete the data associated with `session_id`. 
Fails silently if no such session-ID exists.\n\n Args:\n session_id: The session-ID\n\n Returns:\n None\n \"\"\"\n async with self._create_sa_session() as sa_session:\n await sa_session.execute(sa.delete(self._model).where(self._model.session_id == session_id))\n await sa_session.commit()\n\n async def delete_all(self) -> None:\n \"\"\"Delete all session data.\n\n Returns:\n None\n \"\"\"\n async with self._create_sa_session() as sa_session:\n await sa_session.execute(sa.delete(self._model))\n await sa_session.commit()\n\n async def delete_expired(self) -> None:\n \"\"\"Delete all expired session from the database.\n\n Returns:\n None\n \"\"\"\n async with self._create_sa_session() as sa_session:\n await sa_session.execute(sa.delete(self._model).where(self._model.expired))\n\n\nclass SQLAlchemyBackend(BaseSQLAlchemyBackend[SASession]):\n \"\"\"Synchronous SQLAlchemy backend.\"\"\"\n\n def _get_session_obj(self, *, sa_session: SASession, session_id: str) -> Optional[SessionModelMixin]:\n return sa_session.scalars(self._select_session_obj(session_id)).one_or_none()\n\n def _get_sync(self, session_id: str) -> Optional[bytes]:\n sa_session = self._create_sa_session()\n session_obj = self._get_session_obj(sa_session=sa_session, session_id=session_id)\n\n if session_obj:\n if not session_obj.expired: # type: ignore[truthy-function]\n self._update_session_expiry(session_obj) # type: ignore[unreachable]\n sa_session.commit()\n return session_obj.data\n sa_session.delete(session_obj)\n sa_session.commit()\n return None\n\n async def get(self, session_id: str) -> Optional[bytes]:\n \"\"\"Retrieve data associated with `session_id`.\n\n Args:\n session_id: The session-ID\n\n Returns:\n The session data, if existing, otherwise `None`.\n \"\"\"\n return await anyio.to_thread.run_sync(self._get_sync, session_id)\n\n def _set_sync(self, session_id: str, data: bytes) -> None:\n sa_session = self._create_sa_session()\n session_obj = self._get_session_obj(sa_session=sa_session, session_id=session_id)\n\n if not session_obj:\n session_obj = self._model(session_id=session_id) # type: ignore[call-arg]\n sa_session.add(session_obj)\n session_obj.data = data\n self._update_session_expiry(session_obj)\n sa_session.commit()\n\n async def set(self, session_id: str, data: bytes) -> None:\n \"\"\"Store `data` under the `session_id` for later retrieval.\n\n If there is already data associated with `session_id`, replace\n it with `data` and reset its expiry time\n\n Args:\n session_id: The session-ID\n data: Serialized session data\n\n Returns:\n None\n \"\"\"\n return await anyio.to_thread.run_sync(self._set_sync, session_id, data)\n\n def _delete_sync(self, session_id: str) -> None:\n sa_session = self._create_sa_session()\n sa_session.execute(sa.delete(self._model).where(self._model.session_id == session_id))\n sa_session.commit()\n\n async def delete(self, session_id: str) -> None:\n \"\"\"Delete the data associated with `session_id`. 
Fails silently if no such session-ID exists.\n\n Args:\n session_id: The session-ID\n\n Returns:\n None\n \"\"\"\n return await anyio.to_thread.run_sync(self._delete_sync, session_id)\n\n def _delete_all_sync(self) -> None:\n sa_session = self._create_sa_session()\n\n sa_session.execute(sa.delete(self._model))\n sa_session.commit()\n\n async def delete_all(self) -> None:\n \"\"\"Delete all session data.\n\n Returns:\n None\n \"\"\"\n await anyio.to_thread.run_sync(self._delete_all_sync)\n\n def _delete_expired_sync(self) -> None:\n sa_session = self._create_sa_session()\n sa_session.execute(sa.delete(self._model).where(self._model.expired))\n\n async def delete_expired(self) -> None:\n \"\"\"Delete all expired session from the database.\n\n Returns:\n None\n \"\"\"\n await anyio.to_thread.run_sync(self._delete_expired_sync)\n\n\nclass SQLAlchemyBackendConfig(ServerSideSessionConfig):\n \"\"\"Configuration for `SQLAlchemyBackend` and `AsyncSQLAlchemyBackend`\"\"\"\n\n model: Type[SessionModelMixin]\n plugin: SQLAlchemyPlugin\n\n @validator(\"plugin\", always=True)\n def validate_plugin_config(cls, value: SQLAlchemyPlugin) -> SQLAlchemyPlugin: # pylint: disable=no-self-argument)\n \"\"\"Check if the SQLAlchemyPlugin is configured.\"\"\"\n if not (value._config and value._config.session_maker): # pylint: disable=protected-access\n raise ValueError(\"Plugin needs to be configured\")\n return value\n\n @property\n def _backend_class(self) -> Type[Union[SQLAlchemyBackend, AsyncSQLAlchemyBackend]]: # type: ignore[override]\n \"\"\"Return either `SQLAlchemyBackend` or `AsyncSQLAlchemyBackend`, depending on the engine type configured in the\n `SQLAlchemyPlugin`\n \"\"\"\n if cast(\"SQLAlchemyPluginConfig\", self.plugin._config).use_async_engine: # pylint: disable=protected-access\n return AsyncSQLAlchemyBackend\n return SQLAlchemyBackend\n", "path": "starlite/middleware/session/sqlalchemy_backend.py"}]} | 3,903 | 180 |
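A brief illustrative note on the golden diff of the record above: `sa.LargeBinary` is SQLAlchemy's dialect-agnostic binary type, while the uppercase `sa.BLOB` is emitted verbatim in DDL and is rejected by backends such as PostgreSQL that have no BLOB column type. The sketch below is not part of the dataset record; the column name `data` is arbitrary and the expected outputs are noted in comments.

```python
# Illustrative sketch: LargeBinary compiles to a backend-appropriate type,
# whereas the SQL-standard BLOB type would be emitted literally.
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql, sqlite

data_column = sa.Column("data", sa.LargeBinary, nullable=False)

print(data_column.type.compile(dialect=postgresql.dialect()))  # BYTEA
print(data_column.type.compile(dialect=sqlite.dialect()))      # BLOB
```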
gh_patches_debug_42035 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-1984 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Crash of har_dump.py
I sometimes get either `b'' is not JSON serializable` or `Object of type 'bytes' is not JSON serializable`
I used mitmproxy to extract such a flow and uploaded it [here](https://gist.github.com/Junkern/495d4c7c1b572d7749f0291adfb0dd15)
##### Steps to reproduce the problem:
1.
2.
3.
##### Any other comments? What have you tried so far?
It seems that the error mostly occurs for `POST` and `HEAD` requests, sometimes with an empty response body and sometimes not. Only seldom does a `GET` request trigger this error.
##### System information
mitmproxy 1.0.2
macOS Sierra 10.12.3
Edit: Updated the Gist link to point to a working binary gist
--- END ISSUE ---
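A hedged illustration of the reported crash, not taken from the issue itself: `json.dumps` refuses `bytes`, so any code path that hands an undecoded or empty (`b''`) body to the HAR structure raises exactly this `TypeError`. The `old_decode` helper below is a hypothetical stand-in for a decoder that returns empty input unchanged; it exists only to reproduce the symptom.

```python
# Hypothetical repro: an empty body returned as b'' instead of '' makes
# json.dumps raise "Object of type 'bytes' is not JSON serializable".
import json


def old_decode(encoded: bytes, encoding: str = "utf8"):
    # Stand-in for a decoder that returns empty input unchanged (as bytes).
    if len(encoded) == 0:
        return encoded
    return encoded.decode(encoding)


entry = {"response": {"content": {"text": old_decode(b"")}}}
try:
    json.dumps(entry)
except TypeError as exc:
    print(exc)  # e.g. Object of type 'bytes' is not JSON serializable
```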
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/complex/har_dump.py`
Content:
```
1 """
2 This inline script can be used to dump flows as HAR files.
3 """
4
5
6 import json
7 import sys
8 import base64
9 import zlib
10
11 from datetime import datetime
12 import pytz
13
14 import mitmproxy
15
16 from mitmproxy import version
17 from mitmproxy.utils import strutils
18 from mitmproxy.net.http import cookies
19
20 HAR = {}
21
22 # A list of server seen till now is maintained so we can avoid
23 # using 'connect' time for entries that use an existing connection.
24 SERVERS_SEEN = set()
25
26
27 def start():
28 """
29 Called once on script startup before any other events.
30 """
31 if len(sys.argv) != 2:
32 raise ValueError(
33 'Usage: -s "har_dump.py filename" '
34 '(- will output to stdout, filenames ending with .zhar '
35 'will result in compressed har)'
36 )
37
38 HAR.update({
39 "log": {
40 "version": "1.2",
41 "creator": {
42 "name": "mitmproxy har_dump",
43 "version": "0.1",
44 "comment": "mitmproxy version %s" % version.MITMPROXY
45 },
46 "entries": []
47 }
48 })
49
50
51 def response(flow):
52 """
53 Called when a server response has been received.
54 """
55
56 # -1 indicates that these values do not apply to current request
57 ssl_time = -1
58 connect_time = -1
59
60 if flow.server_conn and flow.server_conn not in SERVERS_SEEN:
61 connect_time = (flow.server_conn.timestamp_tcp_setup -
62 flow.server_conn.timestamp_start)
63
64 if flow.server_conn.timestamp_ssl_setup is not None:
65 ssl_time = (flow.server_conn.timestamp_ssl_setup -
66 flow.server_conn.timestamp_tcp_setup)
67
68 SERVERS_SEEN.add(flow.server_conn)
69
70 # Calculate raw timings from timestamps. DNS timings can not be calculated
71 # for lack of a way to measure it. The same goes for HAR blocked.
72 # mitmproxy will open a server connection as soon as it receives the host
73 # and port from the client connection. So, the time spent waiting is actually
74 # spent waiting between request.timestamp_end and response.timestamp_start
75 # thus it correlates to HAR wait instead.
76 timings_raw = {
77 'send': flow.request.timestamp_end - flow.request.timestamp_start,
78 'receive': flow.response.timestamp_end - flow.response.timestamp_start,
79 'wait': flow.response.timestamp_start - flow.request.timestamp_end,
80 'connect': connect_time,
81 'ssl': ssl_time,
82 }
83
84 # HAR timings are integers in ms, so we re-encode the raw timings to that format.
85 timings = dict([(k, int(1000 * v)) for k, v in timings_raw.items()])
86
87 # full_time is the sum of all timings.
88 # Timings set to -1 will be ignored as per spec.
89 full_time = sum(v for v in timings.values() if v > -1)
90
91 started_date_time = format_datetime(datetime.utcfromtimestamp(flow.request.timestamp_start))
92
93 # Response body size and encoding
94 response_body_size = len(flow.response.raw_content)
95 response_body_decoded_size = len(flow.response.content)
96 response_body_compression = response_body_decoded_size - response_body_size
97
98 entry = {
99 "startedDateTime": started_date_time,
100 "time": full_time,
101 "request": {
102 "method": flow.request.method,
103 "url": flow.request.url,
104 "httpVersion": flow.request.http_version,
105 "cookies": format_request_cookies(flow.request.cookies.fields),
106 "headers": name_value(flow.request.headers),
107 "queryString": name_value(flow.request.query or {}),
108 "headersSize": len(str(flow.request.headers)),
109 "bodySize": len(flow.request.content),
110 },
111 "response": {
112 "status": flow.response.status_code,
113 "statusText": flow.response.reason,
114 "httpVersion": flow.response.http_version,
115 "cookies": format_response_cookies(flow.response.cookies.fields),
116 "headers": name_value(flow.response.headers),
117 "content": {
118 "size": response_body_size,
119 "compression": response_body_compression,
120 "mimeType": flow.response.headers.get('Content-Type', '')
121 },
122 "redirectURL": flow.response.headers.get('Location', ''),
123 "headersSize": len(str(flow.response.headers)),
124 "bodySize": response_body_size,
125 },
126 "cache": {},
127 "timings": timings,
128 }
129
130 # Store binary data as base64
131 if strutils.is_mostly_bin(flow.response.content):
132 entry["response"]["content"]["text"] = base64.b64encode(flow.response.content).decode()
133 entry["response"]["content"]["encoding"] = "base64"
134 else:
135 entry["response"]["content"]["text"] = flow.response.get_text(strict=False)
136
137 if flow.request.method in ["POST", "PUT", "PATCH"]:
138 params = [
139 {"name": a, "value": b}
140 for a, b in flow.request.urlencoded_form.items(multi=True)
141 ]
142 entry["request"]["postData"] = {
143 "mimeType": flow.request.headers.get("Content-Type", ""),
144 "text": flow.request.get_text(strict=False),
145 "params": params
146 }
147
148 if flow.server_conn.connected():
149 entry["serverIPAddress"] = str(flow.server_conn.ip_address.address[0])
150
151 HAR["log"]["entries"].append(entry)
152
153
154 def done():
155 """
156 Called once on script shutdown, after any other events.
157 """
158 dump_file = sys.argv[1]
159
160 json_dump = json.dumps(HAR, indent=2) # type: str
161
162 if dump_file == '-':
163 mitmproxy.ctx.log(json_dump)
164 else:
165 raw = json_dump.encode() # type: bytes
166 if dump_file.endswith('.zhar'):
167 raw = zlib.compress(raw, 9)
168
169 with open(dump_file, "wb") as f:
170 f.write(raw)
171
172 mitmproxy.ctx.log("HAR dump finished (wrote %s bytes to file)" % len(json_dump))
173
174
175 def format_datetime(dt):
176 return dt.replace(tzinfo=pytz.timezone("UTC")).isoformat()
177
178
179 def format_cookies(cookie_list):
180 rv = []
181
182 for name, value, attrs in cookie_list:
183 cookie_har = {
184 "name": name,
185 "value": value,
186 }
187
188 # HAR only needs some attributes
189 for key in ["path", "domain", "comment"]:
190 if key in attrs:
191 cookie_har[key] = attrs[key]
192
193 # These keys need to be boolean!
194 for key in ["httpOnly", "secure"]:
195 cookie_har[key] = bool(key in attrs)
196
197 # Expiration time needs to be formatted
198 expire_ts = cookies.get_expiration_ts(attrs)
199 if expire_ts is not None:
200 cookie_har["expires"] = format_datetime(datetime.fromtimestamp(expire_ts))
201
202 rv.append(cookie_har)
203
204 return rv
205
206
207 def format_request_cookies(fields):
208 return format_cookies(cookies.group_cookies(fields))
209
210
211 def format_response_cookies(fields):
212 return format_cookies((c[0], c[1].value, c[1].attrs) for c in fields)
213
214
215 def name_value(obj):
216 """
217 Convert (key, value) pairs to HAR format.
218 """
219 return [{"name": k, "value": v} for k, v in obj.items()]
220
```
Path: `mitmproxy/net/http/encoding.py`
Content:
```
1 """
2 Utility functions for decoding response bodies.
3 """
4
5 import codecs
6 import collections
7 from io import BytesIO
8
9 import gzip
10 import zlib
11 import brotli
12
13 from typing import Union
14
15
16 # We have a shared single-element cache for encoding and decoding.
17 # This is quite useful in practice, e.g.
18 # flow.request.content = flow.request.content.replace(b"foo", b"bar")
19 # does not require an .encode() call if content does not contain b"foo"
20 CachedDecode = collections.namedtuple("CachedDecode", "encoded encoding errors decoded")
21 _cache = CachedDecode(None, None, None, None)
22
23
24 def decode(encoded: Union[str, bytes], encoding: str, errors: str='strict') -> Union[str, bytes]:
25 """
26 Decode the given input object
27
28 Returns:
29 The decoded value
30
31 Raises:
32 ValueError, if decoding fails.
33 """
34 if len(encoded) == 0:
35 return encoded
36
37 global _cache
38 cached = (
39 isinstance(encoded, bytes) and
40 _cache.encoded == encoded and
41 _cache.encoding == encoding and
42 _cache.errors == errors
43 )
44 if cached:
45 return _cache.decoded
46 try:
47 try:
48 decoded = custom_decode[encoding](encoded)
49 except KeyError:
50 decoded = codecs.decode(encoded, encoding, errors)
51 if encoding in ("gzip", "deflate", "br"):
52 _cache = CachedDecode(encoded, encoding, errors, decoded)
53 return decoded
54 except TypeError:
55 raise
56 except Exception as e:
57 raise ValueError("{} when decoding {} with {}: {}".format(
58 type(e).__name__,
59 repr(encoded)[:10],
60 repr(encoding),
61 repr(e),
62 ))
63
64
65 def encode(decoded: Union[str, bytes], encoding: str, errors: str='strict') -> Union[str, bytes]:
66 """
67 Encode the given input object
68
69 Returns:
70 The encoded value
71
72 Raises:
73 ValueError, if encoding fails.
74 """
75 if len(decoded) == 0:
76 return decoded
77
78 global _cache
79 cached = (
80 isinstance(decoded, bytes) and
81 _cache.decoded == decoded and
82 _cache.encoding == encoding and
83 _cache.errors == errors
84 )
85 if cached:
86 return _cache.encoded
87 try:
88 try:
89 value = decoded
90 if isinstance(value, str):
91 value = decoded.encode()
92 encoded = custom_encode[encoding](value)
93 except KeyError:
94 encoded = codecs.encode(decoded, encoding, errors)
95 if encoding in ("gzip", "deflate", "br"):
96 _cache = CachedDecode(encoded, encoding, errors, decoded)
97 return encoded
98 except TypeError:
99 raise
100 except Exception as e:
101 raise ValueError("{} when encoding {} with {}: {}".format(
102 type(e).__name__,
103 repr(decoded)[:10],
104 repr(encoding),
105 repr(e),
106 ))
107
108
109 def identity(content):
110 """
111 Returns content unchanged. Identity is the default value of
112 Accept-Encoding headers.
113 """
114 return content
115
116
117 def decode_gzip(content):
118 gfile = gzip.GzipFile(fileobj=BytesIO(content))
119 return gfile.read()
120
121
122 def encode_gzip(content):
123 s = BytesIO()
124 gf = gzip.GzipFile(fileobj=s, mode='wb')
125 gf.write(content)
126 gf.close()
127 return s.getvalue()
128
129
130 def decode_brotli(content):
131 return brotli.decompress(content)
132
133
134 def encode_brotli(content):
135 return brotli.compress(content)
136
137
138 def decode_deflate(content):
139 """
140 Returns decompressed data for DEFLATE. Some servers may respond with
141 compressed data without a zlib header or checksum. An undocumented
142 feature of zlib permits the lenient decompression of data missing both
143 values.
144
145 http://bugs.python.org/issue5784
146 """
147 try:
148 return zlib.decompress(content)
149 except zlib.error:
150 return zlib.decompress(content, -15)
151
152
153 def encode_deflate(content):
154 """
155 Returns compressed content, always including zlib header and checksum.
156 """
157 return zlib.compress(content)
158
159
160 custom_decode = {
161 "none": identity,
162 "identity": identity,
163 "gzip": decode_gzip,
164 "deflate": decode_deflate,
165 "br": decode_brotli,
166 }
167 custom_encode = {
168 "none": identity,
169 "identity": identity,
170 "gzip": encode_gzip,
171 "deflate": encode_deflate,
172 "br": encode_brotli,
173 }
174
175 __all__ = ["encode", "decode"]
176
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/complex/har_dump.py b/examples/complex/har_dump.py
--- a/examples/complex/har_dump.py
+++ b/examples/complex/har_dump.py
@@ -7,6 +7,7 @@
import sys
import base64
import zlib
+import os
from datetime import datetime
import pytz
@@ -166,7 +167,7 @@
if dump_file.endswith('.zhar'):
raw = zlib.compress(raw, 9)
- with open(dump_file, "wb") as f:
+ with open(os.path.expanduser(dump_file), "wb") as f:
f.write(raw)
mitmproxy.ctx.log("HAR dump finished (wrote %s bytes to file)" % len(json_dump))
diff --git a/mitmproxy/net/http/encoding.py b/mitmproxy/net/http/encoding.py
--- a/mitmproxy/net/http/encoding.py
+++ b/mitmproxy/net/http/encoding.py
@@ -31,8 +31,8 @@
Raises:
ValueError, if decoding fails.
"""
- if len(encoded) == 0:
- return encoded
+ if encoded is None:
+ return None
global _cache
cached = (
@@ -72,8 +72,8 @@
Raises:
ValueError, if encoding fails.
"""
- if len(decoded) == 0:
- return decoded
+ if decoded is None:
+ return None
global _cache
cached = (
@@ -86,10 +86,7 @@
return _cache.encoded
try:
try:
- value = decoded
- if isinstance(value, str):
- value = decoded.encode()
- encoded = custom_encode[encoding](value)
+ encoded = custom_encode[encoding](decoded)
except KeyError:
encoded = codecs.encode(decoded, encoding, errors)
if encoding in ("gzip", "deflate", "br"):
@@ -114,12 +111,14 @@
return content
-def decode_gzip(content):
+def decode_gzip(content: bytes) -> bytes:
+ if not content:
+ return b""
gfile = gzip.GzipFile(fileobj=BytesIO(content))
return gfile.read()
-def encode_gzip(content):
+def encode_gzip(content: bytes) -> bytes:
s = BytesIO()
gf = gzip.GzipFile(fileobj=s, mode='wb')
gf.write(content)
@@ -127,15 +126,17 @@
return s.getvalue()
-def decode_brotli(content):
+def decode_brotli(content: bytes) -> bytes:
+ if not content:
+ return b""
return brotli.decompress(content)
-def encode_brotli(content):
+def encode_brotli(content: bytes) -> bytes:
return brotli.compress(content)
-def decode_deflate(content):
+def decode_deflate(content: bytes) -> bytes:
"""
Returns decompressed data for DEFLATE. Some servers may respond with
compressed data without a zlib header or checksum. An undocumented
@@ -144,13 +145,15 @@
http://bugs.python.org/issue5784
"""
+ if not content:
+ return b""
try:
return zlib.decompress(content)
except zlib.error:
return zlib.decompress(content, -15)
-def encode_deflate(content):
+def encode_deflate(content: bytes) -> bytes:
"""
Returns compressed content, always including zlib header and checksum.
"""
| {"golden_diff": "diff --git a/examples/complex/har_dump.py b/examples/complex/har_dump.py\n--- a/examples/complex/har_dump.py\n+++ b/examples/complex/har_dump.py\n@@ -7,6 +7,7 @@\n import sys\n import base64\n import zlib\n+import os\n \n from datetime import datetime\n import pytz\n@@ -166,7 +167,7 @@\n if dump_file.endswith('.zhar'):\n raw = zlib.compress(raw, 9)\n \n- with open(dump_file, \"wb\") as f:\n+ with open(os.path.expanduser(dump_file), \"wb\") as f:\n f.write(raw)\n \n mitmproxy.ctx.log(\"HAR dump finished (wrote %s bytes to file)\" % len(json_dump))\ndiff --git a/mitmproxy/net/http/encoding.py b/mitmproxy/net/http/encoding.py\n--- a/mitmproxy/net/http/encoding.py\n+++ b/mitmproxy/net/http/encoding.py\n@@ -31,8 +31,8 @@\n Raises:\n ValueError, if decoding fails.\n \"\"\"\n- if len(encoded) == 0:\n- return encoded\n+ if encoded is None:\n+ return None\n \n global _cache\n cached = (\n@@ -72,8 +72,8 @@\n Raises:\n ValueError, if encoding fails.\n \"\"\"\n- if len(decoded) == 0:\n- return decoded\n+ if decoded is None:\n+ return None\n \n global _cache\n cached = (\n@@ -86,10 +86,7 @@\n return _cache.encoded\n try:\n try:\n- value = decoded\n- if isinstance(value, str):\n- value = decoded.encode()\n- encoded = custom_encode[encoding](value)\n+ encoded = custom_encode[encoding](decoded)\n except KeyError:\n encoded = codecs.encode(decoded, encoding, errors)\n if encoding in (\"gzip\", \"deflate\", \"br\"):\n@@ -114,12 +111,14 @@\n return content\n \n \n-def decode_gzip(content):\n+def decode_gzip(content: bytes) -> bytes:\n+ if not content:\n+ return b\"\"\n gfile = gzip.GzipFile(fileobj=BytesIO(content))\n return gfile.read()\n \n \n-def encode_gzip(content):\n+def encode_gzip(content: bytes) -> bytes:\n s = BytesIO()\n gf = gzip.GzipFile(fileobj=s, mode='wb')\n gf.write(content)\n@@ -127,15 +126,17 @@\n return s.getvalue()\n \n \n-def decode_brotli(content):\n+def decode_brotli(content: bytes) -> bytes:\n+ if not content:\n+ return b\"\"\n return brotli.decompress(content)\n \n \n-def encode_brotli(content):\n+def encode_brotli(content: bytes) -> bytes:\n return brotli.compress(content)\n \n \n-def decode_deflate(content):\n+def decode_deflate(content: bytes) -> bytes:\n \"\"\"\n Returns decompressed data for DEFLATE. Some servers may respond with\n compressed data without a zlib header or checksum. An undocumented\n@@ -144,13 +145,15 @@\n \n http://bugs.python.org/issue5784\n \"\"\"\n+ if not content:\n+ return b\"\"\n try:\n return zlib.decompress(content)\n except zlib.error:\n return zlib.decompress(content, -15)\n \n \n-def encode_deflate(content):\n+def encode_deflate(content: bytes) -> bytes:\n \"\"\"\n Returns compressed content, always including zlib header and checksum.\n \"\"\"\n", "issue": "Crash of har_dump.py\nI sometimes get either `b'' is not JSON serializable` or `Object of type 'bytes' is not JSON serializable`\r\n\r\nI used mitmproxy to extract such a flow and uploaded it [here](https://gist.github.com/Junkern/495d4c7c1b572d7749f0291adfb0dd15)\r\n\r\n##### Steps to reproduce the problem:\r\n\r\n1.\r\n2.\r\n3.\r\n\r\n\r\n##### Any other comments? What have you tried so far?\r\n\r\nIt seems, that the error mostly occurs for `POST` and `HEAD` requests. Sometimes with an empty response body, sometimes not. 
Seldomly a `GET` requests triggers this error\r\n\r\n##### System information\r\nmitmproxy 1.0.2\r\nmacOs Sierra 10.12.3\r\n\r\nEdit: Updated the Gist link to point to a working binary gist\n", "before_files": [{"content": "\"\"\"\nThis inline script can be used to dump flows as HAR files.\n\"\"\"\n\n\nimport json\nimport sys\nimport base64\nimport zlib\n\nfrom datetime import datetime\nimport pytz\n\nimport mitmproxy\n\nfrom mitmproxy import version\nfrom mitmproxy.utils import strutils\nfrom mitmproxy.net.http import cookies\n\nHAR = {}\n\n# A list of server seen till now is maintained so we can avoid\n# using 'connect' time for entries that use an existing connection.\nSERVERS_SEEN = set()\n\n\ndef start():\n \"\"\"\n Called once on script startup before any other events.\n \"\"\"\n if len(sys.argv) != 2:\n raise ValueError(\n 'Usage: -s \"har_dump.py filename\" '\n '(- will output to stdout, filenames ending with .zhar '\n 'will result in compressed har)'\n )\n\n HAR.update({\n \"log\": {\n \"version\": \"1.2\",\n \"creator\": {\n \"name\": \"mitmproxy har_dump\",\n \"version\": \"0.1\",\n \"comment\": \"mitmproxy version %s\" % version.MITMPROXY\n },\n \"entries\": []\n }\n })\n\n\ndef response(flow):\n \"\"\"\n Called when a server response has been received.\n \"\"\"\n\n # -1 indicates that these values do not apply to current request\n ssl_time = -1\n connect_time = -1\n\n if flow.server_conn and flow.server_conn not in SERVERS_SEEN:\n connect_time = (flow.server_conn.timestamp_tcp_setup -\n flow.server_conn.timestamp_start)\n\n if flow.server_conn.timestamp_ssl_setup is not None:\n ssl_time = (flow.server_conn.timestamp_ssl_setup -\n flow.server_conn.timestamp_tcp_setup)\n\n SERVERS_SEEN.add(flow.server_conn)\n\n # Calculate raw timings from timestamps. DNS timings can not be calculated\n # for lack of a way to measure it. The same goes for HAR blocked.\n # mitmproxy will open a server connection as soon as it receives the host\n # and port from the client connection. 
So, the time spent waiting is actually\n # spent waiting between request.timestamp_end and response.timestamp_start\n # thus it correlates to HAR wait instead.\n timings_raw = {\n 'send': flow.request.timestamp_end - flow.request.timestamp_start,\n 'receive': flow.response.timestamp_end - flow.response.timestamp_start,\n 'wait': flow.response.timestamp_start - flow.request.timestamp_end,\n 'connect': connect_time,\n 'ssl': ssl_time,\n }\n\n # HAR timings are integers in ms, so we re-encode the raw timings to that format.\n timings = dict([(k, int(1000 * v)) for k, v in timings_raw.items()])\n\n # full_time is the sum of all timings.\n # Timings set to -1 will be ignored as per spec.\n full_time = sum(v for v in timings.values() if v > -1)\n\n started_date_time = format_datetime(datetime.utcfromtimestamp(flow.request.timestamp_start))\n\n # Response body size and encoding\n response_body_size = len(flow.response.raw_content)\n response_body_decoded_size = len(flow.response.content)\n response_body_compression = response_body_decoded_size - response_body_size\n\n entry = {\n \"startedDateTime\": started_date_time,\n \"time\": full_time,\n \"request\": {\n \"method\": flow.request.method,\n \"url\": flow.request.url,\n \"httpVersion\": flow.request.http_version,\n \"cookies\": format_request_cookies(flow.request.cookies.fields),\n \"headers\": name_value(flow.request.headers),\n \"queryString\": name_value(flow.request.query or {}),\n \"headersSize\": len(str(flow.request.headers)),\n \"bodySize\": len(flow.request.content),\n },\n \"response\": {\n \"status\": flow.response.status_code,\n \"statusText\": flow.response.reason,\n \"httpVersion\": flow.response.http_version,\n \"cookies\": format_response_cookies(flow.response.cookies.fields),\n \"headers\": name_value(flow.response.headers),\n \"content\": {\n \"size\": response_body_size,\n \"compression\": response_body_compression,\n \"mimeType\": flow.response.headers.get('Content-Type', '')\n },\n \"redirectURL\": flow.response.headers.get('Location', ''),\n \"headersSize\": len(str(flow.response.headers)),\n \"bodySize\": response_body_size,\n },\n \"cache\": {},\n \"timings\": timings,\n }\n\n # Store binary data as base64\n if strutils.is_mostly_bin(flow.response.content):\n entry[\"response\"][\"content\"][\"text\"] = base64.b64encode(flow.response.content).decode()\n entry[\"response\"][\"content\"][\"encoding\"] = \"base64\"\n else:\n entry[\"response\"][\"content\"][\"text\"] = flow.response.get_text(strict=False)\n\n if flow.request.method in [\"POST\", \"PUT\", \"PATCH\"]:\n params = [\n {\"name\": a, \"value\": b}\n for a, b in flow.request.urlencoded_form.items(multi=True)\n ]\n entry[\"request\"][\"postData\"] = {\n \"mimeType\": flow.request.headers.get(\"Content-Type\", \"\"),\n \"text\": flow.request.get_text(strict=False),\n \"params\": params\n }\n\n if flow.server_conn.connected():\n entry[\"serverIPAddress\"] = str(flow.server_conn.ip_address.address[0])\n\n HAR[\"log\"][\"entries\"].append(entry)\n\n\ndef done():\n \"\"\"\n Called once on script shutdown, after any other events.\n \"\"\"\n dump_file = sys.argv[1]\n\n json_dump = json.dumps(HAR, indent=2) # type: str\n\n if dump_file == '-':\n mitmproxy.ctx.log(json_dump)\n else:\n raw = json_dump.encode() # type: bytes\n if dump_file.endswith('.zhar'):\n raw = zlib.compress(raw, 9)\n\n with open(dump_file, \"wb\") as f:\n f.write(raw)\n\n mitmproxy.ctx.log(\"HAR dump finished (wrote %s bytes to file)\" % len(json_dump))\n\n\ndef format_datetime(dt):\n return 
dt.replace(tzinfo=pytz.timezone(\"UTC\")).isoformat()\n\n\ndef format_cookies(cookie_list):\n rv = []\n\n for name, value, attrs in cookie_list:\n cookie_har = {\n \"name\": name,\n \"value\": value,\n }\n\n # HAR only needs some attributes\n for key in [\"path\", \"domain\", \"comment\"]:\n if key in attrs:\n cookie_har[key] = attrs[key]\n\n # These keys need to be boolean!\n for key in [\"httpOnly\", \"secure\"]:\n cookie_har[key] = bool(key in attrs)\n\n # Expiration time needs to be formatted\n expire_ts = cookies.get_expiration_ts(attrs)\n if expire_ts is not None:\n cookie_har[\"expires\"] = format_datetime(datetime.fromtimestamp(expire_ts))\n\n rv.append(cookie_har)\n\n return rv\n\n\ndef format_request_cookies(fields):\n return format_cookies(cookies.group_cookies(fields))\n\n\ndef format_response_cookies(fields):\n return format_cookies((c[0], c[1].value, c[1].attrs) for c in fields)\n\n\ndef name_value(obj):\n \"\"\"\n Convert (key, value) pairs to HAR format.\n \"\"\"\n return [{\"name\": k, \"value\": v} for k, v in obj.items()]\n", "path": "examples/complex/har_dump.py"}, {"content": "\"\"\"\nUtility functions for decoding response bodies.\n\"\"\"\n\nimport codecs\nimport collections\nfrom io import BytesIO\n\nimport gzip\nimport zlib\nimport brotli\n\nfrom typing import Union\n\n\n# We have a shared single-element cache for encoding and decoding.\n# This is quite useful in practice, e.g.\n# flow.request.content = flow.request.content.replace(b\"foo\", b\"bar\")\n# does not require an .encode() call if content does not contain b\"foo\"\nCachedDecode = collections.namedtuple(\"CachedDecode\", \"encoded encoding errors decoded\")\n_cache = CachedDecode(None, None, None, None)\n\n\ndef decode(encoded: Union[str, bytes], encoding: str, errors: str='strict') -> Union[str, bytes]:\n \"\"\"\n Decode the given input object\n\n Returns:\n The decoded value\n\n Raises:\n ValueError, if decoding fails.\n \"\"\"\n if len(encoded) == 0:\n return encoded\n\n global _cache\n cached = (\n isinstance(encoded, bytes) and\n _cache.encoded == encoded and\n _cache.encoding == encoding and\n _cache.errors == errors\n )\n if cached:\n return _cache.decoded\n try:\n try:\n decoded = custom_decode[encoding](encoded)\n except KeyError:\n decoded = codecs.decode(encoded, encoding, errors)\n if encoding in (\"gzip\", \"deflate\", \"br\"):\n _cache = CachedDecode(encoded, encoding, errors, decoded)\n return decoded\n except TypeError:\n raise\n except Exception as e:\n raise ValueError(\"{} when decoding {} with {}: {}\".format(\n type(e).__name__,\n repr(encoded)[:10],\n repr(encoding),\n repr(e),\n ))\n\n\ndef encode(decoded: Union[str, bytes], encoding: str, errors: str='strict') -> Union[str, bytes]:\n \"\"\"\n Encode the given input object\n\n Returns:\n The encoded value\n\n Raises:\n ValueError, if encoding fails.\n \"\"\"\n if len(decoded) == 0:\n return decoded\n\n global _cache\n cached = (\n isinstance(decoded, bytes) and\n _cache.decoded == decoded and\n _cache.encoding == encoding and\n _cache.errors == errors\n )\n if cached:\n return _cache.encoded\n try:\n try:\n value = decoded\n if isinstance(value, str):\n value = decoded.encode()\n encoded = custom_encode[encoding](value)\n except KeyError:\n encoded = codecs.encode(decoded, encoding, errors)\n if encoding in (\"gzip\", \"deflate\", \"br\"):\n _cache = CachedDecode(encoded, encoding, errors, decoded)\n return encoded\n except TypeError:\n raise\n except Exception as e:\n raise ValueError(\"{} when encoding {} with {}: {}\".format(\n 
type(e).__name__,\n repr(decoded)[:10],\n repr(encoding),\n repr(e),\n ))\n\n\ndef identity(content):\n \"\"\"\n Returns content unchanged. Identity is the default value of\n Accept-Encoding headers.\n \"\"\"\n return content\n\n\ndef decode_gzip(content):\n gfile = gzip.GzipFile(fileobj=BytesIO(content))\n return gfile.read()\n\n\ndef encode_gzip(content):\n s = BytesIO()\n gf = gzip.GzipFile(fileobj=s, mode='wb')\n gf.write(content)\n gf.close()\n return s.getvalue()\n\n\ndef decode_brotli(content):\n return brotli.decompress(content)\n\n\ndef encode_brotli(content):\n return brotli.compress(content)\n\n\ndef decode_deflate(content):\n \"\"\"\n Returns decompressed data for DEFLATE. Some servers may respond with\n compressed data without a zlib header or checksum. An undocumented\n feature of zlib permits the lenient decompression of data missing both\n values.\n\n http://bugs.python.org/issue5784\n \"\"\"\n try:\n return zlib.decompress(content)\n except zlib.error:\n return zlib.decompress(content, -15)\n\n\ndef encode_deflate(content):\n \"\"\"\n Returns compressed content, always including zlib header and checksum.\n \"\"\"\n return zlib.compress(content)\n\n\ncustom_decode = {\n \"none\": identity,\n \"identity\": identity,\n \"gzip\": decode_gzip,\n \"deflate\": decode_deflate,\n \"br\": decode_brotli,\n}\ncustom_encode = {\n \"none\": identity,\n \"identity\": identity,\n \"gzip\": encode_gzip,\n \"deflate\": encode_deflate,\n \"br\": encode_brotli,\n}\n\n__all__ = [\"encode\", \"decode\"]\n", "path": "mitmproxy/net/http/encoding.py"}], "after_files": [{"content": "\"\"\"\nThis inline script can be used to dump flows as HAR files.\n\"\"\"\n\n\nimport json\nimport sys\nimport base64\nimport zlib\nimport os\n\nfrom datetime import datetime\nimport pytz\n\nimport mitmproxy\n\nfrom mitmproxy import version\nfrom mitmproxy.utils import strutils\nfrom mitmproxy.net.http import cookies\n\nHAR = {}\n\n# A list of server seen till now is maintained so we can avoid\n# using 'connect' time for entries that use an existing connection.\nSERVERS_SEEN = set()\n\n\ndef start():\n \"\"\"\n Called once on script startup before any other events.\n \"\"\"\n if len(sys.argv) != 2:\n raise ValueError(\n 'Usage: -s \"har_dump.py filename\" '\n '(- will output to stdout, filenames ending with .zhar '\n 'will result in compressed har)'\n )\n\n HAR.update({\n \"log\": {\n \"version\": \"1.2\",\n \"creator\": {\n \"name\": \"mitmproxy har_dump\",\n \"version\": \"0.1\",\n \"comment\": \"mitmproxy version %s\" % version.MITMPROXY\n },\n \"entries\": []\n }\n })\n\n\ndef response(flow):\n \"\"\"\n Called when a server response has been received.\n \"\"\"\n\n # -1 indicates that these values do not apply to current request\n ssl_time = -1\n connect_time = -1\n\n if flow.server_conn and flow.server_conn not in SERVERS_SEEN:\n connect_time = (flow.server_conn.timestamp_tcp_setup -\n flow.server_conn.timestamp_start)\n\n if flow.server_conn.timestamp_ssl_setup is not None:\n ssl_time = (flow.server_conn.timestamp_ssl_setup -\n flow.server_conn.timestamp_tcp_setup)\n\n SERVERS_SEEN.add(flow.server_conn)\n\n # Calculate raw timings from timestamps. DNS timings can not be calculated\n # for lack of a way to measure it. The same goes for HAR blocked.\n # mitmproxy will open a server connection as soon as it receives the host\n # and port from the client connection. 
So, the time spent waiting is actually\n # spent waiting between request.timestamp_end and response.timestamp_start\n # thus it correlates to HAR wait instead.\n timings_raw = {\n 'send': flow.request.timestamp_end - flow.request.timestamp_start,\n 'receive': flow.response.timestamp_end - flow.response.timestamp_start,\n 'wait': flow.response.timestamp_start - flow.request.timestamp_end,\n 'connect': connect_time,\n 'ssl': ssl_time,\n }\n\n # HAR timings are integers in ms, so we re-encode the raw timings to that format.\n timings = dict([(k, int(1000 * v)) for k, v in timings_raw.items()])\n\n # full_time is the sum of all timings.\n # Timings set to -1 will be ignored as per spec.\n full_time = sum(v for v in timings.values() if v > -1)\n\n started_date_time = format_datetime(datetime.utcfromtimestamp(flow.request.timestamp_start))\n\n # Response body size and encoding\n response_body_size = len(flow.response.raw_content)\n response_body_decoded_size = len(flow.response.content)\n response_body_compression = response_body_decoded_size - response_body_size\n\n entry = {\n \"startedDateTime\": started_date_time,\n \"time\": full_time,\n \"request\": {\n \"method\": flow.request.method,\n \"url\": flow.request.url,\n \"httpVersion\": flow.request.http_version,\n \"cookies\": format_request_cookies(flow.request.cookies.fields),\n \"headers\": name_value(flow.request.headers),\n \"queryString\": name_value(flow.request.query or {}),\n \"headersSize\": len(str(flow.request.headers)),\n \"bodySize\": len(flow.request.content),\n },\n \"response\": {\n \"status\": flow.response.status_code,\n \"statusText\": flow.response.reason,\n \"httpVersion\": flow.response.http_version,\n \"cookies\": format_response_cookies(flow.response.cookies.fields),\n \"headers\": name_value(flow.response.headers),\n \"content\": {\n \"size\": response_body_size,\n \"compression\": response_body_compression,\n \"mimeType\": flow.response.headers.get('Content-Type', '')\n },\n \"redirectURL\": flow.response.headers.get('Location', ''),\n \"headersSize\": len(str(flow.response.headers)),\n \"bodySize\": response_body_size,\n },\n \"cache\": {},\n \"timings\": timings,\n }\n\n # Store binary data as base64\n if strutils.is_mostly_bin(flow.response.content):\n entry[\"response\"][\"content\"][\"text\"] = base64.b64encode(flow.response.content).decode()\n entry[\"response\"][\"content\"][\"encoding\"] = \"base64\"\n else:\n entry[\"response\"][\"content\"][\"text\"] = flow.response.get_text(strict=False)\n\n if flow.request.method in [\"POST\", \"PUT\", \"PATCH\"]:\n params = [\n {\"name\": a, \"value\": b}\n for a, b in flow.request.urlencoded_form.items(multi=True)\n ]\n entry[\"request\"][\"postData\"] = {\n \"mimeType\": flow.request.headers.get(\"Content-Type\", \"\"),\n \"text\": flow.request.get_text(strict=False),\n \"params\": params\n }\n\n if flow.server_conn.connected():\n entry[\"serverIPAddress\"] = str(flow.server_conn.ip_address.address[0])\n\n HAR[\"log\"][\"entries\"].append(entry)\n\n\ndef done():\n \"\"\"\n Called once on script shutdown, after any other events.\n \"\"\"\n dump_file = sys.argv[1]\n\n json_dump = json.dumps(HAR, indent=2) # type: str\n\n if dump_file == '-':\n mitmproxy.ctx.log(json_dump)\n else:\n raw = json_dump.encode() # type: bytes\n if dump_file.endswith('.zhar'):\n raw = zlib.compress(raw, 9)\n\n with open(os.path.expanduser(dump_file), \"wb\") as f:\n f.write(raw)\n\n mitmproxy.ctx.log(\"HAR dump finished (wrote %s bytes to file)\" % len(json_dump))\n\n\ndef 
format_datetime(dt):\n return dt.replace(tzinfo=pytz.timezone(\"UTC\")).isoformat()\n\n\ndef format_cookies(cookie_list):\n rv = []\n\n for name, value, attrs in cookie_list:\n cookie_har = {\n \"name\": name,\n \"value\": value,\n }\n\n # HAR only needs some attributes\n for key in [\"path\", \"domain\", \"comment\"]:\n if key in attrs:\n cookie_har[key] = attrs[key]\n\n # These keys need to be boolean!\n for key in [\"httpOnly\", \"secure\"]:\n cookie_har[key] = bool(key in attrs)\n\n # Expiration time needs to be formatted\n expire_ts = cookies.get_expiration_ts(attrs)\n if expire_ts is not None:\n cookie_har[\"expires\"] = format_datetime(datetime.fromtimestamp(expire_ts))\n\n rv.append(cookie_har)\n\n return rv\n\n\ndef format_request_cookies(fields):\n return format_cookies(cookies.group_cookies(fields))\n\n\ndef format_response_cookies(fields):\n return format_cookies((c[0], c[1].value, c[1].attrs) for c in fields)\n\n\ndef name_value(obj):\n \"\"\"\n Convert (key, value) pairs to HAR format.\n \"\"\"\n return [{\"name\": k, \"value\": v} for k, v in obj.items()]\n", "path": "examples/complex/har_dump.py"}, {"content": "\"\"\"\nUtility functions for decoding response bodies.\n\"\"\"\n\nimport codecs\nimport collections\nfrom io import BytesIO\n\nimport gzip\nimport zlib\nimport brotli\n\nfrom typing import Union\n\n\n# We have a shared single-element cache for encoding and decoding.\n# This is quite useful in practice, e.g.\n# flow.request.content = flow.request.content.replace(b\"foo\", b\"bar\")\n# does not require an .encode() call if content does not contain b\"foo\"\nCachedDecode = collections.namedtuple(\"CachedDecode\", \"encoded encoding errors decoded\")\n_cache = CachedDecode(None, None, None, None)\n\n\ndef decode(encoded: Union[str, bytes], encoding: str, errors: str='strict') -> Union[str, bytes]:\n \"\"\"\n Decode the given input object\n\n Returns:\n The decoded value\n\n Raises:\n ValueError, if decoding fails.\n \"\"\"\n if encoded is None:\n return None\n\n global _cache\n cached = (\n isinstance(encoded, bytes) and\n _cache.encoded == encoded and\n _cache.encoding == encoding and\n _cache.errors == errors\n )\n if cached:\n return _cache.decoded\n try:\n try:\n decoded = custom_decode[encoding](encoded)\n except KeyError:\n decoded = codecs.decode(encoded, encoding, errors)\n if encoding in (\"gzip\", \"deflate\", \"br\"):\n _cache = CachedDecode(encoded, encoding, errors, decoded)\n return decoded\n except TypeError:\n raise\n except Exception as e:\n raise ValueError(\"{} when decoding {} with {}: {}\".format(\n type(e).__name__,\n repr(encoded)[:10],\n repr(encoding),\n repr(e),\n ))\n\n\ndef encode(decoded: Union[str, bytes], encoding: str, errors: str='strict') -> Union[str, bytes]:\n \"\"\"\n Encode the given input object\n\n Returns:\n The encoded value\n\n Raises:\n ValueError, if encoding fails.\n \"\"\"\n if decoded is None:\n return None\n\n global _cache\n cached = (\n isinstance(decoded, bytes) and\n _cache.decoded == decoded and\n _cache.encoding == encoding and\n _cache.errors == errors\n )\n if cached:\n return _cache.encoded\n try:\n try:\n encoded = custom_encode[encoding](decoded)\n except KeyError:\n encoded = codecs.encode(decoded, encoding, errors)\n if encoding in (\"gzip\", \"deflate\", \"br\"):\n _cache = CachedDecode(encoded, encoding, errors, decoded)\n return encoded\n except TypeError:\n raise\n except Exception as e:\n raise ValueError(\"{} when encoding {} with {}: {}\".format(\n type(e).__name__,\n repr(decoded)[:10],\n 
repr(encoding),\n repr(e),\n ))\n\n\ndef identity(content):\n \"\"\"\n Returns content unchanged. Identity is the default value of\n Accept-Encoding headers.\n \"\"\"\n return content\n\n\ndef decode_gzip(content: bytes) -> bytes:\n if not content:\n return b\"\"\n gfile = gzip.GzipFile(fileobj=BytesIO(content))\n return gfile.read()\n\n\ndef encode_gzip(content: bytes) -> bytes:\n s = BytesIO()\n gf = gzip.GzipFile(fileobj=s, mode='wb')\n gf.write(content)\n gf.close()\n return s.getvalue()\n\n\ndef decode_brotli(content: bytes) -> bytes:\n if not content:\n return b\"\"\n return brotli.decompress(content)\n\n\ndef encode_brotli(content: bytes) -> bytes:\n return brotli.compress(content)\n\n\ndef decode_deflate(content: bytes) -> bytes:\n \"\"\"\n Returns decompressed data for DEFLATE. Some servers may respond with\n compressed data without a zlib header or checksum. An undocumented\n feature of zlib permits the lenient decompression of data missing both\n values.\n\n http://bugs.python.org/issue5784\n \"\"\"\n if not content:\n return b\"\"\n try:\n return zlib.decompress(content)\n except zlib.error:\n return zlib.decompress(content, -15)\n\n\ndef encode_deflate(content: bytes) -> bytes:\n \"\"\"\n Returns compressed content, always including zlib header and checksum.\n \"\"\"\n return zlib.compress(content)\n\n\ncustom_decode = {\n \"none\": identity,\n \"identity\": identity,\n \"gzip\": decode_gzip,\n \"deflate\": decode_deflate,\n \"br\": decode_brotli,\n}\ncustom_encode = {\n \"none\": identity,\n \"identity\": identity,\n \"gzip\": encode_gzip,\n \"deflate\": encode_deflate,\n \"br\": encode_brotli,\n}\n\n__all__ = [\"encode\", \"decode\"]\n", "path": "mitmproxy/net/http/encoding.py"}]} | 4,041 | 794 |
gh_patches_debug_23721 | rasdani/github-patches | git_diff | ray-project__ray-11021 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Object spilling] Raylet automatically reloads spilled objects back into object store
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/ray/experimental/object_spilling.py`
Content:
```
1 import ray
2
3
4 def force_spill_objects(object_refs):
5 """Force spilling objects to external storage.
6
7 Args:
8 object_refs: Object refs of the objects to be
9 spilled.
10 """
11 core_worker = ray.worker.global_worker.core_worker
12 # Make sure that the values are object refs.
13 for object_ref in object_refs:
14 if not isinstance(object_ref, ray.ObjectRef):
15 raise TypeError(
16 f"Attempting to call `force_spill_objects` on the "
17 f"value {object_ref}, which is not an ray.ObjectRef.")
18 return core_worker.force_spill_objects(object_refs)
19
20
21 def force_restore_spilled_objects(object_refs):
22 """Force restoring objects from external storage.
23
24 Args:
25 object_refs: Object refs of the objects to be
26 restored.
27 """
28 core_worker = ray.worker.global_worker.core_worker
29 # Make sure that the values are object refs.
30 for object_ref in object_refs:
31 if not isinstance(object_ref, ray.ObjectRef):
32 raise TypeError(
33 f"Attempting to call `force_restore_spilled_objects` on the "
34 f"value {object_ref}, which is not an ray.ObjectRef.")
35 return core_worker.force_restore_spilled_objects(object_refs)
36
```
Path: `python/ray/experimental/__init__.py`
Content:
```
1 from .dynamic_resources import set_resource
2 from .object_spilling import force_spill_objects, force_restore_spilled_objects
3 __all__ = [
4 "set_resource",
5 "force_spill_objects",
6 "force_restore_spilled_objects",
7 ]
8
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/python/ray/experimental/__init__.py b/python/ray/experimental/__init__.py
--- a/python/ray/experimental/__init__.py
+++ b/python/ray/experimental/__init__.py
@@ -1,7 +1,6 @@
from .dynamic_resources import set_resource
-from .object_spilling import force_spill_objects, force_restore_spilled_objects
+from .object_spilling import force_spill_objects
__all__ = [
"set_resource",
"force_spill_objects",
- "force_restore_spilled_objects",
]
diff --git a/python/ray/experimental/object_spilling.py b/python/ray/experimental/object_spilling.py
--- a/python/ray/experimental/object_spilling.py
+++ b/python/ray/experimental/object_spilling.py
@@ -16,20 +16,3 @@
f"Attempting to call `force_spill_objects` on the "
f"value {object_ref}, which is not an ray.ObjectRef.")
return core_worker.force_spill_objects(object_refs)
-
-
-def force_restore_spilled_objects(object_refs):
- """Force restoring objects from external storage.
-
- Args:
- object_refs: Object refs of the objects to be
- restored.
- """
- core_worker = ray.worker.global_worker.core_worker
- # Make sure that the values are object refs.
- for object_ref in object_refs:
- if not isinstance(object_ref, ray.ObjectRef):
- raise TypeError(
- f"Attempting to call `force_restore_spilled_objects` on the "
- f"value {object_ref}, which is not an ray.ObjectRef.")
- return core_worker.force_restore_spilled_objects(object_refs)
| {"golden_diff": "diff --git a/python/ray/experimental/__init__.py b/python/ray/experimental/__init__.py\n--- a/python/ray/experimental/__init__.py\n+++ b/python/ray/experimental/__init__.py\n@@ -1,7 +1,6 @@\n from .dynamic_resources import set_resource\n-from .object_spilling import force_spill_objects, force_restore_spilled_objects\n+from .object_spilling import force_spill_objects\n __all__ = [\n \"set_resource\",\n \"force_spill_objects\",\n- \"force_restore_spilled_objects\",\n ]\ndiff --git a/python/ray/experimental/object_spilling.py b/python/ray/experimental/object_spilling.py\n--- a/python/ray/experimental/object_spilling.py\n+++ b/python/ray/experimental/object_spilling.py\n@@ -16,20 +16,3 @@\n f\"Attempting to call `force_spill_objects` on the \"\n f\"value {object_ref}, which is not an ray.ObjectRef.\")\n return core_worker.force_spill_objects(object_refs)\n-\n-\n-def force_restore_spilled_objects(object_refs):\n- \"\"\"Force restoring objects from external storage.\n-\n- Args:\n- object_refs: Object refs of the objects to be\n- restored.\n- \"\"\"\n- core_worker = ray.worker.global_worker.core_worker\n- # Make sure that the values are object refs.\n- for object_ref in object_refs:\n- if not isinstance(object_ref, ray.ObjectRef):\n- raise TypeError(\n- f\"Attempting to call `force_restore_spilled_objects` on the \"\n- f\"value {object_ref}, which is not an ray.ObjectRef.\")\n- return core_worker.force_restore_spilled_objects(object_refs)\n", "issue": "[Object spilling] Raylet automatically reloads spilled objects back into object store\n\r\n\n", "before_files": [{"content": "import ray\n\n\ndef force_spill_objects(object_refs):\n \"\"\"Force spilling objects to external storage.\n\n Args:\n object_refs: Object refs of the objects to be\n spilled.\n \"\"\"\n core_worker = ray.worker.global_worker.core_worker\n # Make sure that the values are object refs.\n for object_ref in object_refs:\n if not isinstance(object_ref, ray.ObjectRef):\n raise TypeError(\n f\"Attempting to call `force_spill_objects` on the \"\n f\"value {object_ref}, which is not an ray.ObjectRef.\")\n return core_worker.force_spill_objects(object_refs)\n\n\ndef force_restore_spilled_objects(object_refs):\n \"\"\"Force restoring objects from external storage.\n\n Args:\n object_refs: Object refs of the objects to be\n restored.\n \"\"\"\n core_worker = ray.worker.global_worker.core_worker\n # Make sure that the values are object refs.\n for object_ref in object_refs:\n if not isinstance(object_ref, ray.ObjectRef):\n raise TypeError(\n f\"Attempting to call `force_restore_spilled_objects` on the \"\n f\"value {object_ref}, which is not an ray.ObjectRef.\")\n return core_worker.force_restore_spilled_objects(object_refs)\n", "path": "python/ray/experimental/object_spilling.py"}, {"content": "from .dynamic_resources import set_resource\nfrom .object_spilling import force_spill_objects, force_restore_spilled_objects\n__all__ = [\n \"set_resource\",\n \"force_spill_objects\",\n \"force_restore_spilled_objects\",\n]\n", "path": "python/ray/experimental/__init__.py"}], "after_files": [{"content": "import ray\n\n\ndef force_spill_objects(object_refs):\n \"\"\"Force spilling objects to external storage.\n\n Args:\n object_refs: Object refs of the objects to be\n spilled.\n \"\"\"\n core_worker = ray.worker.global_worker.core_worker\n # Make sure that the values are object refs.\n for object_ref in object_refs:\n if not isinstance(object_ref, ray.ObjectRef):\n raise TypeError(\n f\"Attempting to call `force_spill_objects` 
on the \"\n f\"value {object_ref}, which is not an ray.ObjectRef.\")\n return core_worker.force_spill_objects(object_refs)\n", "path": "python/ray/experimental/object_spilling.py"}, {"content": "from .dynamic_resources import set_resource\nfrom .object_spilling import force_spill_objects\n__all__ = [\n \"set_resource\",\n \"force_spill_objects\",\n]\n", "path": "python/ray/experimental/__init__.py"}]} | 683 | 366 |
gh_patches_debug_9575 | rasdani/github-patches | git_diff | ansible-collections__community.vmware-1335 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
community.vmware.vmware_cfg_backup: Failed to write backup file
<!--- Verify first that your issue is not already reported on GitHub -->
<!--- Also test if the latest release and devel branch are affected too -->
<!--- Complete *all* sections as described, this form is processed automatically -->
##### SUMMARY
When I run the ansible playbook it's return "Failed to write backup file" error.
```
[WARNING]: provided hosts list is empty, only localhost is available. Note that the implicit localhost does not match 'all'
PLAY [localhost] *************************************************************************************************************************************************************************************************************************
TASK [Gathering Facts] *******************************************************************************************************************************************************************************************************************
ok: [localhost]
TASK [ESXI backup test] ******************************************************************************************************************************************************************************************************************
fatal: [localhost -> localhost]: FAILED! => {"changed": false, "msg": "Failed to write backup file. Ensure that the dest path exists and is writable. Details : <urlopen error [Errno 111] Connection refused>"}
PLAY RECAP *******************************************************************************************************************************************************************************************************************************
localhost : ok=1 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0
```
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
<!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure -->
`community.vmware.vmware_cfg_backup`
##### ANSIBLE VERSION
<!--- Paste verbatim output from "ansible --version" between quotes -->
```paste below
ansible 2.9.6
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/sergen/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3/dist-packages/ansible
executable location = /usr/bin/ansible
python version = 3.8.10 (default, Nov 26 2021, 20:14:08) [GCC 9.3.0]
```
##### COLLECTION VERSION
<!--- Paste verbatim output from "ansible-galaxy collection list <namespace>.<collection>" between the quotes
for example: ansible-galaxy collection list community.general
-->
```paste below
ansible-galaxy collection list community.vmware
usage: ansible-galaxy collection [-h] COLLECTION_ACTION ...
ansible-galaxy collection: error: argument COLLECTION_ACTION: invalid choice: 'list' (choose from 'init', 'build', 'publish', 'install')
```
List command is not working but I install collection with this command
`ansible-galaxy collection install community.vmware`
##### CONFIGURATION
<!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes -->
```paste below
```
no output
##### OS / ENVIRONMENT
<!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. -->
Ubuntu 20.04
VMware ESXI 7.0.3
##YAML FILE
returns the same result in both files
```
- hosts: localhost
vars:
esxi_hostname: "192.168.88.154"
esxi_username: "root"
esxi_password: "password"
tasks:
- name: ESXI backup test
local_action:
module: vmware_cfg_backup
hostname: '{{esxi_hostname}}'
username: '{{esxi_username}}'
password: '{{esxi_password}}'
state: saved
dest: /tmp/
validate_certs: no
```
```
cat vmware.yaml
- hosts: localhost
vars:
esxi_hostname: "192.168.88.154"
esxi_username: "root"
esxi_password: "password"
tasks:
- name: Save
community.vmware.vmware_cfg_backup:
hostname: '{{ esxi_hostname }}'
username: '{{ esxi_username }}'
password: '{{ esxi_password }}'
state: saved
dest: /tmp/
validate_certs: no
delegate_to: localhost
```
##### STEPS TO REPRODUCE
<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->
<!--- Paste example playbooks or commands between quotes below -->
```yaml
```
<!--- HINT: You can paste gist.github.com links for larger files -->
##### EXPECTED RESULTS
<!--- Describe what you expected to happen when running the steps above -->
##### ACTUAL RESULTS
<!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) -->
<!--- Paste verbatim command output between quotes -->
```
ansible-playbook vmware.yaml
[WARNING]: provided hosts list is empty, only localhost is available. Note that the implicit localhost does not match 'all'
PLAY [localhost] *************************************************************************************************************************************************************************************************************************
TASK [Gathering Facts] *******************************************************************************************************************************************************************************************************************
ok: [localhost]
TASK [Save] ******************************************************************************************************************************************************************************************************************************
fatal: [localhost -> localhost]: FAILED! => {"changed": false, "msg": "Failed to write backup file. Ensure that the dest path exists and is writable. Details : <urlopen error [Errno 111] Connection refused>"}
PLAY RECAP *******************************************************************************************************************************************************************************************************************************
localhost : ok=1 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/modules/vmware_cfg_backup.py`
Content:
```
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # Copyright: (c) 2017, IBM Corp
5 # Author(s): Andreas Nafpliotis <[email protected]>
6 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
7
8 from __future__ import absolute_import, division, print_function
9 __metaclass__ = type
10
11
12 DOCUMENTATION = r'''
13 ---
14 module: vmware_cfg_backup
15 short_description: Backup / Restore / Reset ESXi host configuration
16 description:
17 - This module can be used to perform various operations related to backup, restore and reset of ESXi host configuration.
18 author:
19 - Andreas Nafpliotis (@nafpliot-ibm)
20 notes:
21 - Tested on ESXi 6.0
22 - Works only for ESXi hosts
23 - For configuration load or reset, the host will be switched automatically to maintenance mode.
24 requirements:
25 - "python >= 2.6"
26 - PyVmomi installed
27 options:
28 esxi_hostname:
29 description:
30 - Name of ESXi server. This is required only if authentication against a vCenter is done.
31 required: False
32 type: str
33 dest:
34 description:
35 - The destination where the ESXi configuration bundle will be saved. The I(dest) can be a folder or a file.
36 - If I(dest) is a folder, the backup file will be saved in the folder with the default filename generated from the ESXi server.
37 - If I(dest) is a file, the backup file will be saved with that filename. The file extension will always be .tgz.
38 type: path
39 src:
40 description:
41 - The file containing the ESXi configuration that will be restored.
42 type: path
43 state:
44 description:
45 - If C(saved), the .tgz backup bundle will be saved in I(dest).
46 - If C(absent), the host configuration will be reset to default values.
47 - If C(loaded), the backup file in I(src) will be loaded to the ESXi host rewriting the hosts settings.
48 choices: [saved, absent, loaded]
49 type: str
50 required: True
51 extends_documentation_fragment:
52 - community.vmware.vmware.documentation
53
54 '''
55
56 EXAMPLES = r'''
57 - name: Save the ESXi configuration locally by authenticating directly against the ESXi host
58 community.vmware.vmware_cfg_backup:
59 hostname: '{{ esxi_hostname }}'
60 username: '{{ esxi_username }}'
61 password: '{{ esxi_password }}'
62 state: saved
63 dest: /tmp/
64 delegate_to: localhost
65
66 - name: Save the ESXi configuration locally by authenticating against the vCenter and selecting the ESXi host
67 community.vmware.vmware_cfg_backup:
68 hostname: '{{ vcenter_hostname }}'
69 esxi_hostname: '{{ esxi_hostname }}'
70 username: '{{ esxi_username }}'
71 password: '{{ esxi_password }}'
72 state: saved
73 dest: /tmp/
74 delegate_to: localhost
75 '''
76
77 RETURN = r'''
78 dest_file:
79 description: The full path of where the file holding the ESXi configurations was stored
80 returned: changed
81 type: str
82 sample: /tmp/configBundle-esxi.host.domain.tgz
83 '''
84
85 import os
86 try:
87 from pyVmomi import vim
88 except ImportError:
89 pass
90
91 from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, get_all_objs, wait_for_task, PyVmomi
92 from ansible.module_utils.basic import AnsibleModule
93 from ansible.module_utils.urls import open_url
94 from ansible.module_utils.six.moves.urllib.error import HTTPError
95 from ansible.module_utils._text import to_native
96
97
98 class VMwareConfigurationBackup(PyVmomi):
99 def __init__(self, module):
100 super(VMwareConfigurationBackup, self).__init__(module)
101 self.state = self.module.params['state']
102 self.dest = self.module.params['dest']
103 self.src = self.module.params['src']
104 self.hostname = self.module.params['hostname']
105 self.username = self.module.params['username']
106 self.password = self.module.params['password']
107 self.validate_certs = self.module.params['validate_certs']
108 self.esxi_hostname = self.module.params.get('esxi_hostname', None)
109 self.host = self.find_host_system()
110
111 def find_host_system(self):
112 if self.esxi_hostname:
113 host_system_obj = self.find_hostsystem_by_name(host_name=self.esxi_hostname)
114 if host_system_obj:
115 return host_system_obj
116 else:
117 self.module.fail_json(msg="Failed to find ESXi %s" % self.esxi_hostname)
118
119 host_system = get_all_objs(self.content, [vim.HostSystem])
120 return list(host_system)[0]
121
122 def process_state(self):
123 if self.state == 'saved':
124 self.save_configuration()
125
126 if self.state == 'absent':
127 self.reset_configuration()
128
129 if self.state == 'loaded':
130 self.load_configuration()
131
132 def load_configuration(self):
133 if not os.path.isfile(self.src):
134 self.module.fail_json(msg="Source file {0} does not exist".format(self.src))
135
136 url = self.host.configManager.firmwareSystem.QueryFirmwareConfigUploadURL()
137 url = url.replace('*', self.host.name)
138 # find manually the url if there is a redirect because urllib2 -per RFC- doesn't do automatic redirects for PUT requests
139 try:
140 open_url(url=url, method='HEAD', validate_certs=self.validate_certs)
141 except HTTPError as e:
142 url = e.geturl()
143
144 try:
145 with open(self.src, 'rb') as file:
146 data = file.read()
147 open_url(
148 url=url, data=data, method='PUT', validate_certs=self.validate_certs,
149 url_username=self.username, url_password=self.password, force_basic_auth=True)
150 except Exception as e:
151 self.module.fail_json(msg=to_native(e))
152
153 if not self.host.runtime.inMaintenanceMode:
154 self.enter_maintenance()
155 try:
156 self.host.configManager.firmwareSystem.RestoreFirmwareConfiguration(force=True)
157 self.module.exit_json(changed=True)
158 except Exception as e:
159 self.exit_maintenance()
160 self.module.fail_json(msg=to_native(e))
161
162 def reset_configuration(self):
163 if not self.host.runtime.inMaintenanceMode:
164 self.enter_maintenance()
165 try:
166 self.host.configManager.firmwareSystem.ResetFirmwareToFactoryDefaults()
167 self.module.exit_json(changed=True)
168 except Exception as e:
169 self.exit_maintenance()
170 self.module.fail_json(msg=to_native(e))
171
172 def save_configuration(self):
173 url = self.host.configManager.firmwareSystem.BackupFirmwareConfiguration()
174 url = url.replace('*', self.host.name)
175 if os.path.isdir(self.dest):
176 filename = url.rsplit('/', 1)[1]
177 self.dest = os.path.join(self.dest, filename)
178 else:
179 filename, file_extension = os.path.splitext(self.dest)
180 if file_extension != ".tgz":
181 self.dest = filename + ".tgz"
182 try:
183 request = open_url(url=url, validate_certs=self.validate_certs)
184 with open(self.dest, "wb") as file:
185 file.write(request.read())
186 self.module.exit_json(changed=True, dest_file=self.dest)
187 except IOError as e:
188 self.module.fail_json(msg="Failed to write backup file. Ensure that "
189 "the dest path exists and is writable. Details : %s" % to_native(e))
190 except Exception as e:
191 self.module.fail_json(msg=to_native(e))
192
193 def enter_maintenance(self):
194 try:
195 task = self.host.EnterMaintenanceMode_Task(timeout=15)
196 success, result = wait_for_task(task)
197 except Exception as e:
198 self.module.fail_json(msg="Failed to enter maintenance mode."
199 " Ensure that there are no powered on machines on the host. %s" % to_native(e))
200
201 def exit_maintenance(self):
202 try:
203 task = self.host.ExitMaintenanceMode_Task(timeout=15)
204 success, result = wait_for_task(task)
205 except Exception as generic_exc:
206 self.module.fail_json(msg="Failed to exit maintenance mode due to %s" % to_native(generic_exc))
207
208
209 def main():
210 argument_spec = vmware_argument_spec()
211 argument_spec.update(dict(dest=dict(required=False, type='path'),
212 esxi_hostname=dict(required=False, type='str'),
213 src=dict(required=False, type='path'),
214 state=dict(required=True, choices=['saved', 'absent', 'loaded'], type='str')))
215 required_if = [('state', 'saved', ['dest']),
216 ('state', 'loaded', ['src'])]
217
218 module = AnsibleModule(argument_spec=argument_spec,
219 required_if=required_if,
220 supports_check_mode=False)
221
222 vmware_cfg_backup = VMwareConfigurationBackup(module)
223 vmware_cfg_backup.process_state()
224
225
226 if __name__ == '__main__':
227 main()
228
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugins/modules/vmware_cfg_backup.py b/plugins/modules/vmware_cfg_backup.py
--- a/plugins/modules/vmware_cfg_backup.py
+++ b/plugins/modules/vmware_cfg_backup.py
@@ -172,6 +172,8 @@
def save_configuration(self):
url = self.host.configManager.firmwareSystem.BackupFirmwareConfiguration()
url = url.replace('*', self.host.name)
+ if self.module.params["port"] == 443:
+ url = url.replace("http:", "https:")
if os.path.isdir(self.dest):
filename = url.rsplit('/', 1)[1]
self.dest = os.path.join(self.dest, filename)
| {"golden_diff": "diff --git a/plugins/modules/vmware_cfg_backup.py b/plugins/modules/vmware_cfg_backup.py\n--- a/plugins/modules/vmware_cfg_backup.py\n+++ b/plugins/modules/vmware_cfg_backup.py\n@@ -172,6 +172,8 @@\n def save_configuration(self):\n url = self.host.configManager.firmwareSystem.BackupFirmwareConfiguration()\n url = url.replace('*', self.host.name)\n+ if self.module.params[\"port\"] == 443:\n+ url = url.replace(\"http:\", \"https:\")\n if os.path.isdir(self.dest):\n filename = url.rsplit('/', 1)[1]\n self.dest = os.path.join(self.dest, filename)\n", "issue": "community.vmware.vmware_cfg_backup: Failed to write backup file\n<!--- Verify first that your issue is not already reported on GitHub -->\r\n<!--- Also test if the latest release and devel branch are affected too -->\r\n<!--- Complete *all* sections as described, this form is processed automatically -->\r\n\r\n##### SUMMARY\r\nWhen I run the ansible playbook it's return \"Failed to write backup file\" error.\r\n\r\n```\r\n[WARNING]: provided hosts list is empty, only localhost is available. Note that the implicit localhost does not match 'all'\r\n\r\nPLAY [localhost] *************************************************************************************************************************************************************************************************************************\r\n\r\nTASK [Gathering Facts] *******************************************************************************************************************************************************************************************************************\r\nok: [localhost]\r\n\r\nTASK [ESXI backup test] ******************************************************************************************************************************************************************************************************************\r\nfatal: [localhost -> localhost]: FAILED! => {\"changed\": false, \"msg\": \"Failed to write backup file. Ensure that the dest path exists and is writable. 
Details : <urlopen error [Errno 111] Connection refused>\"}\r\n\r\nPLAY RECAP *******************************************************************************************************************************************************************************************************************************\r\nlocalhost : ok=1 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0\r\n```\r\n\r\n\r\n##### ISSUE TYPE\r\n- Bug Report\r\n\r\n##### COMPONENT NAME\r\n<!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure -->\r\n`community.vmware.vmware_cfg_backup`\r\n\r\n##### ANSIBLE VERSION\r\n<!--- Paste verbatim output from \"ansible --version\" between quotes -->\r\n```paste below\r\nansible 2.9.6\r\n config file = /etc/ansible/ansible.cfg\r\n configured module search path = ['/home/sergen/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /usr/lib/python3/dist-packages/ansible\r\n executable location = /usr/bin/ansible\r\n python version = 3.8.10 (default, Nov 26 2021, 20:14:08) [GCC 9.3.0]\r\n\r\n```\r\n\r\n##### COLLECTION VERSION\r\n<!--- Paste verbatim output from \"ansible-galaxy collection list <namespace>.<collection>\" between the quotes\r\nfor example: ansible-galaxy collection list community.general\r\n-->\r\n```paste below\r\nansible-galaxy collection list community.vmware\r\nusage: ansible-galaxy collection [-h] COLLECTION_ACTION ...\r\nansible-galaxy collection: error: argument COLLECTION_ACTION: invalid choice: 'list' (choose from 'init', 'build', 'publish', 'install')\r\n\r\n```\r\nList command is not working but I install collection with this command\r\n\r\n`ansible-galaxy collection install community.vmware`\r\n\r\n##### CONFIGURATION\r\n<!--- Paste verbatim output from \"ansible-config dump --only-changed\" between quotes -->\r\n```paste below\r\n\r\n```\r\nno output\r\n\r\n##### OS / ENVIRONMENT\r\n<!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. -->\r\nUbuntu 20.04\r\nVMware ESXI 7.0.3\r\n\r\n##YAML FILE\r\n\r\nreturns the same result in both files \r\n\r\n```\r\n- hosts: localhost\r\n vars:\r\n esxi_hostname: \"192.168.88.154\"\r\n esxi_username: \"root\"\r\n esxi_password: \"password\"\r\n tasks:\r\n - name: ESXI backup test\r\n local_action:\r\n module: vmware_cfg_backup\r\n hostname: '{{esxi_hostname}}'\r\n username: '{{esxi_username}}'\r\n password: '{{esxi_password}}'\r\n state: saved\r\n dest: /tmp/\r\n validate_certs: no\r\n```\r\n```\r\n\r\ncat vmware.yaml\r\n- hosts: localhost\r\n vars:\r\n esxi_hostname: \"192.168.88.154\"\r\n esxi_username: \"root\"\r\n esxi_password: \"password\"\r\n tasks:\r\n - name: Save\r\n community.vmware.vmware_cfg_backup:\r\n hostname: '{{ esxi_hostname }}'\r\n username: '{{ esxi_username }}'\r\n password: '{{ esxi_password }}'\r\n state: saved\r\n dest: /tmp/\r\n validate_certs: no\r\n delegate_to: localhost\r\n\r\n```\r\n\r\n\r\n##### STEPS TO REPRODUCE\r\n<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml\r\n\r\n```\r\n\r\n<!--- HINT: You can paste gist.github.com links for larger files -->\r\n\r\n##### EXPECTED RESULTS\r\n<!--- Describe what you expected to happen when running the steps above -->\r\n\r\n\r\n##### ACTUAL RESULTS\r\n<!--- Describe what actually happened. 
If possible run with extra verbosity (-vvvv) -->\r\n\r\n<!--- Paste verbatim command output between quotes -->\r\n```\r\nansible-playbook vmware.yaml\r\n[WARNING]: provided hosts list is empty, only localhost is available. Note that the implicit localhost does not match 'all'\r\n\r\nPLAY [localhost] *************************************************************************************************************************************************************************************************************************\r\n\r\nTASK [Gathering Facts] *******************************************************************************************************************************************************************************************************************\r\nok: [localhost]\r\n\r\nTASK [Save] ******************************************************************************************************************************************************************************************************************************\r\nfatal: [localhost -> localhost]: FAILED! => {\"changed\": false, \"msg\": \"Failed to write backup file. Ensure that the dest path exists and is writable. Details : <urlopen error [Errno 111] Connection refused>\"}\r\n\r\nPLAY RECAP *******************************************************************************************************************************************************************************************************************************\r\nlocalhost : ok=1 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0\r\n\r\n\r\n```\r\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2017, IBM Corp\n# Author(s): Andreas Nafpliotis <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = r'''\n---\nmodule: vmware_cfg_backup\nshort_description: Backup / Restore / Reset ESXi host configuration\ndescription:\n - This module can be used to perform various operations related to backup, restore and reset of ESXi host configuration.\nauthor:\n - Andreas Nafpliotis (@nafpliot-ibm)\nnotes:\n - Tested on ESXi 6.0\n - Works only for ESXi hosts\n - For configuration load or reset, the host will be switched automatically to maintenance mode.\nrequirements:\n - \"python >= 2.6\"\n - PyVmomi installed\noptions:\n esxi_hostname:\n description:\n - Name of ESXi server. This is required only if authentication against a vCenter is done.\n required: False\n type: str\n dest:\n description:\n - The destination where the ESXi configuration bundle will be saved. The I(dest) can be a folder or a file.\n - If I(dest) is a folder, the backup file will be saved in the folder with the default filename generated from the ESXi server.\n - If I(dest) is a file, the backup file will be saved with that filename. 
The file extension will always be .tgz.\n type: path\n src:\n description:\n - The file containing the ESXi configuration that will be restored.\n type: path\n state:\n description:\n - If C(saved), the .tgz backup bundle will be saved in I(dest).\n - If C(absent), the host configuration will be reset to default values.\n - If C(loaded), the backup file in I(src) will be loaded to the ESXi host rewriting the hosts settings.\n choices: [saved, absent, loaded]\n type: str\n required: True\nextends_documentation_fragment:\n- community.vmware.vmware.documentation\n\n'''\n\nEXAMPLES = r'''\n- name: Save the ESXi configuration locally by authenticating directly against the ESXi host\n community.vmware.vmware_cfg_backup:\n hostname: '{{ esxi_hostname }}'\n username: '{{ esxi_username }}'\n password: '{{ esxi_password }}'\n state: saved\n dest: /tmp/\n delegate_to: localhost\n\n- name: Save the ESXi configuration locally by authenticating against the vCenter and selecting the ESXi host\n community.vmware.vmware_cfg_backup:\n hostname: '{{ vcenter_hostname }}'\n esxi_hostname: '{{ esxi_hostname }}'\n username: '{{ esxi_username }}'\n password: '{{ esxi_password }}'\n state: saved\n dest: /tmp/\n delegate_to: localhost\n'''\n\nRETURN = r'''\ndest_file:\n description: The full path of where the file holding the ESXi configurations was stored\n returned: changed\n type: str\n sample: /tmp/configBundle-esxi.host.domain.tgz\n'''\n\nimport os\ntry:\n from pyVmomi import vim\nexcept ImportError:\n pass\n\nfrom ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, get_all_objs, wait_for_task, PyVmomi\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.urls import open_url\nfrom ansible.module_utils.six.moves.urllib.error import HTTPError\nfrom ansible.module_utils._text import to_native\n\n\nclass VMwareConfigurationBackup(PyVmomi):\n def __init__(self, module):\n super(VMwareConfigurationBackup, self).__init__(module)\n self.state = self.module.params['state']\n self.dest = self.module.params['dest']\n self.src = self.module.params['src']\n self.hostname = self.module.params['hostname']\n self.username = self.module.params['username']\n self.password = self.module.params['password']\n self.validate_certs = self.module.params['validate_certs']\n self.esxi_hostname = self.module.params.get('esxi_hostname', None)\n self.host = self.find_host_system()\n\n def find_host_system(self):\n if self.esxi_hostname:\n host_system_obj = self.find_hostsystem_by_name(host_name=self.esxi_hostname)\n if host_system_obj:\n return host_system_obj\n else:\n self.module.fail_json(msg=\"Failed to find ESXi %s\" % self.esxi_hostname)\n\n host_system = get_all_objs(self.content, [vim.HostSystem])\n return list(host_system)[0]\n\n def process_state(self):\n if self.state == 'saved':\n self.save_configuration()\n\n if self.state == 'absent':\n self.reset_configuration()\n\n if self.state == 'loaded':\n self.load_configuration()\n\n def load_configuration(self):\n if not os.path.isfile(self.src):\n self.module.fail_json(msg=\"Source file {0} does not exist\".format(self.src))\n\n url = self.host.configManager.firmwareSystem.QueryFirmwareConfigUploadURL()\n url = url.replace('*', self.host.name)\n # find manually the url if there is a redirect because urllib2 -per RFC- doesn't do automatic redirects for PUT requests\n try:\n open_url(url=url, method='HEAD', validate_certs=self.validate_certs)\n except HTTPError as e:\n url = e.geturl()\n\n try:\n with open(self.src, 
'rb') as file:\n data = file.read()\n open_url(\n url=url, data=data, method='PUT', validate_certs=self.validate_certs,\n url_username=self.username, url_password=self.password, force_basic_auth=True)\n except Exception as e:\n self.module.fail_json(msg=to_native(e))\n\n if not self.host.runtime.inMaintenanceMode:\n self.enter_maintenance()\n try:\n self.host.configManager.firmwareSystem.RestoreFirmwareConfiguration(force=True)\n self.module.exit_json(changed=True)\n except Exception as e:\n self.exit_maintenance()\n self.module.fail_json(msg=to_native(e))\n\n def reset_configuration(self):\n if not self.host.runtime.inMaintenanceMode:\n self.enter_maintenance()\n try:\n self.host.configManager.firmwareSystem.ResetFirmwareToFactoryDefaults()\n self.module.exit_json(changed=True)\n except Exception as e:\n self.exit_maintenance()\n self.module.fail_json(msg=to_native(e))\n\n def save_configuration(self):\n url = self.host.configManager.firmwareSystem.BackupFirmwareConfiguration()\n url = url.replace('*', self.host.name)\n if os.path.isdir(self.dest):\n filename = url.rsplit('/', 1)[1]\n self.dest = os.path.join(self.dest, filename)\n else:\n filename, file_extension = os.path.splitext(self.dest)\n if file_extension != \".tgz\":\n self.dest = filename + \".tgz\"\n try:\n request = open_url(url=url, validate_certs=self.validate_certs)\n with open(self.dest, \"wb\") as file:\n file.write(request.read())\n self.module.exit_json(changed=True, dest_file=self.dest)\n except IOError as e:\n self.module.fail_json(msg=\"Failed to write backup file. Ensure that \"\n \"the dest path exists and is writable. Details : %s\" % to_native(e))\n except Exception as e:\n self.module.fail_json(msg=to_native(e))\n\n def enter_maintenance(self):\n try:\n task = self.host.EnterMaintenanceMode_Task(timeout=15)\n success, result = wait_for_task(task)\n except Exception as e:\n self.module.fail_json(msg=\"Failed to enter maintenance mode.\"\n \" Ensure that there are no powered on machines on the host. 
%s\" % to_native(e))\n\n def exit_maintenance(self):\n try:\n task = self.host.ExitMaintenanceMode_Task(timeout=15)\n success, result = wait_for_task(task)\n except Exception as generic_exc:\n self.module.fail_json(msg=\"Failed to exit maintenance mode due to %s\" % to_native(generic_exc))\n\n\ndef main():\n argument_spec = vmware_argument_spec()\n argument_spec.update(dict(dest=dict(required=False, type='path'),\n esxi_hostname=dict(required=False, type='str'),\n src=dict(required=False, type='path'),\n state=dict(required=True, choices=['saved', 'absent', 'loaded'], type='str')))\n required_if = [('state', 'saved', ['dest']),\n ('state', 'loaded', ['src'])]\n\n module = AnsibleModule(argument_spec=argument_spec,\n required_if=required_if,\n supports_check_mode=False)\n\n vmware_cfg_backup = VMwareConfigurationBackup(module)\n vmware_cfg_backup.process_state()\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/vmware_cfg_backup.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2017, IBM Corp\n# Author(s): Andreas Nafpliotis <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = r'''\n---\nmodule: vmware_cfg_backup\nshort_description: Backup / Restore / Reset ESXi host configuration\ndescription:\n - This module can be used to perform various operations related to backup, restore and reset of ESXi host configuration.\nauthor:\n - Andreas Nafpliotis (@nafpliot-ibm)\nnotes:\n - Tested on ESXi 6.0\n - Works only for ESXi hosts\n - For configuration load or reset, the host will be switched automatically to maintenance mode.\nrequirements:\n - \"python >= 2.6\"\n - PyVmomi installed\noptions:\n esxi_hostname:\n description:\n - Name of ESXi server. This is required only if authentication against a vCenter is done.\n required: False\n type: str\n dest:\n description:\n - The destination where the ESXi configuration bundle will be saved. The I(dest) can be a folder or a file.\n - If I(dest) is a folder, the backup file will be saved in the folder with the default filename generated from the ESXi server.\n - If I(dest) is a file, the backup file will be saved with that filename. 
The file extension will always be .tgz.\n type: path\n src:\n description:\n - The file containing the ESXi configuration that will be restored.\n type: path\n state:\n description:\n - If C(saved), the .tgz backup bundle will be saved in I(dest).\n - If C(absent), the host configuration will be reset to default values.\n - If C(loaded), the backup file in I(src) will be loaded to the ESXi host rewriting the hosts settings.\n choices: [saved, absent, loaded]\n type: str\n required: True\nextends_documentation_fragment:\n- community.vmware.vmware.documentation\n\n'''\n\nEXAMPLES = r'''\n- name: Save the ESXi configuration locally by authenticating directly against the ESXi host\n community.vmware.vmware_cfg_backup:\n hostname: '{{ esxi_hostname }}'\n username: '{{ esxi_username }}'\n password: '{{ esxi_password }}'\n state: saved\n dest: /tmp/\n delegate_to: localhost\n\n- name: Save the ESXi configuration locally by authenticating against the vCenter and selecting the ESXi host\n community.vmware.vmware_cfg_backup:\n hostname: '{{ vcenter_hostname }}'\n esxi_hostname: '{{ esxi_hostname }}'\n username: '{{ esxi_username }}'\n password: '{{ esxi_password }}'\n state: saved\n dest: /tmp/\n delegate_to: localhost\n'''\n\nRETURN = r'''\ndest_file:\n description: The full path of where the file holding the ESXi configurations was stored\n returned: changed\n type: str\n sample: /tmp/configBundle-esxi.host.domain.tgz\n'''\n\nimport os\ntry:\n from pyVmomi import vim\nexcept ImportError:\n pass\n\nfrom ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, get_all_objs, wait_for_task, PyVmomi\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.urls import open_url\nfrom ansible.module_utils.six.moves.urllib.error import HTTPError\nfrom ansible.module_utils._text import to_native\n\n\nclass VMwareConfigurationBackup(PyVmomi):\n def __init__(self, module):\n super(VMwareConfigurationBackup, self).__init__(module)\n self.state = self.module.params['state']\n self.dest = self.module.params['dest']\n self.src = self.module.params['src']\n self.hostname = self.module.params['hostname']\n self.username = self.module.params['username']\n self.password = self.module.params['password']\n self.validate_certs = self.module.params['validate_certs']\n self.esxi_hostname = self.module.params.get('esxi_hostname', None)\n self.host = self.find_host_system()\n\n def find_host_system(self):\n if self.esxi_hostname:\n host_system_obj = self.find_hostsystem_by_name(host_name=self.esxi_hostname)\n if host_system_obj:\n return host_system_obj\n else:\n self.module.fail_json(msg=\"Failed to find ESXi %s\" % self.esxi_hostname)\n\n host_system = get_all_objs(self.content, [vim.HostSystem])\n return list(host_system)[0]\n\n def process_state(self):\n if self.state == 'saved':\n self.save_configuration()\n\n if self.state == 'absent':\n self.reset_configuration()\n\n if self.state == 'loaded':\n self.load_configuration()\n\n def load_configuration(self):\n if not os.path.isfile(self.src):\n self.module.fail_json(msg=\"Source file {0} does not exist\".format(self.src))\n\n url = self.host.configManager.firmwareSystem.QueryFirmwareConfigUploadURL()\n url = url.replace('*', self.host.name)\n # find manually the url if there is a redirect because urllib2 -per RFC- doesn't do automatic redirects for PUT requests\n try:\n open_url(url=url, method='HEAD', validate_certs=self.validate_certs)\n except HTTPError as e:\n url = e.geturl()\n\n try:\n with open(self.src, 
'rb') as file:\n data = file.read()\n open_url(\n url=url, data=data, method='PUT', validate_certs=self.validate_certs,\n url_username=self.username, url_password=self.password, force_basic_auth=True)\n except Exception as e:\n self.module.fail_json(msg=to_native(e))\n\n if not self.host.runtime.inMaintenanceMode:\n self.enter_maintenance()\n try:\n self.host.configManager.firmwareSystem.RestoreFirmwareConfiguration(force=True)\n self.module.exit_json(changed=True)\n except Exception as e:\n self.exit_maintenance()\n self.module.fail_json(msg=to_native(e))\n\n def reset_configuration(self):\n if not self.host.runtime.inMaintenanceMode:\n self.enter_maintenance()\n try:\n self.host.configManager.firmwareSystem.ResetFirmwareToFactoryDefaults()\n self.module.exit_json(changed=True)\n except Exception as e:\n self.exit_maintenance()\n self.module.fail_json(msg=to_native(e))\n\n def save_configuration(self):\n url = self.host.configManager.firmwareSystem.BackupFirmwareConfiguration()\n url = url.replace('*', self.host.name)\n if self.module.params[\"port\"] == 443:\n url = url.replace(\"http:\", \"https:\")\n if os.path.isdir(self.dest):\n filename = url.rsplit('/', 1)[1]\n self.dest = os.path.join(self.dest, filename)\n else:\n filename, file_extension = os.path.splitext(self.dest)\n if file_extension != \".tgz\":\n self.dest = filename + \".tgz\"\n try:\n request = open_url(url=url, validate_certs=self.validate_certs)\n with open(self.dest, \"wb\") as file:\n file.write(request.read())\n self.module.exit_json(changed=True, dest_file=self.dest)\n except IOError as e:\n self.module.fail_json(msg=\"Failed to write backup file. Ensure that \"\n \"the dest path exists and is writable. Details : %s\" % to_native(e))\n except Exception as e:\n self.module.fail_json(msg=to_native(e))\n\n def enter_maintenance(self):\n try:\n task = self.host.EnterMaintenanceMode_Task(timeout=15)\n success, result = wait_for_task(task)\n except Exception as e:\n self.module.fail_json(msg=\"Failed to enter maintenance mode.\"\n \" Ensure that there are no powered on machines on the host. %s\" % to_native(e))\n\n def exit_maintenance(self):\n try:\n task = self.host.ExitMaintenanceMode_Task(timeout=15)\n success, result = wait_for_task(task)\n except Exception as generic_exc:\n self.module.fail_json(msg=\"Failed to exit maintenance mode due to %s\" % to_native(generic_exc))\n\n\ndef main():\n argument_spec = vmware_argument_spec()\n argument_spec.update(dict(dest=dict(required=False, type='path'),\n esxi_hostname=dict(required=False, type='str'),\n src=dict(required=False, type='path'),\n state=dict(required=True, choices=['saved', 'absent', 'loaded'], type='str')))\n required_if = [('state', 'saved', ['dest']),\n ('state', 'loaded', ['src'])]\n\n module = AnsibleModule(argument_spec=argument_spec,\n required_if=required_if,\n supports_check_mode=False)\n\n vmware_cfg_backup = VMwareConfigurationBackup(module)\n vmware_cfg_backup.process_state()\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/vmware_cfg_backup.py"}]} | 3,932 | 149 |
gh_patches_debug_44205 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-1601 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]: Error when configuring Recycle Coach
### I Have A Problem With:
A specific source
### What's Your Problem
Receiving an error when configuring Recycle Coach for Vaughan, Ontario (an unlisted city, but one supported by Recycle Coach)
### Source (if relevant)
Recycle Coach
### Logs
```Shell
Logger: waste_collection_schedule.source_shell
Source: custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py:136
Integration: waste_collection_schedule (documentation)
First occurred: 3:10:21 PM (1 occurrences)
Last logged: 3:10:21 PM
fetch failed for source Recycle Coach: Traceback (most recent call last):
  File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py", line 134, in fetch
    entries = self._source.fetch()
    ^^^^^^^^^^^^^^^^^^^^
  File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/recyclecoach_com.py", line 159, in fetch
    self._lookup_zones()
  File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/recyclecoach_com.py", line 135, in _lookup_zones
    for zone_res in zone_data["results"]:
    ~~~~~~~~~^^^^^^^^^^^
KeyError: 'results'
```
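
For context, the failing request can be reproduced outside Home Assistant with a short script. This is only a sketch: the project/district/street values below are placeholders for whatever `_lookup_city()` resolves for Vaughan, and the endpoint is the one used by `_lookup_zones()` in the source shown further down.

```python
import requests

# Placeholder values - substitute whatever _lookup_city() resolves for your city.
project_id = "576"
district_id = "Vaughan"
street = "MAIN ST"

zone_finder = (
    "https://api-city.recyclecoach.com/zone-setup/address"
    f"?sku={project_id}&district={district_id}&prompt=undefined&term={street}"
)
zone_data = requests.get(zone_finder).json()

# For some districts the response carries no "results" key at all, which is
# exactly where the KeyError in _lookup_zones() comes from.
print("results" in zone_data)
print(zone_data)
```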
### Relevant Configuration
```YAML
waste_collection_schedule:
sources:
- name: recyclecoach_com
calendar_title: Waste Collection Schedule
args:
street: <<street address in lowercase>>
city: <<city in lowercase>>
state: <<province in lowercase>>
```
### Checklist Source Error
- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)
- [X] Checked that the website of your service provider is still working
- [X] Tested my attributes on the service provider website (if possible)
- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version)
### Checklist Sensor Error
- [ ] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)
### Required
- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.
- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/recyclecoach_com.py`
Content:
```
1 import json
2 from datetime import datetime
3
4 import requests
5 from waste_collection_schedule import Collection
6
7 TITLE = "Recycle Coach"
8 DESCRIPTION = "Source loader for recyclecoach.com"
9 URL = "https://recyclecoach.com"
10 COUNTRY = "us"
11
12 ICON_MAP = {
13 "Garbage": "mdi:trash-can",
14 "Recycling": "mdi:recycle",
15 "Yard Waste": "mdi:leaf",
16 }
17
18 EXTRA_INFO = [
19 {
20 "title": "Albuquerque, New Mexico, USA",
21 "url": "https://recyclecoach.com/cities/usa-nm-city-of-albuquerque/",
22 },
23 {
24 "title": "Tucson, Arizona, USA",
25 "url": "https://recyclecoach.com/cities/usa-az-city-of-tucson/",
26 },
27 {
28 "title": "Olympia, Washington, USA",
29 "url": "https://recyclecoach.com/cities/usa-wa-city-of-olympia/",
30 },
31 {
32 "title": "Newark, Delaware, USA",
33 "url": "https://recyclecoach.com/cities/usa-de-city-of-newark/",
34 },
35 {
36 "title": "Louisville, Kentucky, USA",
37 "url": "https://recyclecoach.com/cities/usa-ky-city-of-louisville/",
38 },
39 {"title": "London (ON)", "url": "https://london.ca/", "country": "ca"},
40 {"title": "Aurora (ON)", "url": "https://www.aurora.ca/", "country": "ca"},
41 ]
42
43 TEST_CASES = {
44 "Default": {"street": "2242 grinstead drive", "city": "louisville", "state": "KY"},
45 "Problematic City Lookup": {
46 "street": "2202 E Florence Dr",
47 "city": "Tucson",
48 "state": "AZ",
49 "district_id": "TUC",
50 "project_id": "532",
51 },
52 "olympia": {
53 "street": "1003 Lybarger St NE",
54 "city": "Olympia",
55 "state": "Washington",
56 },
57 "newark": {"street": "24 Townsend Rd", "city": "Newark", "state": "Delaware"},
58 "albuquerque": {
59 "street": "1505 Silver Ave SE",
60 "city": "Albuquerque",
61 "state": "New Mexico",
62 },
63 "london ontario": {
64 "street": "1065 Sunningdale Rd E",
65 "city": "London",
66 "state": "Ontario",
67 },
68 "london ontario with districtID": {
69 "street": "1065 Sunningdale Rd E",
70 "city": "London",
71 "state": "Ontario",
72 "project_id": "528",
73 "district_id": "CityofLondon",
74 "zone_id": "zone-z547",
75 },
76 "aurora ontario": {
77 "street": "123 Cranberry Lane",
78 "city": "Aurora",
79 "state": "Ontario",
80 },
81 }
82
83
84 class Source:
85 def __init__(
86 self, street, city, state, project_id=None, district_id=None, zone_id=None
87 ): # argX correspond to the args dict in the source configuration
88 self.street = self._format_key(street)
89 self.city = self._format_key(city)
90 self.state = self._format_key(state)
91 self.project_id = self._format_key(project_id) if project_id else None
92 self.district_id = district_id.strip() if district_id else None
93
94 self.zone_id = zone_id # uses lowercase z's, not sure if matters
95 self.stage = 0
96
97 def _format_key(self, param):
98 """Get rid of ambiguity in caps/spacing."""
99 return param.upper().strip()
100
101 def _lookup_city(self):
102 city_finder = f"https://recyclecoach.com/wp-json/rec/v1/cities?find={self.city}, {self.state}"
103 res = requests.get(city_finder)
104 city_data = res.json()
105
106 if len(city_data["cities"]) == 1:
107 self.project_id = city_data["cities"][0]["project_id"]
108 self.district_id = city_data["cities"][0]["district_id"]
109 self.stage = float(city_data["cities"][0]["stage"])
110
111 if self.stage < 3:
112 raise Exception(
113 "Found your city, but it is not yet supported fully by recycle coach."
114 )
115
116 elif len(city_data["cities"]) > 1:
117
118 for city in city_data["cities"]:
119 if city["city_nm"].upper() == self.city.upper():
120 self.project_id = city["project_id"]
121 self.district_id = city["district_id"]
122 self.stage = float(city["stage"])
123 return True
124
125 # not sure what to do with ambiguity here
126 # print(json.dumps(city_data['cities'], indent=4))
127 raise Exception(
128 "Could not determine district or project, Debug here to find your discrict and project_id"
129 )
130
131 def _lookup_zones(self):
132 zone_finder = f"https://api-city.recyclecoach.com/zone-setup/address?sku={self.project_id}&district={self.district_id}&prompt=undefined&term={self.street}"
133 res = requests.get(zone_finder)
134 zone_data = res.json()
135 for zone_res in zone_data["results"]:
136 streetpart = self._format_key(zone_res["address"]).split(",")[0]
137
138 if streetpart in self.street:
139 self.zone_id = self._build_zone_string(zone_res["zones"])
140 return self.zone_id
141
142 raise Exception("Unable to find zone")
143
144 def _build_zone_string(self, z_match):
145 """Take matching json and build a format zone-z12312-z1894323-z8461."""
146 zone_str = "zone"
147
148 for zonekey in z_match:
149 zone_str += f"-{z_match[zonekey]}"
150
151 return zone_str
152
153 def fetch(self):
154 """Build the date fetching request through looking up address on separate endpoints, skip these requests if you can provide the district_id, project_id and/or zone_id."""
155 if not self.project_id or not self.district_id:
156 self._lookup_city()
157
158 if not self.zone_id:
159 self._lookup_zones()
160
161 collection_def_url = f"https://reg.my-waste.mobi/collections?project_id={self.project_id}&district_id={self.district_id}&zone_id={self.zone_id}&lang_cd=en_US"
162 schedule_url = f"https://pkg.my-waste.mobi/app_data_zone_schedules?project_id={self.project_id}&district_id={self.district_id}&zone_id={self.zone_id}"
163
164 collection_def = None
165 schedule_def = None
166 collection_types = None
167
168 response = requests.get(collection_def_url)
169 collection_def = json.loads(response.text)
170
171 response = requests.get(schedule_url)
172 schedule_def = json.loads(response.text)
173
174 collection_types = collection_def["collection"]["types"]
175
176 entries = []
177 date_format = "%Y-%m-%d"
178
179 for year in schedule_def["DATA"]:
180 for month in year["months"]:
181 for event in month["events"]:
182 for collection in event["collections"]:
183 if collection["status"] == "is_none":
184 continue
185 ct = collection_types["collection-" + str(collection["id"])]
186 c = Collection(
187 datetime.strptime(event["date"], date_format).date(),
188 ct["title"],
189 ICON_MAP.get(ct["title"]),
190 )
191 entries.append(c)
192 return entries
193
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/recyclecoach_com.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/recyclecoach_com.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/recyclecoach_com.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/recyclecoach_com.py
@@ -2,7 +2,7 @@
from datetime import datetime
import requests
-from waste_collection_schedule import Collection
+from waste_collection_schedule import Collection # type: ignore[attr-defined]
TITLE = "Recycle Coach"
DESCRIPTION = "Source loader for recyclecoach.com"
@@ -38,6 +38,7 @@
},
{"title": "London (ON)", "url": "https://london.ca/", "country": "ca"},
{"title": "Aurora (ON)", "url": "https://www.aurora.ca/", "country": "ca"},
+ {"title": "Vaughan (ON)", "url": "https://www.vaughan.ca/", "country": "ca"},
]
TEST_CASES = {
@@ -78,6 +79,11 @@
"city": "Aurora",
"state": "Ontario",
},
+ "Vaughan, Ontario, Canada": { # https://app.my-waste.mobi/widget/576-Vaughan/home.php
+ "street": "Main St",
+ "city": "Vaughan",
+ "state": "Ontario",
+ },
}
@@ -89,14 +95,14 @@
self.city = self._format_key(city)
self.state = self._format_key(state)
self.project_id = self._format_key(project_id) if project_id else None
- self.district_id = district_id.strip() if district_id else None
+ self.district_id = str(district_id).strip() if district_id else None
self.zone_id = zone_id # uses lowercase z's, not sure if matters
self.stage = 0
def _format_key(self, param):
"""Get rid of ambiguity in caps/spacing."""
- return param.upper().strip()
+ return str(param).upper().strip()
def _lookup_city(self):
city_finder = f"https://recyclecoach.com/wp-json/rec/v1/cities?find={self.city}, {self.state}"
@@ -114,7 +120,6 @@
)
elif len(city_data["cities"]) > 1:
-
for city in city_data["cities"]:
if city["city_nm"].upper() == self.city.upper():
self.project_id = city["project_id"]
@@ -128,10 +133,35 @@
"Could not determine district or project, Debug here to find your discrict and project_id"
)
+ def _lookup_zones_with_geo(self):
+ pos_finder = f"https://api-city.recyclecoach.com/geo/address?address={self.street}&uuid=ecdb86fe-e42d-4a9d-94d6-7057777ef283&project_id={self.project_id}&district_id={self.district_id}"
+ res = requests.get(pos_finder)
+ lat = None
+ pos_data = res.json()
+ for pos_res in pos_data:
+ streetpart = self._format_key(pos_res["address"]).split(",")[0]
+
+ if streetpart in self.street:
+ lat = pos_res["lat"]
+ lng = pos_res["lng"]
+ break
+
+ if not lat:
+ raise Exception("Unable to find zone")
+
+ zone_finder = f"https://pkg.my-waste.mobi/get_zones?project_id={self.project_id}&district_id={self.district_id}&lat={lat}&lng={lng}"
+ res = requests.get(zone_finder)
+ zone_data = {z["prompt_id"]: "z" + z["zone_id"] for z in res.json()}
+ self.zone_id = self._build_zone_string(zone_data)
+
+ return self.zone_id
+
def _lookup_zones(self):
zone_finder = f"https://api-city.recyclecoach.com/zone-setup/address?sku={self.project_id}&district={self.district_id}&prompt=undefined&term={self.street}"
res = requests.get(zone_finder)
zone_data = res.json()
+ if "results" not in zone_data:
+ return self._lookup_zones_with_geo()
for zone_res in zone_data["results"]:
streetpart = self._format_key(zone_res["address"]).split(",")[0]
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/recyclecoach_com.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/recyclecoach_com.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/recyclecoach_com.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/recyclecoach_com.py\n@@ -2,7 +2,7 @@\n from datetime import datetime\n \n import requests\n-from waste_collection_schedule import Collection\n+from waste_collection_schedule import Collection # type: ignore[attr-defined]\n \n TITLE = \"Recycle Coach\"\n DESCRIPTION = \"Source loader for recyclecoach.com\"\n@@ -38,6 +38,7 @@\n },\n {\"title\": \"London (ON)\", \"url\": \"https://london.ca/\", \"country\": \"ca\"},\n {\"title\": \"Aurora (ON)\", \"url\": \"https://www.aurora.ca/\", \"country\": \"ca\"},\n+ {\"title\": \"Vaughan (ON)\", \"url\": \"https://www.vaughan.ca/\", \"country\": \"ca\"},\n ]\n \n TEST_CASES = {\n@@ -78,6 +79,11 @@\n \"city\": \"Aurora\",\n \"state\": \"Ontario\",\n },\n+ \"Vaughan, Ontario, Canada\": { # https://app.my-waste.mobi/widget/576-Vaughan/home.php\n+ \"street\": \"Main St\",\n+ \"city\": \"Vaughan\",\n+ \"state\": \"Ontario\",\n+ },\n }\n \n \n@@ -89,14 +95,14 @@\n self.city = self._format_key(city)\n self.state = self._format_key(state)\n self.project_id = self._format_key(project_id) if project_id else None\n- self.district_id = district_id.strip() if district_id else None\n+ self.district_id = str(district_id).strip() if district_id else None\n \n self.zone_id = zone_id # uses lowercase z's, not sure if matters\n self.stage = 0\n \n def _format_key(self, param):\n \"\"\"Get rid of ambiguity in caps/spacing.\"\"\"\n- return param.upper().strip()\n+ return str(param).upper().strip()\n \n def _lookup_city(self):\n city_finder = f\"https://recyclecoach.com/wp-json/rec/v1/cities?find={self.city}, {self.state}\"\n@@ -114,7 +120,6 @@\n )\n \n elif len(city_data[\"cities\"]) > 1:\n-\n for city in city_data[\"cities\"]:\n if city[\"city_nm\"].upper() == self.city.upper():\n self.project_id = city[\"project_id\"]\n@@ -128,10 +133,35 @@\n \"Could not determine district or project, Debug here to find your discrict and project_id\"\n )\n \n+ def _lookup_zones_with_geo(self):\n+ pos_finder = f\"https://api-city.recyclecoach.com/geo/address?address={self.street}&uuid=ecdb86fe-e42d-4a9d-94d6-7057777ef283&project_id={self.project_id}&district_id={self.district_id}\"\n+ res = requests.get(pos_finder)\n+ lat = None\n+ pos_data = res.json()\n+ for pos_res in pos_data:\n+ streetpart = self._format_key(pos_res[\"address\"]).split(\",\")[0]\n+\n+ if streetpart in self.street:\n+ lat = pos_res[\"lat\"]\n+ lng = pos_res[\"lng\"]\n+ break\n+\n+ if not lat:\n+ raise Exception(\"Unable to find zone\")\n+\n+ zone_finder = f\"https://pkg.my-waste.mobi/get_zones?project_id={self.project_id}&district_id={self.district_id}&lat={lat}&lng={lng}\"\n+ res = requests.get(zone_finder)\n+ zone_data = {z[\"prompt_id\"]: \"z\" + z[\"zone_id\"] for z in res.json()}\n+ self.zone_id = self._build_zone_string(zone_data)\n+\n+ return self.zone_id\n+\n def _lookup_zones(self):\n zone_finder = f\"https://api-city.recyclecoach.com/zone-setup/address?sku={self.project_id}&district={self.district_id}&prompt=undefined&term={self.street}\"\n res = requests.get(zone_finder)\n zone_data = res.json()\n+ if \"results\" not in zone_data:\n+ return self._lookup_zones_with_geo()\n for zone_res in 
zone_data[\"results\"]:\n streetpart = self._format_key(zone_res[\"address\"]).split(\",\")[0]\n", "issue": "[Bug]: Error when configuring Recycle Coach\n### I Have A Problem With:\n\nA specific source\n\n### What's Your Problem\n\nReceiving error when configuring recycle coach for Vaughan Ontarion (unlisted city, but supported by recycle coach)\r\n\n\n### Source (if relevant)\n\nrecycle coach\n\n### Logs\n\n```Shell\nLogger: waste_collection_schedule.source_shell\r\nSource: custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py:136\r\nIntegration: waste_collection_schedule (documentation)\r\nFirst occurred: 3:10:21 PM (1 occurrences)\r\nLast logged: 3:10:21 PM\r\n\r\nfetch failed for source Recycle Coach: Traceback (most recent call last): File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py\", line 134, in fetch entries = self._source.fetch() ^^^^^^^^^^^^^^^^^^^^ File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/recyclecoach_com.py\", line 159, in fetch self._lookup_zones() File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/recyclecoach_com.py\", line 135, in _lookup_zones for zone_res in zone_data[\"results\"]: ~~~~~~~~~^^^^^^^^^^^ KeyError: 'results'\n```\n\n\n### Relevant Configuration\n\n```YAML\nwaste_collection_schedule:\r\n sources:\r\n - name: recyclecoach_com \r\n calendar_title: Waste Collection Schedule\r\n args:\r\n street: <<street address in lowercase>>\r\n city: <<city in lowercase>>\r\n state: <<province in lowercase>>\n```\n\n\n### Checklist Source Error\n\n- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\n- [X] Checked that the website of your service provider is still working\n- [X] Tested my attributes on the service provider website (if possible)\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\n\n### Checklist Sensor Error\n\n- [ ] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\n\n### Required\n\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "import json\nfrom datetime import datetime\n\nimport requests\nfrom waste_collection_schedule import Collection\n\nTITLE = \"Recycle Coach\"\nDESCRIPTION = \"Source loader for recyclecoach.com\"\nURL = \"https://recyclecoach.com\"\nCOUNTRY = \"us\"\n\nICON_MAP = {\n \"Garbage\": \"mdi:trash-can\",\n \"Recycling\": \"mdi:recycle\",\n \"Yard Waste\": \"mdi:leaf\",\n}\n\nEXTRA_INFO = [\n {\n \"title\": \"Albuquerque, New Mexico, USA\",\n \"url\": \"https://recyclecoach.com/cities/usa-nm-city-of-albuquerque/\",\n },\n {\n \"title\": \"Tucson, Arizona, USA\",\n \"url\": \"https://recyclecoach.com/cities/usa-az-city-of-tucson/\",\n },\n {\n \"title\": \"Olympia, Washington, USA\",\n \"url\": \"https://recyclecoach.com/cities/usa-wa-city-of-olympia/\",\n },\n {\n \"title\": \"Newark, Delaware, USA\",\n \"url\": \"https://recyclecoach.com/cities/usa-de-city-of-newark/\",\n },\n {\n \"title\": \"Louisville, Kentucky, 
USA\",\n \"url\": \"https://recyclecoach.com/cities/usa-ky-city-of-louisville/\",\n },\n {\"title\": \"London (ON)\", \"url\": \"https://london.ca/\", \"country\": \"ca\"},\n {\"title\": \"Aurora (ON)\", \"url\": \"https://www.aurora.ca/\", \"country\": \"ca\"},\n]\n\nTEST_CASES = {\n \"Default\": {\"street\": \"2242 grinstead drive\", \"city\": \"louisville\", \"state\": \"KY\"},\n \"Problematic City Lookup\": {\n \"street\": \"2202 E Florence Dr\",\n \"city\": \"Tucson\",\n \"state\": \"AZ\",\n \"district_id\": \"TUC\",\n \"project_id\": \"532\",\n },\n \"olympia\": {\n \"street\": \"1003 Lybarger St NE\",\n \"city\": \"Olympia\",\n \"state\": \"Washington\",\n },\n \"newark\": {\"street\": \"24 Townsend Rd\", \"city\": \"Newark\", \"state\": \"Delaware\"},\n \"albuquerque\": {\n \"street\": \"1505 Silver Ave SE\",\n \"city\": \"Albuquerque\",\n \"state\": \"New Mexico\",\n },\n \"london ontario\": {\n \"street\": \"1065 Sunningdale Rd E\",\n \"city\": \"London\",\n \"state\": \"Ontario\",\n },\n \"london ontario with districtID\": {\n \"street\": \"1065 Sunningdale Rd E\",\n \"city\": \"London\",\n \"state\": \"Ontario\",\n \"project_id\": \"528\",\n \"district_id\": \"CityofLondon\",\n \"zone_id\": \"zone-z547\",\n },\n \"aurora ontario\": {\n \"street\": \"123 Cranberry Lane\",\n \"city\": \"Aurora\",\n \"state\": \"Ontario\",\n },\n}\n\n\nclass Source:\n def __init__(\n self, street, city, state, project_id=None, district_id=None, zone_id=None\n ): # argX correspond to the args dict in the source configuration\n self.street = self._format_key(street)\n self.city = self._format_key(city)\n self.state = self._format_key(state)\n self.project_id = self._format_key(project_id) if project_id else None\n self.district_id = district_id.strip() if district_id else None\n\n self.zone_id = zone_id # uses lowercase z's, not sure if matters\n self.stage = 0\n\n def _format_key(self, param):\n \"\"\"Get rid of ambiguity in caps/spacing.\"\"\"\n return param.upper().strip()\n\n def _lookup_city(self):\n city_finder = f\"https://recyclecoach.com/wp-json/rec/v1/cities?find={self.city}, {self.state}\"\n res = requests.get(city_finder)\n city_data = res.json()\n\n if len(city_data[\"cities\"]) == 1:\n self.project_id = city_data[\"cities\"][0][\"project_id\"]\n self.district_id = city_data[\"cities\"][0][\"district_id\"]\n self.stage = float(city_data[\"cities\"][0][\"stage\"])\n\n if self.stage < 3:\n raise Exception(\n \"Found your city, but it is not yet supported fully by recycle coach.\"\n )\n\n elif len(city_data[\"cities\"]) > 1:\n\n for city in city_data[\"cities\"]:\n if city[\"city_nm\"].upper() == self.city.upper():\n self.project_id = city[\"project_id\"]\n self.district_id = city[\"district_id\"]\n self.stage = float(city[\"stage\"])\n return True\n\n # not sure what to do with ambiguity here\n # print(json.dumps(city_data['cities'], indent=4))\n raise Exception(\n \"Could not determine district or project, Debug here to find your discrict and project_id\"\n )\n\n def _lookup_zones(self):\n zone_finder = f\"https://api-city.recyclecoach.com/zone-setup/address?sku={self.project_id}&district={self.district_id}&prompt=undefined&term={self.street}\"\n res = requests.get(zone_finder)\n zone_data = res.json()\n for zone_res in zone_data[\"results\"]:\n streetpart = self._format_key(zone_res[\"address\"]).split(\",\")[0]\n\n if streetpart in self.street:\n self.zone_id = self._build_zone_string(zone_res[\"zones\"])\n return self.zone_id\n\n raise Exception(\"Unable to find zone\")\n\n def 
_build_zone_string(self, z_match):\n \"\"\"Take matching json and build a format zone-z12312-z1894323-z8461.\"\"\"\n zone_str = \"zone\"\n\n for zonekey in z_match:\n zone_str += f\"-{z_match[zonekey]}\"\n\n return zone_str\n\n def fetch(self):\n \"\"\"Build the date fetching request through looking up address on separate endpoints, skip these requests if you can provide the district_id, project_id and/or zone_id.\"\"\"\n if not self.project_id or not self.district_id:\n self._lookup_city()\n\n if not self.zone_id:\n self._lookup_zones()\n\n collection_def_url = f\"https://reg.my-waste.mobi/collections?project_id={self.project_id}&district_id={self.district_id}&zone_id={self.zone_id}&lang_cd=en_US\"\n schedule_url = f\"https://pkg.my-waste.mobi/app_data_zone_schedules?project_id={self.project_id}&district_id={self.district_id}&zone_id={self.zone_id}\"\n\n collection_def = None\n schedule_def = None\n collection_types = None\n\n response = requests.get(collection_def_url)\n collection_def = json.loads(response.text)\n\n response = requests.get(schedule_url)\n schedule_def = json.loads(response.text)\n\n collection_types = collection_def[\"collection\"][\"types\"]\n\n entries = []\n date_format = \"%Y-%m-%d\"\n\n for year in schedule_def[\"DATA\"]:\n for month in year[\"months\"]:\n for event in month[\"events\"]:\n for collection in event[\"collections\"]:\n if collection[\"status\"] == \"is_none\":\n continue\n ct = collection_types[\"collection-\" + str(collection[\"id\"])]\n c = Collection(\n datetime.strptime(event[\"date\"], date_format).date(),\n ct[\"title\"],\n ICON_MAP.get(ct[\"title\"]),\n )\n entries.append(c)\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/recyclecoach_com.py"}], "after_files": [{"content": "import json\nfrom datetime import datetime\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"Recycle Coach\"\nDESCRIPTION = \"Source loader for recyclecoach.com\"\nURL = \"https://recyclecoach.com\"\nCOUNTRY = \"us\"\n\nICON_MAP = {\n \"Garbage\": \"mdi:trash-can\",\n \"Recycling\": \"mdi:recycle\",\n \"Yard Waste\": \"mdi:leaf\",\n}\n\nEXTRA_INFO = [\n {\n \"title\": \"Albuquerque, New Mexico, USA\",\n \"url\": \"https://recyclecoach.com/cities/usa-nm-city-of-albuquerque/\",\n },\n {\n \"title\": \"Tucson, Arizona, USA\",\n \"url\": \"https://recyclecoach.com/cities/usa-az-city-of-tucson/\",\n },\n {\n \"title\": \"Olympia, Washington, USA\",\n \"url\": \"https://recyclecoach.com/cities/usa-wa-city-of-olympia/\",\n },\n {\n \"title\": \"Newark, Delaware, USA\",\n \"url\": \"https://recyclecoach.com/cities/usa-de-city-of-newark/\",\n },\n {\n \"title\": \"Louisville, Kentucky, USA\",\n \"url\": \"https://recyclecoach.com/cities/usa-ky-city-of-louisville/\",\n },\n {\"title\": \"London (ON)\", \"url\": \"https://london.ca/\", \"country\": \"ca\"},\n {\"title\": \"Aurora (ON)\", \"url\": \"https://www.aurora.ca/\", \"country\": \"ca\"},\n {\"title\": \"Vaughan (ON)\", \"url\": \"https://www.vaughan.ca/\", \"country\": \"ca\"},\n]\n\nTEST_CASES = {\n \"Default\": {\"street\": \"2242 grinstead drive\", \"city\": \"louisville\", \"state\": \"KY\"},\n \"Problematic City Lookup\": {\n \"street\": \"2202 E Florence Dr\",\n \"city\": \"Tucson\",\n \"state\": \"AZ\",\n \"district_id\": \"TUC\",\n \"project_id\": \"532\",\n },\n \"olympia\": {\n \"street\": \"1003 Lybarger St NE\",\n \"city\": \"Olympia\",\n \"state\": \"Washington\",\n },\n \"newark\": {\"street\": 
\"24 Townsend Rd\", \"city\": \"Newark\", \"state\": \"Delaware\"},\n \"albuquerque\": {\n \"street\": \"1505 Silver Ave SE\",\n \"city\": \"Albuquerque\",\n \"state\": \"New Mexico\",\n },\n \"london ontario\": {\n \"street\": \"1065 Sunningdale Rd E\",\n \"city\": \"London\",\n \"state\": \"Ontario\",\n },\n \"london ontario with districtID\": {\n \"street\": \"1065 Sunningdale Rd E\",\n \"city\": \"London\",\n \"state\": \"Ontario\",\n \"project_id\": \"528\",\n \"district_id\": \"CityofLondon\",\n \"zone_id\": \"zone-z547\",\n },\n \"aurora ontario\": {\n \"street\": \"123 Cranberry Lane\",\n \"city\": \"Aurora\",\n \"state\": \"Ontario\",\n },\n \"Vaughan, Ontario, Canada\": { # https://app.my-waste.mobi/widget/576-Vaughan/home.php\n \"street\": \"Main St\",\n \"city\": \"Vaughan\",\n \"state\": \"Ontario\",\n },\n}\n\n\nclass Source:\n def __init__(\n self, street, city, state, project_id=None, district_id=None, zone_id=None\n ): # argX correspond to the args dict in the source configuration\n self.street = self._format_key(street)\n self.city = self._format_key(city)\n self.state = self._format_key(state)\n self.project_id = self._format_key(project_id) if project_id else None\n self.district_id = str(district_id).strip() if district_id else None\n\n self.zone_id = zone_id # uses lowercase z's, not sure if matters\n self.stage = 0\n\n def _format_key(self, param):\n \"\"\"Get rid of ambiguity in caps/spacing.\"\"\"\n return str(param).upper().strip()\n\n def _lookup_city(self):\n city_finder = f\"https://recyclecoach.com/wp-json/rec/v1/cities?find={self.city}, {self.state}\"\n res = requests.get(city_finder)\n city_data = res.json()\n\n if len(city_data[\"cities\"]) == 1:\n self.project_id = city_data[\"cities\"][0][\"project_id\"]\n self.district_id = city_data[\"cities\"][0][\"district_id\"]\n self.stage = float(city_data[\"cities\"][0][\"stage\"])\n\n if self.stage < 3:\n raise Exception(\n \"Found your city, but it is not yet supported fully by recycle coach.\"\n )\n\n elif len(city_data[\"cities\"]) > 1:\n for city in city_data[\"cities\"]:\n if city[\"city_nm\"].upper() == self.city.upper():\n self.project_id = city[\"project_id\"]\n self.district_id = city[\"district_id\"]\n self.stage = float(city[\"stage\"])\n return True\n\n # not sure what to do with ambiguity here\n # print(json.dumps(city_data['cities'], indent=4))\n raise Exception(\n \"Could not determine district or project, Debug here to find your discrict and project_id\"\n )\n\n def _lookup_zones_with_geo(self):\n pos_finder = f\"https://api-city.recyclecoach.com/geo/address?address={self.street}&uuid=ecdb86fe-e42d-4a9d-94d6-7057777ef283&project_id={self.project_id}&district_id={self.district_id}\"\n res = requests.get(pos_finder)\n lat = None\n pos_data = res.json()\n for pos_res in pos_data:\n streetpart = self._format_key(pos_res[\"address\"]).split(\",\")[0]\n\n if streetpart in self.street:\n lat = pos_res[\"lat\"]\n lng = pos_res[\"lng\"]\n break\n\n if not lat:\n raise Exception(\"Unable to find zone\")\n\n zone_finder = f\"https://pkg.my-waste.mobi/get_zones?project_id={self.project_id}&district_id={self.district_id}&lat={lat}&lng={lng}\"\n res = requests.get(zone_finder)\n zone_data = {z[\"prompt_id\"]: \"z\" + z[\"zone_id\"] for z in res.json()}\n self.zone_id = self._build_zone_string(zone_data)\n\n return self.zone_id\n\n def _lookup_zones(self):\n zone_finder = 
f\"https://api-city.recyclecoach.com/zone-setup/address?sku={self.project_id}&district={self.district_id}&prompt=undefined&term={self.street}\"\n res = requests.get(zone_finder)\n zone_data = res.json()\n if \"results\" not in zone_data:\n return self._lookup_zones_with_geo()\n for zone_res in zone_data[\"results\"]:\n streetpart = self._format_key(zone_res[\"address\"]).split(\",\")[0]\n\n if streetpart in self.street:\n self.zone_id = self._build_zone_string(zone_res[\"zones\"])\n return self.zone_id\n\n raise Exception(\"Unable to find zone\")\n\n def _build_zone_string(self, z_match):\n \"\"\"Take matching json and build a format zone-z12312-z1894323-z8461.\"\"\"\n zone_str = \"zone\"\n\n for zonekey in z_match:\n zone_str += f\"-{z_match[zonekey]}\"\n\n return zone_str\n\n def fetch(self):\n \"\"\"Build the date fetching request through looking up address on separate endpoints, skip these requests if you can provide the district_id, project_id and/or zone_id.\"\"\"\n if not self.project_id or not self.district_id:\n self._lookup_city()\n\n if not self.zone_id:\n self._lookup_zones()\n\n collection_def_url = f\"https://reg.my-waste.mobi/collections?project_id={self.project_id}&district_id={self.district_id}&zone_id={self.zone_id}&lang_cd=en_US\"\n schedule_url = f\"https://pkg.my-waste.mobi/app_data_zone_schedules?project_id={self.project_id}&district_id={self.district_id}&zone_id={self.zone_id}\"\n\n collection_def = None\n schedule_def = None\n collection_types = None\n\n response = requests.get(collection_def_url)\n collection_def = json.loads(response.text)\n\n response = requests.get(schedule_url)\n schedule_def = json.loads(response.text)\n\n collection_types = collection_def[\"collection\"][\"types\"]\n\n entries = []\n date_format = \"%Y-%m-%d\"\n\n for year in schedule_def[\"DATA\"]:\n for month in year[\"months\"]:\n for event in month[\"events\"]:\n for collection in event[\"collections\"]:\n if collection[\"status\"] == \"is_none\":\n continue\n ct = collection_types[\"collection-\" + str(collection[\"id\"])]\n c = Collection(\n datetime.strptime(event[\"date\"], date_format).date(),\n ct[\"title\"],\n ICON_MAP.get(ct[\"title\"]),\n )\n entries.append(c)\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/recyclecoach_com.py"}]} | 2,969 | 1,020 |
gh_patches_debug_24414 | rasdani/github-patches | git_diff | netbox-community__netbox-12192 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Introduce a permission specifically to allow the creation of API tokens for other users
### NetBox version
v3.4.7
### Feature type
Change to existing functionality
### Proposed functionality
This idea was [first proposed](https://github.com/netbox-community/netbox/issues/11091#issuecomment-1382039803) by @kkthxbye-code under #11091. This permission will control whether a specific user has the ability to create API tokens on behalf of other users.
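
As a rough illustration only (the codename `users.grant_token` is a placeholder, not an agreed-upon name), the check could boil down to something like:

```python
from rest_framework.exceptions import PermissionDenied


def enforce_token_ownership(request_user, token_user):
    """Disallow creating a token for another user without a dedicated permission.

    'users.grant_token' is a placeholder codename for the proposed permission.
    """
    if token_user and token_user != request_user and not request_user.has_perm(
        "users.grant_token"
    ):
        raise PermissionDenied(
            "This user does not have permission to create tokens for other users."
        )
```

Hooked into the token serializer's validation, a check of this shape would leave self-service token creation unchanged while gating the provisioning of tokens for other users behind the new permission.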
### Use case
Provides more granular control over the creation of API tokens.
### Database changes
_No response_
### External dependencies
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/users/api/serializers.py`
Content:
```
1 from django.conf import settings
2 from django.contrib.auth.models import Group, User
3 from django.contrib.contenttypes.models import ContentType
4 from rest_framework import serializers
5
6 from netbox.api.fields import ContentTypeField, IPNetworkSerializer, SerializedPKRelatedField
7 from netbox.api.serializers import ValidatedModelSerializer
8 from users.models import ObjectPermission, Token
9 from .nested_serializers import *
10
11
12 __all__ = (
13 'GroupSerializer',
14 'ObjectPermissionSerializer',
15 'TokenSerializer',
16 'UserSerializer',
17 )
18
19
20 class UserSerializer(ValidatedModelSerializer):
21 url = serializers.HyperlinkedIdentityField(view_name='users-api:user-detail')
22 groups = SerializedPKRelatedField(
23 queryset=Group.objects.all(),
24 serializer=NestedGroupSerializer,
25 required=False,
26 many=True
27 )
28
29 class Meta:
30 model = User
31 fields = (
32 'id', 'url', 'display', 'username', 'password', 'first_name', 'last_name', 'email', 'is_staff', 'is_active',
33 'date_joined', 'groups',
34 )
35 extra_kwargs = {
36 'password': {'write_only': True}
37 }
38
39 def create(self, validated_data):
40 """
41 Extract the password from validated data and set it separately to ensure proper hash generation.
42 """
43 password = validated_data.pop('password')
44 user = super().create(validated_data)
45 user.set_password(password)
46 user.save()
47
48 return user
49
50 def get_display(self, obj):
51 if full_name := obj.get_full_name():
52 return f"{obj.username} ({full_name})"
53 return obj.username
54
55
56 class GroupSerializer(ValidatedModelSerializer):
57 url = serializers.HyperlinkedIdentityField(view_name='users-api:group-detail')
58 user_count = serializers.IntegerField(read_only=True)
59
60 class Meta:
61 model = Group
62 fields = ('id', 'url', 'display', 'name', 'user_count')
63
64
65 class TokenSerializer(ValidatedModelSerializer):
66 url = serializers.HyperlinkedIdentityField(view_name='users-api:token-detail')
67 key = serializers.CharField(
68 min_length=40,
69 max_length=40,
70 allow_blank=True,
71 required=False,
72 write_only=not settings.ALLOW_TOKEN_RETRIEVAL
73 )
74 user = NestedUserSerializer()
75 allowed_ips = serializers.ListField(
76 child=IPNetworkSerializer(),
77 required=False,
78 allow_empty=True,
79 default=[]
80 )
81
82 class Meta:
83 model = Token
84 fields = (
85 'id', 'url', 'display', 'user', 'created', 'expires', 'last_used', 'key', 'write_enabled', 'description',
86 'allowed_ips',
87 )
88
89 def to_internal_value(self, data):
90 if 'key' not in data:
91 data['key'] = Token.generate_key()
92 return super().to_internal_value(data)
93
94
95 class TokenProvisionSerializer(serializers.Serializer):
96 username = serializers.CharField()
97 password = serializers.CharField()
98
99
100 class ObjectPermissionSerializer(ValidatedModelSerializer):
101 url = serializers.HyperlinkedIdentityField(view_name='users-api:objectpermission-detail')
102 object_types = ContentTypeField(
103 queryset=ContentType.objects.all(),
104 many=True
105 )
106 groups = SerializedPKRelatedField(
107 queryset=Group.objects.all(),
108 serializer=NestedGroupSerializer,
109 required=False,
110 many=True
111 )
112 users = SerializedPKRelatedField(
113 queryset=User.objects.all(),
114 serializer=NestedUserSerializer,
115 required=False,
116 many=True
117 )
118
119 class Meta:
120 model = ObjectPermission
121 fields = (
122 'id', 'url', 'display', 'name', 'description', 'enabled', 'object_types', 'groups', 'users', 'actions',
123 'constraints',
124 )
125
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/netbox/users/api/serializers.py b/netbox/users/api/serializers.py
--- a/netbox/users/api/serializers.py
+++ b/netbox/users/api/serializers.py
@@ -2,6 +2,7 @@
from django.contrib.auth.models import Group, User
from django.contrib.contenttypes.models import ContentType
from rest_framework import serializers
+from rest_framework.exceptions import PermissionDenied
from netbox.api.fields import ContentTypeField, IPNetworkSerializer, SerializedPKRelatedField
from netbox.api.serializers import ValidatedModelSerializer
@@ -91,6 +92,16 @@
data['key'] = Token.generate_key()
return super().to_internal_value(data)
+ def validate(self, data):
+
+ # If the Token is being created on behalf of another user, enforce the grant_token permission.
+ request = self.context.get('request')
+ token_user = data.get('user')
+ if token_user and token_user != request.user and not request.user.has_perm('users.grant_token'):
+ raise PermissionDenied("This user does not have permission to create tokens for other users.")
+
+ return super().validate(data)
+
class TokenProvisionSerializer(serializers.Serializer):
username = serializers.CharField()
| {"golden_diff": "diff --git a/netbox/users/api/serializers.py b/netbox/users/api/serializers.py\n--- a/netbox/users/api/serializers.py\n+++ b/netbox/users/api/serializers.py\n@@ -2,6 +2,7 @@\n from django.contrib.auth.models import Group, User\n from django.contrib.contenttypes.models import ContentType\n from rest_framework import serializers\n+from rest_framework.exceptions import PermissionDenied\n \n from netbox.api.fields import ContentTypeField, IPNetworkSerializer, SerializedPKRelatedField\n from netbox.api.serializers import ValidatedModelSerializer\n@@ -91,6 +92,16 @@\n data['key'] = Token.generate_key()\n return super().to_internal_value(data)\n \n+ def validate(self, data):\n+\n+ # If the Token is being created on behalf of another user, enforce the grant_token permission.\n+ request = self.context.get('request')\n+ token_user = data.get('user')\n+ if token_user and token_user != request.user and not request.user.has_perm('users.grant_token'):\n+ raise PermissionDenied(\"This user does not have permission to create tokens for other users.\")\n+\n+ return super().validate(data)\n+\n \n class TokenProvisionSerializer(serializers.Serializer):\n username = serializers.CharField()\n", "issue": "Introduce a permission specifically to allow the creation of API tokens for other users\n### NetBox version\n\nv3.4.7\n\n### Feature type\n\nChange to existing functionality\n\n### Proposed functionality\n\nThis idea was [first proposed](https://github.com/netbox-community/netbox/issues/11091#issuecomment-1382039803) by @kkthxbye-code under #11091. This permission will control whether a specific user has the ability to create API tokens on behalf of other users.\n\n### Use case\n\nProvides more granular control over the creation of API tokens.\n\n### Database changes\n\n_No response_\n\n### External dependencies\n\n_No response_\n", "before_files": [{"content": "from django.conf import settings\nfrom django.contrib.auth.models import Group, User\nfrom django.contrib.contenttypes.models import ContentType\nfrom rest_framework import serializers\n\nfrom netbox.api.fields import ContentTypeField, IPNetworkSerializer, SerializedPKRelatedField\nfrom netbox.api.serializers import ValidatedModelSerializer\nfrom users.models import ObjectPermission, Token\nfrom .nested_serializers import *\n\n\n__all__ = (\n 'GroupSerializer',\n 'ObjectPermissionSerializer',\n 'TokenSerializer',\n 'UserSerializer',\n)\n\n\nclass UserSerializer(ValidatedModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='users-api:user-detail')\n groups = SerializedPKRelatedField(\n queryset=Group.objects.all(),\n serializer=NestedGroupSerializer,\n required=False,\n many=True\n )\n\n class Meta:\n model = User\n fields = (\n 'id', 'url', 'display', 'username', 'password', 'first_name', 'last_name', 'email', 'is_staff', 'is_active',\n 'date_joined', 'groups',\n )\n extra_kwargs = {\n 'password': {'write_only': True}\n }\n\n def create(self, validated_data):\n \"\"\"\n Extract the password from validated data and set it separately to ensure proper hash generation.\n \"\"\"\n password = validated_data.pop('password')\n user = super().create(validated_data)\n user.set_password(password)\n user.save()\n\n return user\n\n def get_display(self, obj):\n if full_name := obj.get_full_name():\n return f\"{obj.username} ({full_name})\"\n return obj.username\n\n\nclass GroupSerializer(ValidatedModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='users-api:group-detail')\n user_count = 
serializers.IntegerField(read_only=True)\n\n class Meta:\n model = Group\n fields = ('id', 'url', 'display', 'name', 'user_count')\n\n\nclass TokenSerializer(ValidatedModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='users-api:token-detail')\n key = serializers.CharField(\n min_length=40,\n max_length=40,\n allow_blank=True,\n required=False,\n write_only=not settings.ALLOW_TOKEN_RETRIEVAL\n )\n user = NestedUserSerializer()\n allowed_ips = serializers.ListField(\n child=IPNetworkSerializer(),\n required=False,\n allow_empty=True,\n default=[]\n )\n\n class Meta:\n model = Token\n fields = (\n 'id', 'url', 'display', 'user', 'created', 'expires', 'last_used', 'key', 'write_enabled', 'description',\n 'allowed_ips',\n )\n\n def to_internal_value(self, data):\n if 'key' not in data:\n data['key'] = Token.generate_key()\n return super().to_internal_value(data)\n\n\nclass TokenProvisionSerializer(serializers.Serializer):\n username = serializers.CharField()\n password = serializers.CharField()\n\n\nclass ObjectPermissionSerializer(ValidatedModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='users-api:objectpermission-detail')\n object_types = ContentTypeField(\n queryset=ContentType.objects.all(),\n many=True\n )\n groups = SerializedPKRelatedField(\n queryset=Group.objects.all(),\n serializer=NestedGroupSerializer,\n required=False,\n many=True\n )\n users = SerializedPKRelatedField(\n queryset=User.objects.all(),\n serializer=NestedUserSerializer,\n required=False,\n many=True\n )\n\n class Meta:\n model = ObjectPermission\n fields = (\n 'id', 'url', 'display', 'name', 'description', 'enabled', 'object_types', 'groups', 'users', 'actions',\n 'constraints',\n )\n", "path": "netbox/users/api/serializers.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.contrib.auth.models import Group, User\nfrom django.contrib.contenttypes.models import ContentType\nfrom rest_framework import serializers\nfrom rest_framework.exceptions import PermissionDenied\n\nfrom netbox.api.fields import ContentTypeField, IPNetworkSerializer, SerializedPKRelatedField\nfrom netbox.api.serializers import ValidatedModelSerializer\nfrom users.models import ObjectPermission, Token\nfrom .nested_serializers import *\n\n\n__all__ = (\n 'GroupSerializer',\n 'ObjectPermissionSerializer',\n 'TokenSerializer',\n 'UserSerializer',\n)\n\n\nclass UserSerializer(ValidatedModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='users-api:user-detail')\n groups = SerializedPKRelatedField(\n queryset=Group.objects.all(),\n serializer=NestedGroupSerializer,\n required=False,\n many=True\n )\n\n class Meta:\n model = User\n fields = (\n 'id', 'url', 'display', 'username', 'password', 'first_name', 'last_name', 'email', 'is_staff', 'is_active',\n 'date_joined', 'groups',\n )\n extra_kwargs = {\n 'password': {'write_only': True}\n }\n\n def create(self, validated_data):\n \"\"\"\n Extract the password from validated data and set it separately to ensure proper hash generation.\n \"\"\"\n password = validated_data.pop('password')\n user = super().create(validated_data)\n user.set_password(password)\n user.save()\n\n return user\n\n def get_display(self, obj):\n if full_name := obj.get_full_name():\n return f\"{obj.username} ({full_name})\"\n return obj.username\n\n\nclass GroupSerializer(ValidatedModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='users-api:group-detail')\n user_count = serializers.IntegerField(read_only=True)\n\n 
class Meta:\n model = Group\n fields = ('id', 'url', 'display', 'name', 'user_count')\n\n\nclass TokenSerializer(ValidatedModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='users-api:token-detail')\n key = serializers.CharField(\n min_length=40,\n max_length=40,\n allow_blank=True,\n required=False,\n write_only=not settings.ALLOW_TOKEN_RETRIEVAL\n )\n user = NestedUserSerializer()\n allowed_ips = serializers.ListField(\n child=IPNetworkSerializer(),\n required=False,\n allow_empty=True,\n default=[]\n )\n\n class Meta:\n model = Token\n fields = (\n 'id', 'url', 'display', 'user', 'created', 'expires', 'last_used', 'key', 'write_enabled', 'description',\n 'allowed_ips',\n )\n\n def to_internal_value(self, data):\n if 'key' not in data:\n data['key'] = Token.generate_key()\n return super().to_internal_value(data)\n\n def validate(self, data):\n\n # If the Token is being created on behalf of another user, enforce the grant_token permission.\n request = self.context.get('request')\n token_user = data.get('user')\n if token_user and token_user != request.user and not request.user.has_perm('users.grant_token'):\n raise PermissionDenied(\"This user does not have permission to create tokens for other users.\")\n\n return super().validate(data)\n\n\nclass TokenProvisionSerializer(serializers.Serializer):\n username = serializers.CharField()\n password = serializers.CharField()\n\n\nclass ObjectPermissionSerializer(ValidatedModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='users-api:objectpermission-detail')\n object_types = ContentTypeField(\n queryset=ContentType.objects.all(),\n many=True\n )\n groups = SerializedPKRelatedField(\n queryset=Group.objects.all(),\n serializer=NestedGroupSerializer,\n required=False,\n many=True\n )\n users = SerializedPKRelatedField(\n queryset=User.objects.all(),\n serializer=NestedUserSerializer,\n required=False,\n many=True\n )\n\n class Meta:\n model = ObjectPermission\n fields = (\n 'id', 'url', 'display', 'name', 'description', 'enabled', 'object_types', 'groups', 'users', 'actions',\n 'constraints',\n )\n", "path": "netbox/users/api/serializers.py"}]} | 1,476 | 268 |
gh_patches_debug_9940 | rasdani/github-patches | git_diff | ray-project__ray-4175 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
dashboard.py is not packaged in the Linux Ray wheels.
See the conversation in https://groups.google.com/forum/?utm_medium=email&utm_source=footer#!msg/ray-dev/M8wGAdEhkTw/QIbvbuoJBAAJ.
I think we can fix this just by putting `__init__.py` in the `ray/python/dashboard` directory, though we have to make sure that the packaged directory also includes the HTML and JavaScript files.
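
As one possible shape of the fix (a sketch only; the exact paths and mechanism may differ), the dashboard script and its static assets could be added to the `ray_files` list that `build_ext` already copies into the wheel in `python/setup.py`:

```python
# Hypothetical addition to python/setup.py: ship the dashboard script and its
# static assets the same way the other non-Python data files are shipped.
ray_files += [
    "ray/dashboard/dashboard.py",
    "ray/dashboard/index.html",
    "ray/dashboard/res/main.css",
    "ray/dashboard/res/main.js",
]
```

Alternatively, `package_data` together with the existing `include_package_data=True` could pick the files up, but listing them explicitly matches how the rest of the non-Python files are handled in this setup script.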
cc @virtualluke
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/setup.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import print_function
4
5 import os
6 import re
7 import shutil
8 import subprocess
9 import sys
10
11 from setuptools import setup, find_packages, Distribution
12 import setuptools.command.build_ext as _build_ext
13
14 # Ideally, we could include these files by putting them in a
15 # MANIFEST.in or using the package_data argument to setup, but the
16 # MANIFEST.in gets applied at the very beginning when setup.py runs
17 # before these files have been created, so we have to move the files
18 # manually.
19
20 # NOTE: The lists below must be kept in sync with ray/CMakeLists.txt.
21
22 ray_files = [
23 "ray/core/src/ray/thirdparty/redis/src/redis-server",
24 "ray/core/src/ray/gcs/redis_module/libray_redis_module.so",
25 "ray/core/src/plasma/plasma_store_server", "ray/_raylet.so",
26 "ray/core/src/ray/raylet/raylet_monitor", "ray/core/src/ray/raylet/raylet",
27 "ray/WebUI.ipynb"
28 ]
29
30 # These are the directories where automatically generated Python flatbuffer
31 # bindings are created.
32 generated_python_directories = [
33 "ray/core/generated", "ray/core/generated/ray",
34 "ray/core/generated/ray/protocol"
35 ]
36
37 optional_ray_files = []
38
39 ray_ui_files = [
40 "ray/core/src/catapult_files/index.html",
41 "ray/core/src/catapult_files/trace_viewer_full.html"
42 ]
43
44 ray_autoscaler_files = [
45 "ray/autoscaler/aws/example-full.yaml",
46 "ray/autoscaler/gcp/example-full.yaml",
47 "ray/autoscaler/local/example-full.yaml",
48 ]
49
50 if "RAY_USE_NEW_GCS" in os.environ and os.environ["RAY_USE_NEW_GCS"] == "on":
51 ray_files += [
52 "ray/core/src/credis/build/src/libmember.so",
53 "ray/core/src/credis/build/src/libmaster.so",
54 "ray/core/src/credis/redis/src/redis-server"
55 ]
56
57 # The UI files are mandatory if the INCLUDE_UI environment variable equals 1.
58 # Otherwise, they are optional.
59 if "INCLUDE_UI" in os.environ and os.environ["INCLUDE_UI"] == "1":
60 ray_files += ray_ui_files
61 else:
62 optional_ray_files += ray_ui_files
63
64 optional_ray_files += ray_autoscaler_files
65
66 extras = {
67 "rllib": [
68 "pyyaml", "gym[atari]", "opencv-python-headless", "lz4", "scipy"
69 ],
70 "debug": ["psutil", "setproctitle", "py-spy"],
71 "dashboard": ["psutil", "aiohttp"],
72 }
73
74
75 class build_ext(_build_ext.build_ext):
76 def run(self):
77 # Note: We are passing in sys.executable so that we use the same
78 # version of Python to build pyarrow inside the build.sh script. Note
79 # that certain flags will not be passed along such as --user or sudo.
80 # TODO(rkn): Fix this.
81 subprocess.check_call(["../build.sh", "-p", sys.executable])
82
83 # We also need to install pyarrow along with Ray, so make sure that the
84 # relevant non-Python pyarrow files get copied.
85 pyarrow_files = []
86 for (root, dirs, filenames) in os.walk("./ray/pyarrow_files/pyarrow"):
87 for name in filenames:
88 pyarrow_files.append(os.path.join(root, name))
89
90 # Make sure the relevant files for modin get copied.
91 modin_files = []
92 for (root, dirs, filenames) in os.walk("./ray/modin"):
93 for name in filenames:
94 modin_files.append(os.path.join(root, name))
95
96 files_to_include = ray_files + pyarrow_files + modin_files
97
98 # Copy over the autogenerated flatbuffer Python bindings.
99 for directory in generated_python_directories:
100 for filename in os.listdir(directory):
101 if filename[-3:] == ".py":
102 files_to_include.append(os.path.join(directory, filename))
103
104 for filename in files_to_include:
105 self.move_file(filename)
106
107 # Try to copy over the optional files.
108 for filename in optional_ray_files:
109 try:
110 self.move_file(filename)
111 except Exception:
112 print("Failed to copy optional file {}. This is ok."
113 .format(filename))
114
115 def move_file(self, filename):
116 # TODO(rkn): This feels very brittle. It may not handle all cases. See
117 # https://github.com/apache/arrow/blob/master/python/setup.py for an
118 # example.
119 source = filename
120 destination = os.path.join(self.build_lib, filename)
121 # Create the target directory if it doesn't already exist.
122 parent_directory = os.path.dirname(destination)
123 if not os.path.exists(parent_directory):
124 os.makedirs(parent_directory)
125 if not os.path.exists(destination):
126 print("Copying {} to {}.".format(source, destination))
127 shutil.copy(source, destination)
128
129
130 class BinaryDistribution(Distribution):
131 def has_ext_modules(self):
132 return True
133
134
135 def find_version(*filepath):
136 # Extract version information from filepath
137 here = os.path.abspath(os.path.dirname(__file__))
138 with open(os.path.join(here, *filepath)) as fp:
139 version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
140 fp.read(), re.M)
141 if version_match:
142 return version_match.group(1)
143 raise RuntimeError("Unable to find version string.")
144
145
146 requires = [
147 "numpy >= 1.10.4",
148 "filelock",
149 "funcsigs",
150 "click",
151 "colorama",
152 "pytest",
153 "pyyaml",
154 "redis",
155 "six >= 1.12.0",
156 # The typing module is required by modin.
157 "typing",
158 "flatbuffers",
159 "faulthandler;python_version<'3.3'",
160 ]
161
162 setup(
163 name="ray",
164 version=find_version("ray", "__init__.py"),
165 author="Ray Team",
166 author_email="[email protected]",
167 description=("A system for parallel and distributed Python that unifies "
168 "the ML ecosystem."),
169 long_description=open("../README.rst").read(),
170 url="https://github.com/ray-project/ray",
171 keywords=("ray distributed parallel machine-learning "
172 "reinforcement-learning deep-learning python"),
173 packages=find_packages(),
174 cmdclass={"build_ext": build_ext},
175 # The BinaryDistribution argument triggers build_ext.
176 distclass=BinaryDistribution,
177 install_requires=requires,
178 setup_requires=["cython >= 0.29"],
179 extras_require=extras,
180 entry_points={
181 "console_scripts": [
182 "ray=ray.scripts.scripts:main",
183 "rllib=ray.rllib.scripts:cli [rllib]"
184 ]
185 },
186 include_package_data=True,
187 zip_safe=False,
188 license="Apache 2.0")
189
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/python/setup.py b/python/setup.py
--- a/python/setup.py
+++ b/python/setup.py
@@ -24,7 +24,9 @@
"ray/core/src/ray/gcs/redis_module/libray_redis_module.so",
"ray/core/src/plasma/plasma_store_server", "ray/_raylet.so",
"ray/core/src/ray/raylet/raylet_monitor", "ray/core/src/ray/raylet/raylet",
- "ray/WebUI.ipynb"
+ "ray/WebUI.ipynb", "ray/dashboard/dashboard.py",
+ "ray/dashboard/index.html", "ray/dashboard/res/main.css",
+ "ray/dashboard/res/main.js"
]
# These are the directories where automatically generated Python flatbuffer
| {"golden_diff": "diff --git a/python/setup.py b/python/setup.py\n--- a/python/setup.py\n+++ b/python/setup.py\n@@ -24,7 +24,9 @@\n \"ray/core/src/ray/gcs/redis_module/libray_redis_module.so\",\n \"ray/core/src/plasma/plasma_store_server\", \"ray/_raylet.so\",\n \"ray/core/src/ray/raylet/raylet_monitor\", \"ray/core/src/ray/raylet/raylet\",\n- \"ray/WebUI.ipynb\"\n+ \"ray/WebUI.ipynb\", \"ray/dashboard/dashboard.py\",\n+ \"ray/dashboard/index.html\", \"ray/dashboard/res/main.css\",\n+ \"ray/dashboard/res/main.js\"\n ]\n \n # These are the directories where automatically generated Python flatbuffer\n", "issue": "dashboard.py is not packaged in the Linux Ray wheels.\nSee the conversation in https://groups.google.com/forum/?utm_medium=email&utm_source=footer#!msg/ray-dev/M8wGAdEhkTw/QIbvbuoJBAAJ.\r\n\r\nI think we can fix this just by putting `__init__.py` in the `ray/python/dashboard` directory, though we have to make sure that that includes the html and javascript files.\r\n\r\ncc @virtualluke\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sys\n\nfrom setuptools import setup, find_packages, Distribution\nimport setuptools.command.build_ext as _build_ext\n\n# Ideally, we could include these files by putting them in a\n# MANIFEST.in or using the package_data argument to setup, but the\n# MANIFEST.in gets applied at the very beginning when setup.py runs\n# before these files have been created, so we have to move the files\n# manually.\n\n# NOTE: The lists below must be kept in sync with ray/CMakeLists.txt.\n\nray_files = [\n \"ray/core/src/ray/thirdparty/redis/src/redis-server\",\n \"ray/core/src/ray/gcs/redis_module/libray_redis_module.so\",\n \"ray/core/src/plasma/plasma_store_server\", \"ray/_raylet.so\",\n \"ray/core/src/ray/raylet/raylet_monitor\", \"ray/core/src/ray/raylet/raylet\",\n \"ray/WebUI.ipynb\"\n]\n\n# These are the directories where automatically generated Python flatbuffer\n# bindings are created.\ngenerated_python_directories = [\n \"ray/core/generated\", \"ray/core/generated/ray\",\n \"ray/core/generated/ray/protocol\"\n]\n\noptional_ray_files = []\n\nray_ui_files = [\n \"ray/core/src/catapult_files/index.html\",\n \"ray/core/src/catapult_files/trace_viewer_full.html\"\n]\n\nray_autoscaler_files = [\n \"ray/autoscaler/aws/example-full.yaml\",\n \"ray/autoscaler/gcp/example-full.yaml\",\n \"ray/autoscaler/local/example-full.yaml\",\n]\n\nif \"RAY_USE_NEW_GCS\" in os.environ and os.environ[\"RAY_USE_NEW_GCS\"] == \"on\":\n ray_files += [\n \"ray/core/src/credis/build/src/libmember.so\",\n \"ray/core/src/credis/build/src/libmaster.so\",\n \"ray/core/src/credis/redis/src/redis-server\"\n ]\n\n# The UI files are mandatory if the INCLUDE_UI environment variable equals 1.\n# Otherwise, they are optional.\nif \"INCLUDE_UI\" in os.environ and os.environ[\"INCLUDE_UI\"] == \"1\":\n ray_files += ray_ui_files\nelse:\n optional_ray_files += ray_ui_files\n\noptional_ray_files += ray_autoscaler_files\n\nextras = {\n \"rllib\": [\n \"pyyaml\", \"gym[atari]\", \"opencv-python-headless\", \"lz4\", \"scipy\"\n ],\n \"debug\": [\"psutil\", \"setproctitle\", \"py-spy\"],\n \"dashboard\": [\"psutil\", \"aiohttp\"],\n}\n\n\nclass build_ext(_build_ext.build_ext):\n def run(self):\n # Note: We are passing in sys.executable so that we use the same\n # version of Python to build pyarrow inside the build.sh script. 
Note\n # that certain flags will not be passed along such as --user or sudo.\n # TODO(rkn): Fix this.\n subprocess.check_call([\"../build.sh\", \"-p\", sys.executable])\n\n # We also need to install pyarrow along with Ray, so make sure that the\n # relevant non-Python pyarrow files get copied.\n pyarrow_files = []\n for (root, dirs, filenames) in os.walk(\"./ray/pyarrow_files/pyarrow\"):\n for name in filenames:\n pyarrow_files.append(os.path.join(root, name))\n\n # Make sure the relevant files for modin get copied.\n modin_files = []\n for (root, dirs, filenames) in os.walk(\"./ray/modin\"):\n for name in filenames:\n modin_files.append(os.path.join(root, name))\n\n files_to_include = ray_files + pyarrow_files + modin_files\n\n # Copy over the autogenerated flatbuffer Python bindings.\n for directory in generated_python_directories:\n for filename in os.listdir(directory):\n if filename[-3:] == \".py\":\n files_to_include.append(os.path.join(directory, filename))\n\n for filename in files_to_include:\n self.move_file(filename)\n\n # Try to copy over the optional files.\n for filename in optional_ray_files:\n try:\n self.move_file(filename)\n except Exception:\n print(\"Failed to copy optional file {}. This is ok.\"\n .format(filename))\n\n def move_file(self, filename):\n # TODO(rkn): This feels very brittle. It may not handle all cases. See\n # https://github.com/apache/arrow/blob/master/python/setup.py for an\n # example.\n source = filename\n destination = os.path.join(self.build_lib, filename)\n # Create the target directory if it doesn't already exist.\n parent_directory = os.path.dirname(destination)\n if not os.path.exists(parent_directory):\n os.makedirs(parent_directory)\n if not os.path.exists(destination):\n print(\"Copying {} to {}.\".format(source, destination))\n shutil.copy(source, destination)\n\n\nclass BinaryDistribution(Distribution):\n def has_ext_modules(self):\n return True\n\n\ndef find_version(*filepath):\n # Extract version information from filepath\n here = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(here, *filepath)) as fp:\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n fp.read(), re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nrequires = [\n \"numpy >= 1.10.4\",\n \"filelock\",\n \"funcsigs\",\n \"click\",\n \"colorama\",\n \"pytest\",\n \"pyyaml\",\n \"redis\",\n \"six >= 1.12.0\",\n # The typing module is required by modin.\n \"typing\",\n \"flatbuffers\",\n \"faulthandler;python_version<'3.3'\",\n]\n\nsetup(\n name=\"ray\",\n version=find_version(\"ray\", \"__init__.py\"),\n author=\"Ray Team\",\n author_email=\"[email protected]\",\n description=(\"A system for parallel and distributed Python that unifies \"\n \"the ML ecosystem.\"),\n long_description=open(\"../README.rst\").read(),\n url=\"https://github.com/ray-project/ray\",\n keywords=(\"ray distributed parallel machine-learning \"\n \"reinforcement-learning deep-learning python\"),\n packages=find_packages(),\n cmdclass={\"build_ext\": build_ext},\n # The BinaryDistribution argument triggers build_ext.\n distclass=BinaryDistribution,\n install_requires=requires,\n setup_requires=[\"cython >= 0.29\"],\n extras_require=extras,\n entry_points={\n \"console_scripts\": [\n \"ray=ray.scripts.scripts:main\",\n \"rllib=ray.rllib.scripts:cli [rllib]\"\n ]\n },\n include_package_data=True,\n zip_safe=False,\n license=\"Apache 2.0\")\n", "path": "python/setup.py"}], "after_files": 
[{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sys\n\nfrom setuptools import setup, find_packages, Distribution\nimport setuptools.command.build_ext as _build_ext\n\n# Ideally, we could include these files by putting them in a\n# MANIFEST.in or using the package_data argument to setup, but the\n# MANIFEST.in gets applied at the very beginning when setup.py runs\n# before these files have been created, so we have to move the files\n# manually.\n\n# NOTE: The lists below must be kept in sync with ray/CMakeLists.txt.\n\nray_files = [\n \"ray/core/src/ray/thirdparty/redis/src/redis-server\",\n \"ray/core/src/ray/gcs/redis_module/libray_redis_module.so\",\n \"ray/core/src/plasma/plasma_store_server\", \"ray/_raylet.so\",\n \"ray/core/src/ray/raylet/raylet_monitor\", \"ray/core/src/ray/raylet/raylet\",\n \"ray/WebUI.ipynb\", \"ray/dashboard/dashboard.py\",\n \"ray/dashboard/index.html\", \"ray/dashboard/res/main.css\",\n \"ray/dashboard/res/main.js\"\n]\n\n# These are the directories where automatically generated Python flatbuffer\n# bindings are created.\ngenerated_python_directories = [\n \"ray/core/generated\", \"ray/core/generated/ray\",\n \"ray/core/generated/ray/protocol\"\n]\n\noptional_ray_files = []\n\nray_ui_files = [\n \"ray/core/src/catapult_files/index.html\",\n \"ray/core/src/catapult_files/trace_viewer_full.html\"\n]\n\nray_autoscaler_files = [\n \"ray/autoscaler/aws/example-full.yaml\",\n \"ray/autoscaler/gcp/example-full.yaml\",\n \"ray/autoscaler/local/example-full.yaml\",\n]\n\nif \"RAY_USE_NEW_GCS\" in os.environ and os.environ[\"RAY_USE_NEW_GCS\"] == \"on\":\n ray_files += [\n \"ray/core/src/credis/build/src/libmember.so\",\n \"ray/core/src/credis/build/src/libmaster.so\",\n \"ray/core/src/credis/redis/src/redis-server\"\n ]\n\n# The UI files are mandatory if the INCLUDE_UI environment variable equals 1.\n# Otherwise, they are optional.\nif \"INCLUDE_UI\" in os.environ and os.environ[\"INCLUDE_UI\"] == \"1\":\n ray_files += ray_ui_files\nelse:\n optional_ray_files += ray_ui_files\n\noptional_ray_files += ray_autoscaler_files\n\nextras = {\n \"rllib\": [\n \"pyyaml\", \"gym[atari]\", \"opencv-python-headless\", \"lz4\", \"scipy\"\n ],\n \"debug\": [\"psutil\", \"setproctitle\", \"py-spy\"],\n \"dashboard\": [\"psutil\", \"aiohttp\"],\n}\n\n\nclass build_ext(_build_ext.build_ext):\n def run(self):\n # Note: We are passing in sys.executable so that we use the same\n # version of Python to build pyarrow inside the build.sh script. 
Note\n # that certain flags will not be passed along such as --user or sudo.\n # TODO(rkn): Fix this.\n subprocess.check_call([\"../build.sh\", \"-p\", sys.executable])\n\n # We also need to install pyarrow along with Ray, so make sure that the\n # relevant non-Python pyarrow files get copied.\n pyarrow_files = []\n for (root, dirs, filenames) in os.walk(\"./ray/pyarrow_files/pyarrow\"):\n for name in filenames:\n pyarrow_files.append(os.path.join(root, name))\n\n # Make sure the relevant files for modin get copied.\n modin_files = []\n for (root, dirs, filenames) in os.walk(\"./ray/modin\"):\n for name in filenames:\n modin_files.append(os.path.join(root, name))\n\n files_to_include = ray_files + pyarrow_files + modin_files\n\n # Copy over the autogenerated flatbuffer Python bindings.\n for directory in generated_python_directories:\n for filename in os.listdir(directory):\n if filename[-3:] == \".py\":\n files_to_include.append(os.path.join(directory, filename))\n\n for filename in files_to_include:\n self.move_file(filename)\n\n # Try to copy over the optional files.\n for filename in optional_ray_files:\n try:\n self.move_file(filename)\n except Exception:\n print(\"Failed to copy optional file {}. This is ok.\"\n .format(filename))\n\n def move_file(self, filename):\n # TODO(rkn): This feels very brittle. It may not handle all cases. See\n # https://github.com/apache/arrow/blob/master/python/setup.py for an\n # example.\n source = filename\n destination = os.path.join(self.build_lib, filename)\n # Create the target directory if it doesn't already exist.\n parent_directory = os.path.dirname(destination)\n if not os.path.exists(parent_directory):\n os.makedirs(parent_directory)\n if not os.path.exists(destination):\n print(\"Copying {} to {}.\".format(source, destination))\n shutil.copy(source, destination)\n\n\nclass BinaryDistribution(Distribution):\n def has_ext_modules(self):\n return True\n\n\ndef find_version(*filepath):\n # Extract version information from filepath\n here = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(here, *filepath)) as fp:\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n fp.read(), re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nrequires = [\n \"numpy >= 1.10.4\",\n \"filelock\",\n \"funcsigs\",\n \"click\",\n \"colorama\",\n \"pytest\",\n \"pyyaml\",\n \"redis\",\n \"six >= 1.12.0\",\n # The typing module is required by modin.\n \"typing\",\n \"flatbuffers\",\n \"faulthandler;python_version<'3.3'\",\n]\n\nsetup(\n name=\"ray\",\n version=find_version(\"ray\", \"__init__.py\"),\n author=\"Ray Team\",\n author_email=\"[email protected]\",\n description=(\"A system for parallel and distributed Python that unifies \"\n \"the ML ecosystem.\"),\n long_description=open(\"../README.rst\").read(),\n url=\"https://github.com/ray-project/ray\",\n keywords=(\"ray distributed parallel machine-learning \"\n \"reinforcement-learning deep-learning python\"),\n packages=find_packages(),\n cmdclass={\"build_ext\": build_ext},\n # The BinaryDistribution argument triggers build_ext.\n distclass=BinaryDistribution,\n install_requires=requires,\n setup_requires=[\"cython >= 0.29\"],\n extras_require=extras,\n entry_points={\n \"console_scripts\": [\n \"ray=ray.scripts.scripts:main\",\n \"rllib=ray.rllib.scripts:cli [rllib]\"\n ]\n },\n include_package_data=True,\n zip_safe=False,\n license=\"Apache 2.0\")\n", "path": "python/setup.py"}]} | 2,320 | 165 |
gh_patches_debug_19276 | rasdani/github-patches | git_diff | spack__spack-5135 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot build elfutils
With the current head (b5eb298f3efde1ae32545a3363bed46e1811ab76)
```
$ spack install elfutils
==> Installing elfutils
==> Using cached archive: ~/Documents/git/spack/var/spack/cache/elfutils/elfutils-0.163.tar.bz2
==> Already staged elfutils-0.163-72pp6vadezugf5nmy5gbqksrmpukksa3 in ~/Documents/git/spack/var/spack/stage/elfutils-0.163-72pp6vadezugf5nmy5gbqksrmpukksa3
==> Already patched elfutils
==> Building elfutils [AutotoolsPackage]
==> Executing phase : 'autoreconf'
==> Executing phase : 'configure'
==> Error: ProcessError: Command exited with status 1:
'~/Documents/git/spack/var/spack/stage/elfutils-0.163-72pp6vadezugf5nmy5gbqksrmpukksa3/elfutils-0.163/configure' '--prefix=~/Documents/git/spack/opt/spack/linux-debian8-x86_64/gcc-4.9.2/elfutils-0.163-72pp6vadezugf5nmy5gbqksrmpukksa3' '--enable-maintainer-mode'
~/Documents/git/spack/lib/spack/spack/build_systems/autotools.py:266, in configure:
258 def configure(self, spec, prefix):
259 """Runs configure with the arguments specified in
260 :py:meth:`~.AutotoolsPackage.configure_args`
261 and an appropriately set prefix.
262 """
263 options = ['--prefix={0}'.format(prefix)] + self.configure_args()
264
265 with working_dir(self.build_directory, create=True):
>> 266 inspect.getmodule(self).configure(*options)
See build log for details:
~/spack-stage/spack-stage-9_hjUA/elfutils-0.163/spack-build.out
```
```
$ tail ~/spack-stage/spack-stage-9_hjUA/elfutils-0.163/spack-build.out
checking for inttypes.h... yes
checking for stdint.h... yes
checking for unistd.h... yes
checking size of long... 8
checking for struct user_regs_struct... yes
checking ~/Documents/git/spack/lib/spack/env/gcc/gcc option for 32-bit word size... -m32
checking for 64-bit host... yes
checking whether ~/Documents/git/spack/lib/spack/env/gcc/gcc -m32 makes executables we can run... yes
checking for flex... no
configure: error: flex needed in maintainer mode
```
Adding ```depends_on('flex')``` leads to
```
configure: error: bison needed in maintainer mode
```
Is this a known issue? How do I fix this?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `var/spack/repos/builtin/packages/elfutils/package.py`
Content:
```
1 ##############################################################################
2 # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
3 # Produced at the Lawrence Livermore National Laboratory.
4 #
5 # This file is part of Spack.
6 # Created by Todd Gamblin, [email protected], All rights reserved.
7 # LLNL-CODE-647188
8 #
9 # For details, see https://github.com/llnl/spack
10 # Please also see the NOTICE and LICENSE files for our notice and the LGPL.
11 #
12 # This program is free software; you can redistribute it and/or modify
13 # it under the terms of the GNU Lesser General Public License (as
14 # published by the Free Software Foundation) version 2.1, February 1999.
15 #
16 # This program is distributed in the hope that it will be useful, but
17 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
19 # conditions of the GNU Lesser General Public License for more details.
20 #
21 # You should have received a copy of the GNU Lesser General Public
22 # License along with this program; if not, write to the Free Software
23 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 ##############################################################################
25 from spack import *
26
27
28 class Elfutils(AutotoolsPackage):
29 """elfutils is a collection of various binary tools such as
30 eu-objdump, eu-readelf, and other utilities that allow you to
31 inspect and manipulate ELF files. Refer to Table 5.Tools Included
32 in elfutils for Red Hat Developer for a complete list of binary
33 tools that are distributed with the Red Hat Developer Toolset
34 version of elfutils."""
35
36 homepage = "https://fedorahosted.org/elfutils/"
37
38 url = "https://sourceware.org/elfutils/ftp/0.168/elfutils-0.168.tar.bz2"
39 list_url = "https://sourceware.org/elfutils/ftp"
40 list_depth = 1
41
42 version('0.168', '52adfa40758d0d39e5d5c57689bf38d6')
43 version('0.163', '77ce87f259987d2e54e4d87b86cbee41', preferred=True)
44
45 provides('elf@1')
46
47 def configure_args(self):
48 return ['--enable-maintainer-mode']
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/var/spack/repos/builtin/packages/elfutils/package.py b/var/spack/repos/builtin/packages/elfutils/package.py
--- a/var/spack/repos/builtin/packages/elfutils/package.py
+++ b/var/spack/repos/builtin/packages/elfutils/package.py
@@ -35,14 +35,22 @@
homepage = "https://fedorahosted.org/elfutils/"
- url = "https://sourceware.org/elfutils/ftp/0.168/elfutils-0.168.tar.bz2"
+ url = "https://sourceware.org/elfutils/ftp/0.168/elfutils-0.168.tar.bz2"
list_url = "https://sourceware.org/elfutils/ftp"
list_depth = 1
+ version('0.170', '03599aee98c9b726c7a732a2dd0245d5')
version('0.168', '52adfa40758d0d39e5d5c57689bf38d6')
version('0.163', '77ce87f259987d2e54e4d87b86cbee41', preferred=True)
+ depends_on('flex', type='build')
+ depends_on('bison', type='build')
+ depends_on('gettext')
+
provides('elf@1')
def configure_args(self):
- return ['--enable-maintainer-mode']
+ # configure doesn't use LIBS correctly
+ return [
+ 'LDFLAGS=-L%s -lintl' % self.spec['gettext'].prefix.lib,
+ '--enable-maintainer-mode']
| {"golden_diff": "diff --git a/var/spack/repos/builtin/packages/elfutils/package.py b/var/spack/repos/builtin/packages/elfutils/package.py\n--- a/var/spack/repos/builtin/packages/elfutils/package.py\n+++ b/var/spack/repos/builtin/packages/elfutils/package.py\n@@ -35,14 +35,22 @@\n \n homepage = \"https://fedorahosted.org/elfutils/\"\n \n- url = \"https://sourceware.org/elfutils/ftp/0.168/elfutils-0.168.tar.bz2\"\n+ url = \"https://sourceware.org/elfutils/ftp/0.168/elfutils-0.168.tar.bz2\"\n list_url = \"https://sourceware.org/elfutils/ftp\"\n list_depth = 1\n \n+ version('0.170', '03599aee98c9b726c7a732a2dd0245d5')\n version('0.168', '52adfa40758d0d39e5d5c57689bf38d6')\n version('0.163', '77ce87f259987d2e54e4d87b86cbee41', preferred=True)\n \n+ depends_on('flex', type='build')\n+ depends_on('bison', type='build')\n+ depends_on('gettext')\n+\n provides('elf@1')\n \n def configure_args(self):\n- return ['--enable-maintainer-mode']\n+ # configure doesn't use LIBS correctly\n+ return [\n+ 'LDFLAGS=-L%s -lintl' % self.spec['gettext'].prefix.lib,\n+ '--enable-maintainer-mode']\n", "issue": "Cannot build elfutils\nWith the current head (b5eb298f3efde1ae32545a3363bed46e1811ab76) \r\n\r\n```\r\n$ spack install elfutils\r\n==> Installing elfutils\r\n==> Using cached archive: ~/Documents/git/spack/var/spack/cache/elfutils/elfutils-0.163.tar.bz2\r\n==> Already staged elfutils-0.163-72pp6vadezugf5nmy5gbqksrmpukksa3 in ~/Documents/git/spack/var/spack/stage/elfutils-0.163-72pp6vadezugf5nmy5gbqksrmpukksa3\r\n==> Already patched elfutils\r\n==> Building elfutils [AutotoolsPackage]\r\n==> Executing phase : 'autoreconf'\r\n==> Executing phase : 'configure'\r\n==> Error: ProcessError: Command exited with status 1:\r\n '~/Documents/git/spack/var/spack/stage/elfutils-0.163-72pp6vadezugf5nmy5gbqksrmpukksa3/elfutils-0.163/configure' '--prefix=~/Documents/git/spack/opt/spack/linux-debian8-x86_64/gcc-4.9.2/elfutils-0.163-72pp6vadezugf5nmy5gbqksrmpukksa3' '--enable-maintainer-mode'\r\n~/Documents/git/spack/lib/spack/spack/build_systems/autotools.py:266, in configure:\r\n 258 def configure(self, spec, prefix):\r\n 259 \"\"\"Runs configure with the arguments specified in\r\n 260 :py:meth:`~.AutotoolsPackage.configure_args`\r\n 261 and an appropriately set prefix.\r\n 262 \"\"\"\r\n 263 options = ['--prefix={0}'.format(prefix)] + self.configure_args()\r\n 264 \r\n 265 with working_dir(self.build_directory, create=True):\r\n >> 266 inspect.getmodule(self).configure(*options)\r\n\r\nSee build log for details:\r\n ~/spack-stage/spack-stage-9_hjUA/elfutils-0.163/spack-build.out\r\n```\r\n```\r\n$ tail ~/spack-stage/spack-stage-9_hjUA/elfutils-0.163/spack-build.out\r\nchecking for inttypes.h... yes\r\nchecking for stdint.h... yes\r\nchecking for unistd.h... yes\r\nchecking size of long... 8\r\nchecking for struct user_regs_struct... yes\r\nchecking ~/Documents/git/spack/lib/spack/env/gcc/gcc option for 32-bit word size... -m32\r\nchecking for 64-bit host... yes\r\nchecking whether ~/Documents/git/spack/lib/spack/env/gcc/gcc -m32 makes executables we can run... yes\r\nchecking for flex... no\r\nconfigure: error: flex needed in maintainer mode\r\n```\r\nAdding ```depends_on('flex')``` leads to \r\n```\r\nconfigure: error: bison needed in maintainer mode\r\n```\r\n\r\nIs this a know issue? 
How do I fix this?\n", "before_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the NOTICE and LICENSE files for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom spack import *\n\n\nclass Elfutils(AutotoolsPackage):\n \"\"\"elfutils is a collection of various binary tools such as\n eu-objdump, eu-readelf, and other utilities that allow you to\n inspect and manipulate ELF files. Refer to Table 5.Tools Included\n in elfutils for Red Hat Developer for a complete list of binary\n tools that are distributed with the Red Hat Developer Toolset\n version of elfutils.\"\"\"\n\n homepage = \"https://fedorahosted.org/elfutils/\"\n\n url = \"https://sourceware.org/elfutils/ftp/0.168/elfutils-0.168.tar.bz2\"\n list_url = \"https://sourceware.org/elfutils/ftp\"\n list_depth = 1\n\n version('0.168', '52adfa40758d0d39e5d5c57689bf38d6')\n version('0.163', '77ce87f259987d2e54e4d87b86cbee41', preferred=True)\n\n provides('elf@1')\n\n def configure_args(self):\n return ['--enable-maintainer-mode']\n", "path": "var/spack/repos/builtin/packages/elfutils/package.py"}], "after_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the NOTICE and LICENSE files for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom spack import *\n\n\nclass Elfutils(AutotoolsPackage):\n \"\"\"elfutils is a collection of various binary tools such as\n eu-objdump, eu-readelf, and other utilities that allow you to\n inspect and manipulate ELF files. Refer to Table 5.Tools Included\n in elfutils for Red Hat Developer for a complete list of binary\n tools that are distributed with the Red Hat Developer Toolset\n version of elfutils.\"\"\"\n\n homepage = \"https://fedorahosted.org/elfutils/\"\n\n url = \"https://sourceware.org/elfutils/ftp/0.168/elfutils-0.168.tar.bz2\"\n list_url = \"https://sourceware.org/elfutils/ftp\"\n list_depth = 1\n\n version('0.170', '03599aee98c9b726c7a732a2dd0245d5')\n version('0.168', '52adfa40758d0d39e5d5c57689bf38d6')\n version('0.163', '77ce87f259987d2e54e4d87b86cbee41', preferred=True)\n\n depends_on('flex', type='build')\n depends_on('bison', type='build')\n depends_on('gettext')\n\n provides('elf@1')\n\n def configure_args(self):\n # configure doesn't use LIBS correctly\n return [\n 'LDFLAGS=-L%s -lintl' % self.spec['gettext'].prefix.lib,\n '--enable-maintainer-mode']\n", "path": "var/spack/repos/builtin/packages/elfutils/package.py"}]} | 1,635 | 413 |
gh_patches_debug_32608 | rasdani/github-patches | git_diff | ansible-collections__amazon.aws-2022 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The cloudwatchlogs_log_group_info module generates throttling exceptions
### Summary
When running the amazon.aws.cloudwatchlogs_log_group_info module on AWS accounts having more than (roughly) 50 log groups we get ThrottlingExceptions once every 20 calls or so. I noticed that both the describe and the list-tags boto calls in the cloudwatchlogs_log_group_info module have no throttling handling configured and use the default throttling handling policy, ie. none.
### Issue Type
Bug Report
### Component Name
amazon.aws.cloudwatchlogs_log_group_info
### Ansible Version
```console (paste below)
ansible [core 2.13.13]
config file = None
configured module search path = ['/home/rundeck/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/local/lib/python3.8/dist-packages/ansible
ansible collection location = /home/rundeck/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/local/bin/ansible
python version = 3.8.10 (default, Nov 22 2023, 10:22:35) [GCC 9.4.0]
jinja version = 3.1.3
libyaml = True
```
### Collection Versions
```console (paste below)
Collection Version
--------------------- -------
amazon.aws 7.3.0
community.general 8.3.0
nutanix.ncp 1.7.0
servicenow.servicenow 1.0.6
# /usr/local/lib/python3.8/dist-packages/ansible_collections
Collection Version
----------------------------- -------
amazon.aws 3.5.0
ansible.netcommon 3.1.3
ansible.posix 1.4.0
ansible.utils 2.8.0
ansible.windows 1.12.0
arista.eos 5.0.1
awx.awx 21.10.0
azure.azcollection 1.14.0
check_point.mgmt 2.3.0
chocolatey.chocolatey 1.3.1
cisco.aci 2.3.0
cisco.asa 3.1.0
cisco.dnac 6.6.1
cisco.intersight 1.0.22
cisco.ios 3.3.2
cisco.iosxr 3.3.1
cisco.ise 2.5.9
cisco.meraki 2.13.0
cisco.mso 2.1.0
cisco.nso 1.0.3
cisco.nxos 3.2.0
cisco.ucs 1.8.0
cloud.common 2.1.2
cloudscale_ch.cloud 2.2.3
community.aws 3.6.0
[...]
```
### AWS SDK versions
```console (paste below)
WARNING: Package(s) not found: boto
Name: boto3
Version: 1.34.45
Summary: The AWS SDK for Python
Home-page: https://github.com/boto/boto3
Author: Amazon Web Services
Author-email: None
License: Apache License 2.0
Location: /usr/local/lib/python3.8/dist-packages
Requires: botocore, jmespath, s3transfer
Required-by:
---
Name: botocore
Version: 1.34.45
Summary: Low-level, data-driven core of boto 3.
Home-page: https://github.com/boto/botocore
Author: Amazon Web Services
Author-email: None
License: Apache License 2.0
Location: /usr/local/lib/python3.8/dist-packages
Requires: python-dateutil, jmespath, urllib3
Required-by: s3transfer, boto3, awscli
```
### Configuration
```console (paste below)
(no Ansible configuration)
```
### OS / Environment
NAME="Ubuntu"
VERSION="20.04.6 LTS (Focal Fossa)"
ID=ubuntu
ID_LIKE=debian
PRETTY_NAME="Ubuntu 20.04.6 LTS"
### Steps to Reproduce
```yaml
- name: "Role based get all log groups in {{ selected_region }}"
amazon.aws.cloudwatchlogs_log_group_info:
region: "{{ selected_region }}"
aws_access_key: "{{ aws_access_key }}"
aws_secret_key: "{{ aws_secret_key }}"
aws_session_token: "{{ aws_session_token }}"
log_group_name: "{{ log_group_prefix }}"
register: log_groups
```
### Expected Results
Should get all log group information and tags without error. Optionally, adding an option to skip getting the tags for each log group would allow engineers to work around this issue.
### Actual Results
```console (paste below)
fatal: [127.0.0.1]: FAILED! => {"boto3_version": "1.34.45", "botocore_version": "1.34.45", "changed": false, "error": {"code": "ThrottlingException", "message": "Rate exceeded"}, "msg": "Unable to describe tags for log group /aws/codebuild/tankmaintenanceplanning-pipeline-tsa: An error occurred (ThrottlingException) when calling the ListTagsLogGroup operation (reached max retries: 4): Rate exceeded"
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/modules/cloudwatchlogs_log_group_info.py`
Content:
```
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # Copyright: Ansible Project
5 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
6
7 DOCUMENTATION = r"""
8 ---
9 module: cloudwatchlogs_log_group_info
10 version_added: 5.0.0
11 short_description: Get information about log_group in CloudWatchLogs
12 description:
13 - Lists the specified log groups. You can list all your log groups or filter the results by prefix.
14 - This module was originally added to C(community.aws) in release 1.0.0.
15 author:
16 - Willian Ricardo (@willricardo) <[email protected]>
17 options:
18 log_group_name:
19 description:
20 - The name or prefix of the log group to filter by.
21 type: str
22 extends_documentation_fragment:
23 - amazon.aws.common.modules
24 - amazon.aws.region.modules
25 - amazon.aws.boto3
26 """
27
28 EXAMPLES = r"""
29 # Note: These examples do not set authentication details, see the AWS Guide for details.
30 - amazon.aws.cloudwatchlogs_log_group_info:
31 log_group_name: test-log-group
32 """
33
34 RETURN = r"""
35 log_groups:
36 description: Return the list of complex objects representing log groups
37 returned: success
38 type: complex
39 contains:
40 log_group_name:
41 description: The name of the log group.
42 returned: always
43 type: str
44 creation_time:
45 description: The creation time of the log group.
46 returned: always
47 type: int
48 retention_in_days:
49 description: The number of days to retain the log events in the specified log group.
50 returned: always
51 type: int
52 metric_filter_count:
53 description: The number of metric filters.
54 returned: always
55 type: int
56 arn:
57 description: The Amazon Resource Name (ARN) of the log group.
58 returned: always
59 type: str
60 stored_bytes:
61 description: The number of bytes stored.
62 returned: always
63 type: str
64 kms_key_id:
65 description: The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.
66 returned: always
67 type: str
68 tags:
69 description: A dictionary representing the tags on the log group.
70 returned: always
71 type: dict
72 version_added: 4.0.0
73 version_added_collection: community.aws
74 """
75
76 try:
77 import botocore
78 except ImportError:
79 pass # Handled by AnsibleAWSModule
80
81 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
82
83 from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
84 from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
85
86
87 def describe_log_group(client, log_group_name, module):
88 params = {}
89 if log_group_name:
90 params["logGroupNamePrefix"] = log_group_name
91 try:
92 paginator = client.get_paginator("describe_log_groups")
93 desc_log_group = paginator.paginate(**params).build_full_result()
94 except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
95 module.fail_json_aws(e, msg=f"Unable to describe log group {log_group_name}")
96
97 for log_group in desc_log_group["logGroups"]:
98 log_group_name = log_group["logGroupName"]
99 try:
100 tags = client.list_tags_log_group(logGroupName=log_group_name)
101 except is_boto3_error_code("AccessDeniedException"):
102 tags = {}
103 module.warn(f"Permission denied listing tags for log group {log_group_name}")
104 except (
105 botocore.exceptions.ClientError,
106 botocore.exceptions.BotoCoreError,
107 ) as e: # pylint: disable=duplicate-except
108 module.fail_json_aws(e, msg=f"Unable to describe tags for log group {log_group_name}")
109 log_group["tags"] = tags.get("tags", {})
110
111 return desc_log_group
112
113
114 def main():
115 argument_spec = dict(
116 log_group_name=dict(),
117 )
118
119 module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
120
121 try:
122 logs = module.client("logs")
123 except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
124 module.fail_json_aws(e, msg="Failed to connect to AWS")
125
126 desc_log_group = describe_log_group(client=logs, log_group_name=module.params["log_group_name"], module=module)
127 final_log_group_snake = []
128
129 for log_group in desc_log_group["logGroups"]:
130 final_log_group_snake.append(camel_dict_to_snake_dict(log_group, ignore_list=["tags"]))
131
132 desc_log_group_result = dict(changed=False, log_groups=final_log_group_snake)
133 module.exit_json(**desc_log_group_result)
134
135
136 if __name__ == "__main__":
137 main()
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugins/modules/cloudwatchlogs_log_group_info.py b/plugins/modules/cloudwatchlogs_log_group_info.py
--- a/plugins/modules/cloudwatchlogs_log_group_info.py
+++ b/plugins/modules/cloudwatchlogs_log_group_info.py
@@ -82,6 +82,18 @@
from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+
[email protected]_backoff()
+def list_tags_log_group_with_backoff(client, log_group_name):
+ return client.list_tags_log_group(logGroupName=log_group_name)
+
+
[email protected]_backoff()
+def describe_log_groups_with_backoff(client, **kwargs):
+ paginator = client.get_paginator("describe_log_groups")
+ return paginator.paginate(**kwargs).build_full_result()
def describe_log_group(client, log_group_name, module):
@@ -89,15 +101,14 @@
if log_group_name:
params["logGroupNamePrefix"] = log_group_name
try:
- paginator = client.get_paginator("describe_log_groups")
- desc_log_group = paginator.paginate(**params).build_full_result()
+ desc_log_group = describe_log_groups_with_backoff(client, **params)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg=f"Unable to describe log group {log_group_name}")
for log_group in desc_log_group["logGroups"]:
log_group_name = log_group["logGroupName"]
try:
- tags = client.list_tags_log_group(logGroupName=log_group_name)
+ tags = list_tags_log_group_with_backoff(client, log_group_name)
except is_boto3_error_code("AccessDeniedException"):
tags = {}
module.warn(f"Permission denied listing tags for log group {log_group_name}")
| {"golden_diff": "diff --git a/plugins/modules/cloudwatchlogs_log_group_info.py b/plugins/modules/cloudwatchlogs_log_group_info.py\n--- a/plugins/modules/cloudwatchlogs_log_group_info.py\n+++ b/plugins/modules/cloudwatchlogs_log_group_info.py\n@@ -82,6 +82,18 @@\n \n from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code\n from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule\n+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry\n+\n+\[email protected]_backoff()\n+def list_tags_log_group_with_backoff(client, log_group_name):\n+ return client.list_tags_log_group(logGroupName=log_group_name)\n+\n+\[email protected]_backoff()\n+def describe_log_groups_with_backoff(client, **kwargs):\n+ paginator = client.get_paginator(\"describe_log_groups\")\n+ return paginator.paginate(**kwargs).build_full_result()\n \n \n def describe_log_group(client, log_group_name, module):\n@@ -89,15 +101,14 @@\n if log_group_name:\n params[\"logGroupNamePrefix\"] = log_group_name\n try:\n- paginator = client.get_paginator(\"describe_log_groups\")\n- desc_log_group = paginator.paginate(**params).build_full_result()\n+ desc_log_group = describe_log_groups_with_backoff(client, **params)\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(e, msg=f\"Unable to describe log group {log_group_name}\")\n \n for log_group in desc_log_group[\"logGroups\"]:\n log_group_name = log_group[\"logGroupName\"]\n try:\n- tags = client.list_tags_log_group(logGroupName=log_group_name)\n+ tags = list_tags_log_group_with_backoff(client, log_group_name)\n except is_boto3_error_code(\"AccessDeniedException\"):\n tags = {}\n module.warn(f\"Permission denied listing tags for log group {log_group_name}\")\n", "issue": "The cloudwatchlogs_log_group_info module generates throttling exceptions\n### Summary\n\nWhen running the amazon.aws.cloudwatchlogs_log_group_info module on AWS accounts having more than (roughly) 50 log groups we get ThrottlingExceptions once every 20 calls or so. I noticed that both the describe and the list-tags boto calls in the cloudwatchlogs_log_group_info module have no throttling handling configured and use the default throttling handling policy, ie. none. 
\n\n### Issue Type\n\nBug Report\n\n### Component Name\n\namazon.aws.cloudwatchlogs_log_group_info\n\n### Ansible Version\n\n```console (paste below)\r\nansible [core 2.13.13]\r\n config file = None\r\n configured module search path = ['/home/rundeck/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /usr/local/lib/python3.8/dist-packages/ansible\r\n ansible collection location = /home/rundeck/.ansible/collections:/usr/share/ansible/collections\r\n executable location = /usr/local/bin/ansible\r\n python version = 3.8.10 (default, Nov 22 2023, 10:22:35) [GCC 9.4.0]\r\n jinja version = 3.1.3\r\n libyaml = True\r\n\r\n```\r\n\n\n### Collection Versions\n\n```console (paste below)\r\nCollection Version\r\n--------------------- -------\r\namazon.aws 7.3.0\r\ncommunity.general 8.3.0\r\nnutanix.ncp 1.7.0\r\nservicenow.servicenow 1.0.6\r\n\r\n# /usr/local/lib/python3.8/dist-packages/ansible_collections\r\nCollection Version\r\n----------------------------- -------\r\namazon.aws 3.5.0\r\nansible.netcommon 3.1.3\r\nansible.posix 1.4.0\r\nansible.utils 2.8.0\r\nansible.windows 1.12.0\r\narista.eos 5.0.1\r\nawx.awx 21.10.0\r\nazure.azcollection 1.14.0\r\ncheck_point.mgmt 2.3.0\r\nchocolatey.chocolatey 1.3.1\r\ncisco.aci 2.3.0\r\ncisco.asa 3.1.0\r\ncisco.dnac 6.6.1\r\ncisco.intersight 1.0.22\r\ncisco.ios 3.3.2\r\ncisco.iosxr 3.3.1\r\ncisco.ise 2.5.9\r\ncisco.meraki 2.13.0\r\ncisco.mso 2.1.0\r\ncisco.nso 1.0.3\r\ncisco.nxos 3.2.0\r\ncisco.ucs 1.8.0\r\ncloud.common 2.1.2\r\ncloudscale_ch.cloud 2.2.3\r\ncommunity.aws 3.6.0\r\n[...]\r\n```\r\n\n\n### AWS SDK versions\n\n```console (paste below)\r\nWARNING: Package(s) not found: boto\r\nName: boto3\r\nVersion: 1.34.45\r\nSummary: The AWS SDK for Python\r\nHome-page: https://github.com/boto/boto3\r\nAuthor: Amazon Web Services\r\nAuthor-email: None\r\nLicense: Apache License 2.0\r\nLocation: /usr/local/lib/python3.8/dist-packages\r\nRequires: botocore, jmespath, s3transfer\r\nRequired-by:\r\n---\r\nName: botocore\r\nVersion: 1.34.45\r\nSummary: Low-level, data-driven core of boto 3.\r\nHome-page: https://github.com/boto/botocore\r\nAuthor: Amazon Web Services\r\nAuthor-email: None\r\nLicense: Apache License 2.0\r\nLocation: /usr/local/lib/python3.8/dist-packages\r\nRequires: python-dateutil, jmespath, urllib3\r\nRequired-by: s3transfer, boto3, awscli\r\n```\r\n\n\n### Configuration\n\n```console (paste below)\r\n(no Ansible configuration)\r\n```\r\n\n\n### OS / Environment\n\nNAME=\"Ubuntu\"\r\nVERSION=\"20.04.6 LTS (Focal Fossa)\"\r\nID=ubuntu\r\nID_LIKE=debian\r\nPRETTY_NAME=\"Ubuntu 20.04.6 LTS\"\n\n### Steps to Reproduce\n\n```yaml \r\n- name: \"Role based get all log groups in {{ selected_region }}\"\r\n amazon.aws.cloudwatchlogs_log_group_info:\r\n region: \"{{ selected_region }}\"\r\n aws_access_key: \"{{ aws_access_key }}\"\r\n aws_secret_key: \"{{ aws_secret_key }}\"\r\n aws_session_token: \"{{ aws_session_token }}\"\r\n log_group_name: \"{{ log_group_prefix }}\"\r\n register: log_groups\r\n```\r\n\n\n### Expected Results\n\nShould get all log group information and tags without error. Optionally, allow us to add an option to skip getting the tags for each log group would allow engineers to work around this issue. \n\n### Actual Results\n\n```console (paste below)\r\nfatal: [127.0.0.1]: FAILED! 
=> {\"boto3_version\": \"1.34.45\", \"botocore_version\": \"1.34.45\", \"changed\": false, \"error\": {\"code\": \"ThrottlingException\", \"message\": \"Rate exceeded\"}, \"msg\": \"Unable to describe tags for log group /aws/codebuild/tankmaintenanceplanning-pipeline-tsa: An error occurred (ThrottlingException) when calling the ListTagsLogGroup operation (reached max retries: 4): Rate exceeded\"\r\n```\r\n\n\n### Code of Conduct\n\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nDOCUMENTATION = r\"\"\"\n---\nmodule: cloudwatchlogs_log_group_info\nversion_added: 5.0.0\nshort_description: Get information about log_group in CloudWatchLogs\ndescription:\n - Lists the specified log groups. You can list all your log groups or filter the results by prefix.\n - This module was originally added to C(community.aws) in release 1.0.0.\nauthor:\n - Willian Ricardo (@willricardo) <[email protected]>\noptions:\n log_group_name:\n description:\n - The name or prefix of the log group to filter by.\n type: str\nextends_documentation_fragment:\n - amazon.aws.common.modules\n - amazon.aws.region.modules\n - amazon.aws.boto3\n\"\"\"\n\nEXAMPLES = r\"\"\"\n# Note: These examples do not set authentication details, see the AWS Guide for details.\n- amazon.aws.cloudwatchlogs_log_group_info:\n log_group_name: test-log-group\n\"\"\"\n\nRETURN = r\"\"\"\nlog_groups:\n description: Return the list of complex objects representing log groups\n returned: success\n type: complex\n contains:\n log_group_name:\n description: The name of the log group.\n returned: always\n type: str\n creation_time:\n description: The creation time of the log group.\n returned: always\n type: int\n retention_in_days:\n description: The number of days to retain the log events in the specified log group.\n returned: always\n type: int\n metric_filter_count:\n description: The number of metric filters.\n returned: always\n type: int\n arn:\n description: The Amazon Resource Name (ARN) of the log group.\n returned: always\n type: str\n stored_bytes:\n description: The number of bytes stored.\n returned: always\n type: str\n kms_key_id:\n description: The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.\n returned: always\n type: str\n tags:\n description: A dictionary representing the tags on the log group.\n returned: always\n type: dict\n version_added: 4.0.0\n version_added_collection: community.aws\n\"\"\"\n\ntry:\n import botocore\nexcept ImportError:\n pass # Handled by AnsibleAWSModule\n\nfrom ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict\n\nfrom ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code\nfrom ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule\n\n\ndef describe_log_group(client, log_group_name, module):\n params = {}\n if log_group_name:\n params[\"logGroupNamePrefix\"] = log_group_name\n try:\n paginator = client.get_paginator(\"describe_log_groups\")\n desc_log_group = paginator.paginate(**params).build_full_result()\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(e, msg=f\"Unable to describe log group {log_group_name}\")\n\n for log_group in desc_log_group[\"logGroups\"]:\n log_group_name = log_group[\"logGroupName\"]\n try:\n tags = 
client.list_tags_log_group(logGroupName=log_group_name)\n except is_boto3_error_code(\"AccessDeniedException\"):\n tags = {}\n module.warn(f\"Permission denied listing tags for log group {log_group_name}\")\n except (\n botocore.exceptions.ClientError,\n botocore.exceptions.BotoCoreError,\n ) as e: # pylint: disable=duplicate-except\n module.fail_json_aws(e, msg=f\"Unable to describe tags for log group {log_group_name}\")\n log_group[\"tags\"] = tags.get(\"tags\", {})\n\n return desc_log_group\n\n\ndef main():\n argument_spec = dict(\n log_group_name=dict(),\n )\n\n module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)\n\n try:\n logs = module.client(\"logs\")\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(e, msg=\"Failed to connect to AWS\")\n\n desc_log_group = describe_log_group(client=logs, log_group_name=module.params[\"log_group_name\"], module=module)\n final_log_group_snake = []\n\n for log_group in desc_log_group[\"logGroups\"]:\n final_log_group_snake.append(camel_dict_to_snake_dict(log_group, ignore_list=[\"tags\"]))\n\n desc_log_group_result = dict(changed=False, log_groups=final_log_group_snake)\n module.exit_json(**desc_log_group_result)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "plugins/modules/cloudwatchlogs_log_group_info.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nDOCUMENTATION = r\"\"\"\n---\nmodule: cloudwatchlogs_log_group_info\nversion_added: 5.0.0\nshort_description: Get information about log_group in CloudWatchLogs\ndescription:\n - Lists the specified log groups. You can list all your log groups or filter the results by prefix.\n - This module was originally added to C(community.aws) in release 1.0.0.\nauthor:\n - Willian Ricardo (@willricardo) <[email protected]>\noptions:\n log_group_name:\n description:\n - The name or prefix of the log group to filter by.\n type: str\nextends_documentation_fragment:\n - amazon.aws.common.modules\n - amazon.aws.region.modules\n - amazon.aws.boto3\n\"\"\"\n\nEXAMPLES = r\"\"\"\n# Note: These examples do not set authentication details, see the AWS Guide for details.\n- amazon.aws.cloudwatchlogs_log_group_info:\n log_group_name: test-log-group\n\"\"\"\n\nRETURN = r\"\"\"\nlog_groups:\n description: Return the list of complex objects representing log groups\n returned: success\n type: complex\n contains:\n log_group_name:\n description: The name of the log group.\n returned: always\n type: str\n creation_time:\n description: The creation time of the log group.\n returned: always\n type: int\n retention_in_days:\n description: The number of days to retain the log events in the specified log group.\n returned: always\n type: int\n metric_filter_count:\n description: The number of metric filters.\n returned: always\n type: int\n arn:\n description: The Amazon Resource Name (ARN) of the log group.\n returned: always\n type: str\n stored_bytes:\n description: The number of bytes stored.\n returned: always\n type: str\n kms_key_id:\n description: The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.\n returned: always\n type: str\n tags:\n description: A dictionary representing the tags on the log group.\n returned: always\n type: dict\n version_added: 4.0.0\n version_added_collection: community.aws\n\"\"\"\n\ntry:\n import botocore\nexcept ImportError:\n 
pass # Handled by AnsibleAWSModule\n\nfrom ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict\n\nfrom ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code\nfrom ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule\nfrom ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry\n\n\[email protected]_backoff()\ndef list_tags_log_group_with_backoff(client, log_group_name):\n return client.list_tags_log_group(logGroupName=log_group_name)\n\n\[email protected]_backoff()\ndef describe_log_groups_with_backoff(client, **kwargs):\n paginator = client.get_paginator(\"describe_log_groups\")\n return paginator.paginate(**kwargs).build_full_result()\n\n\ndef describe_log_group(client, log_group_name, module):\n params = {}\n if log_group_name:\n params[\"logGroupNamePrefix\"] = log_group_name\n try:\n desc_log_group = describe_log_groups_with_backoff(client, **params)\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(e, msg=f\"Unable to describe log group {log_group_name}\")\n\n for log_group in desc_log_group[\"logGroups\"]:\n log_group_name = log_group[\"logGroupName\"]\n try:\n tags = list_tags_log_group_with_backoff(client, log_group_name)\n except is_boto3_error_code(\"AccessDeniedException\"):\n tags = {}\n module.warn(f\"Permission denied listing tags for log group {log_group_name}\")\n except (\n botocore.exceptions.ClientError,\n botocore.exceptions.BotoCoreError,\n ) as e: # pylint: disable=duplicate-except\n module.fail_json_aws(e, msg=f\"Unable to describe tags for log group {log_group_name}\")\n log_group[\"tags\"] = tags.get(\"tags\", {})\n\n return desc_log_group\n\n\ndef main():\n argument_spec = dict(\n log_group_name=dict(),\n )\n\n module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)\n\n try:\n logs = module.client(\"logs\")\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(e, msg=\"Failed to connect to AWS\")\n\n desc_log_group = describe_log_group(client=logs, log_group_name=module.params[\"log_group_name\"], module=module)\n final_log_group_snake = []\n\n for log_group in desc_log_group[\"logGroups\"]:\n final_log_group_snake.append(camel_dict_to_snake_dict(log_group, ignore_list=[\"tags\"]))\n\n desc_log_group_result = dict(changed=False, log_groups=final_log_group_snake)\n module.exit_json(**desc_log_group_result)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "plugins/modules/cloudwatchlogs_log_group_info.py"}]} | 2,902 | 432 |
gh_patches_debug_11891 | rasdani/github-patches | git_diff | saleor__saleor-2368 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot place order in saleor demo's storefront
### What I'm trying to achieve
Place an order on the demo store to reproduce another bug. :wink:
### Steps to reproduce the problem
1. Create a cart with an item;
2. Follow the checkout until the summary page;
3. Try to hit "Order & Pay";
4. A server error should occur.
**System information**
```
Host: demo.getsaleor.com
User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0
Accept-Language: en,en-GB;q=0.8,en-US
Accept-Encoding: gzip, deflate, br
Referer: https://demo.getsaleor.com/en/checkout/summary/
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/account/utils.py`
Content:
```
1 from ..checkout import AddressType
2 from ..core.demo_obfuscators import obfuscate_address
3
4
5 def store_user_address(user, address, address_type):
6 """Add address to user address book and set as default one."""
7 address, _ = user.addresses.get_or_create(**address.as_data())
8
9 # DEMO: obfuscate user address
10 address = obfuscate_address(address)
11
12 if address_type == AddressType.BILLING:
13 if not user.default_billing_address:
14 user.default_billing_address = address
15 user.save(update_fields=['default_billing_address'])
16 elif address_type == AddressType.SHIPPING:
17 if not user.default_shipping_address:
18 user.default_shipping_address = address
19 user.save(update_fields=['default_shipping_address'])
20
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/saleor/account/utils.py b/saleor/account/utils.py
--- a/saleor/account/utils.py
+++ b/saleor/account/utils.py
@@ -4,11 +4,11 @@
def store_user_address(user, address, address_type):
"""Add address to user address book and set as default one."""
- address, _ = user.addresses.get_or_create(**address.as_data())
-
# DEMO: obfuscate user address
address = obfuscate_address(address)
+ address, _ = user.addresses.get_or_create(**address.as_data())
+
if address_type == AddressType.BILLING:
if not user.default_billing_address:
user.default_billing_address = address
| {"golden_diff": "diff --git a/saleor/account/utils.py b/saleor/account/utils.py\n--- a/saleor/account/utils.py\n+++ b/saleor/account/utils.py\n@@ -4,11 +4,11 @@\n \n def store_user_address(user, address, address_type):\n \"\"\"Add address to user address book and set as default one.\"\"\"\n- address, _ = user.addresses.get_or_create(**address.as_data())\n-\n # DEMO: obfuscate user address\n address = obfuscate_address(address)\n \n+ address, _ = user.addresses.get_or_create(**address.as_data())\n+\n if address_type == AddressType.BILLING:\n if not user.default_billing_address:\n user.default_billing_address = address\n", "issue": "Cannot place order in saleor demo's storefront\n### What I'm trying to achieve\r\nPlace an order on the demo store to reproduce another bug. :wink:\r\n\r\n### Steps to reproduce the problem\r\n1. Create a cart with an item;\r\n2. Follow the checkout until the summary page;\r\n3. Try to hit \"Order & Pay\";\r\n4. A server error should occur.\r\n\r\n**System information**\r\n```\r\nHost: demo.getsaleor.com\r\nUser-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0\r\nAccept-Language: en,en-GB;q=0.8,en-US\r\nAccept-Encoding: gzip, deflate, br\r\nReferer: https://demo.getsaleor.com/en/checkout/summary/\r\n```\n", "before_files": [{"content": "from ..checkout import AddressType\nfrom ..core.demo_obfuscators import obfuscate_address\n\n\ndef store_user_address(user, address, address_type):\n \"\"\"Add address to user address book and set as default one.\"\"\"\n address, _ = user.addresses.get_or_create(**address.as_data())\n\n # DEMO: obfuscate user address\n address = obfuscate_address(address)\n\n if address_type == AddressType.BILLING:\n if not user.default_billing_address:\n user.default_billing_address = address\n user.save(update_fields=['default_billing_address'])\n elif address_type == AddressType.SHIPPING:\n if not user.default_shipping_address:\n user.default_shipping_address = address\n user.save(update_fields=['default_shipping_address'])\n", "path": "saleor/account/utils.py"}], "after_files": [{"content": "from ..checkout import AddressType\nfrom ..core.demo_obfuscators import obfuscate_address\n\n\ndef store_user_address(user, address, address_type):\n \"\"\"Add address to user address book and set as default one.\"\"\"\n # DEMO: obfuscate user address\n address = obfuscate_address(address)\n\n address, _ = user.addresses.get_or_create(**address.as_data())\n\n if address_type == AddressType.BILLING:\n if not user.default_billing_address:\n user.default_billing_address = address\n user.save(update_fields=['default_billing_address'])\n elif address_type == AddressType.SHIPPING:\n if not user.default_shipping_address:\n user.default_shipping_address = address\n user.save(update_fields=['default_shipping_address'])\n", "path": "saleor/account/utils.py"}]} | 627 | 158 |
gh_patches_debug_16002 | rasdani/github-patches | git_diff | praw-dev__praw-910 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Accessing emoji causes recursion limit error
Given a `client_id`, `client_secret`, and `user_agent`, a simple access of the lazily loaded properties (`emoji`, `fullname`) on `subreddit` causes a problem. Code below should recreate that problem easily.
#!/usr/bin/env python3.6
def recreate_problem(client_id, client_secret, user_agent):
import praw
reddit_name = 'sfwpornnetwork'
reddit = praw.Reddit(client_id=client_id, client_secret=client_secret, user_agent=user_agent)
subreddit = reddit.subreddit(reddit_name)
display(subreddit)
def display(obj):
for d in dir(obj):
try:
v = getattr(obj, d)
print(f' {d}: {v}', flush=True)
pass
except Exception as e:
v = str(e)
print(f' ERROR: {d}: {v}', flush=True)
if __name__ == '__main__':
import sys
from pathlib import Path
if len(sys.argv) != 4:
print(f'Usage: {Path(__file__).stem} <client_id> <client_secret> <user_agent>')
sys.exit(1)
client_id = sys.argv[1]
client_secret = sys.argv[2]
user_agent = sys.argv[3]
recreate_problem(client_id, client_secret, user_agent)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `praw/models/reddit/emoji.py`
Content:
```
1 """Provide the Emoji class."""
2 import os
3
4 from ...const import API_PATH
5 from ...exceptions import ClientException
6 from .base import RedditBase
7
8
9 class Emoji(RedditBase):
10 """An individual Emoji object."""
11
12 STR_FIELD = 'name'
13
14 def __eq__(self, other):
15 """Return whether the other instance equals the current."""
16 if isinstance(other, str):
17 return other == str(self)
18 return (isinstance(other, self.__class__) and
19 str(self) == str(other) and other.subreddit == self.subreddit)
20
21 def __hash__(self):
22 """Return the hash of the current instance."""
23 return (hash(self.__class__.__name__) ^ hash(str(self)) ^
24 hash(self.subreddit))
25
26 def __init__(self, reddit, subreddit, name, _data=None):
27 """Construct an instance of the Emoji object."""
28 self.name = name
29 self.subreddit = subreddit
30 super(Emoji, self).__init__(reddit, _data)
31
32 def _fetch(self):
33 for emoji in self.subreddit.emoji:
34 if emoji.name == self.name:
35 self.__dict__.update(emoji.__dict__)
36 self._fetched = True
37 return
38 raise ClientException('/r/{} does not have the emoji {}'
39 .format(self.subreddit, self.name))
40
41 def delete(self):
42 """Delete an emoji from this subreddit by Emoji.
43
44 To delete ``'test'`` as an emoji on the subreddit ``'praw_test'`` try:
45
46 .. code:: python
47
48 reddit.subreddit('praw_test').emoji['test'].delete()
49
50 """
51 url = API_PATH['emoji_delete'].format(
52 emoji_name=self.name, subreddit=self.subreddit)
53 self._reddit.request('DELETE', url)
54
55
56 class SubredditEmoji(RedditBase):
57 """Provides a set of functions to a Subreddit for emoji."""
58
59 __hash__ = RedditBase.__hash__
60
61 def __getitem__(self, name):
62 """Lazily return the Emoji for the subreddit named ``name``.
63
64 :param name: The name of the emoji
65
66 This method is to be used to fetch a specific emoji url, like so:
67
68 .. code:: python
69
70 emoji = reddit.subreddit('praw_test').emoji['test']
71 print(emoji)
72
73 """
74 return Emoji(self._reddit, self.subreddit, name)
75
76 def __init__(self, subreddit):
77 """Create a SubredditEmoji instance.
78
79 :param subreddit: The subreddit whose emoji are affected.
80
81 """
82 self.subreddit = subreddit
83 super(SubredditEmoji, self).__init__(subreddit._reddit, None)
84
85 def __iter__(self):
86 """Return a list of Emoji for the subreddit.
87
88 This method is to be used to discover all emoji for a subreddit:
89
90 .. code:: python
91
92 for emoji in reddit.subreddit('praw_test').emoji:
93 print(emoji)
94
95 """
96 response = self.subreddit._reddit.get(
97 API_PATH['emoji_list'].format(subreddit=self.subreddit))
98 for emoji_name, emoji_data in \
99 response[self.subreddit.fullname].items():
100 yield Emoji(self._reddit, self.subreddit, emoji_name,
101 _data=emoji_data)
102
103 def add(self, name, image_path):
104 """Add an emoji to this subreddit.
105
106 :param name: The name of the emoji
107 :param image_path: A path to a jpeg or png image.
108 :returns: The Emoji added.
109
110 To add ``'test'`` to the subreddit ``'praw_test'`` try:
111
112 .. code:: python
113
114 reddit.subreddit('praw_test').emoji.add('test','test.png')
115
116 """
117 data = {'filepath': os.path.basename(image_path),
118 'mimetype': 'image/jpeg'}
119 if image_path.lower().endswith('.png'):
120 data['mimetype'] = 'image/png'
121 url = API_PATH['emoji_lease'].format(subreddit=self.subreddit)
122
123 # until we learn otherwise, assume this request always succeeds
124 upload_lease = self._reddit.post(url, data=data)['s3UploadLease']
125 upload_data = {item['name']: item['value']
126 for item in upload_lease['fields']}
127 upload_url = 'https:{}'.format(upload_lease['action'])
128
129 with open(image_path, 'rb') as image:
130 response = self._reddit._core._requestor._http.post(
131 upload_url, data=upload_data, files={'file': image})
132 response.raise_for_status()
133
134 url = API_PATH['emoji_upload'].format(
135 subreddit=self.subreddit)
136 self._reddit.post(url,
137 data={'name': name, 's3_key': upload_data['key']})
138 return Emoji(self._reddit, self.subreddit, name)
139
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/praw/models/reddit/emoji.py b/praw/models/reddit/emoji.py
--- a/praw/models/reddit/emoji.py
+++ b/praw/models/reddit/emoji.py
@@ -53,11 +53,9 @@
self._reddit.request('DELETE', url)
-class SubredditEmoji(RedditBase):
+class SubredditEmoji(object):
"""Provides a set of functions to a Subreddit for emoji."""
- __hash__ = RedditBase.__hash__
-
def __getitem__(self, name):
"""Lazily return the Emoji for the subreddit named ``name``.
@@ -80,7 +78,7 @@
"""
self.subreddit = subreddit
- super(SubredditEmoji, self).__init__(subreddit._reddit, None)
+ self._reddit = subreddit._reddit
def __iter__(self):
"""Return a list of Emoji for the subreddit.
| {"golden_diff": "diff --git a/praw/models/reddit/emoji.py b/praw/models/reddit/emoji.py\n--- a/praw/models/reddit/emoji.py\n+++ b/praw/models/reddit/emoji.py\n@@ -53,11 +53,9 @@\n self._reddit.request('DELETE', url)\n \n \n-class SubredditEmoji(RedditBase):\n+class SubredditEmoji(object):\n \"\"\"Provides a set of functions to a Subreddit for emoji.\"\"\"\n \n- __hash__ = RedditBase.__hash__\n-\n def __getitem__(self, name):\n \"\"\"Lazily return the Emoji for the subreddit named ``name``.\n \n@@ -80,7 +78,7 @@\n \n \"\"\"\n self.subreddit = subreddit\n- super(SubredditEmoji, self).__init__(subreddit._reddit, None)\n+ self._reddit = subreddit._reddit\n \n def __iter__(self):\n \"\"\"Return a list of Emoji for the subreddit.\n", "issue": "Accessing emoji causes recusion limit error\nGiven a `client_id`, `client_secret`, and `user_agent` a simple access of a the lazily loaded properties (`emoji`, `fullname`) on `subreddit` causes a problem. Code below should recreate that problem easily.\r\n\r\n #!/usr/bin/env python3.6\r\n\r\n def recreate_problem(client_id, client_secret, user_agent):\r\n import praw\r\n\r\n reddit_name = 'sfwpornnetwork'\r\n reddit = praw.Reddit(client_id=client_id, client_secret=client_secret, user_agent=user_agent)\r\n subreddit = reddit.subreddit(reddit_name)\r\n display(subreddit)\r\n\r\n\r\n def display(obj):\r\n for d in dir(obj):\r\n try:\r\n v = getattr(obj, d)\r\n print(f' {d}: {v}', flush=True)\r\n pass\r\n except Exception as e:\r\n v = str(e)\r\n print(f' ERROR: {d}: {v}', flush=True)\r\n\r\n\r\n if __name__ == '__main__':\r\n import sys\r\n from pathlib import Path\r\n\r\n if len(sys.argv) != 4:\r\n print(f'Usage: {Path(__file__).stem} <client_id> <client_secret> <user_agent>')\r\n sys.exit(1)\r\n client_id = sys.argv[1]\r\n client_secret = sys.argv[2]\r\n user_agent = sys.argv[3]\r\n recreate_problem(client_id, client_secret, user_agent)\r\n\r\n\nAccessing emoji causes recusion limit error\nGiven a `client_id`, `client_secret`, and `user_agent` a simple access of a the lazily loaded properties (`emoji`, `fullname`) on `subreddit` causes a problem. 
Code below should recreate that problem easily.\r\n\r\n #!/usr/bin/env python3.6\r\n\r\n def recreate_problem(client_id, client_secret, user_agent):\r\n import praw\r\n\r\n reddit_name = 'sfwpornnetwork'\r\n reddit = praw.Reddit(client_id=client_id, client_secret=client_secret, user_agent=user_agent)\r\n subreddit = reddit.subreddit(reddit_name)\r\n display(subreddit)\r\n\r\n\r\n def display(obj):\r\n for d in dir(obj):\r\n try:\r\n v = getattr(obj, d)\r\n print(f' {d}: {v}', flush=True)\r\n pass\r\n except Exception as e:\r\n v = str(e)\r\n print(f' ERROR: {d}: {v}', flush=True)\r\n\r\n\r\n if __name__ == '__main__':\r\n import sys\r\n from pathlib import Path\r\n\r\n if len(sys.argv) != 4:\r\n print(f'Usage: {Path(__file__).stem} <client_id> <client_secret> <user_agent>')\r\n sys.exit(1)\r\n client_id = sys.argv[1]\r\n client_secret = sys.argv[2]\r\n user_agent = sys.argv[3]\r\n recreate_problem(client_id, client_secret, user_agent)\r\n\r\n\n", "before_files": [{"content": "\"\"\"Provide the Emoji class.\"\"\"\nimport os\n\nfrom ...const import API_PATH\nfrom ...exceptions import ClientException\nfrom .base import RedditBase\n\n\nclass Emoji(RedditBase):\n \"\"\"An individual Emoji object.\"\"\"\n\n STR_FIELD = 'name'\n\n def __eq__(self, other):\n \"\"\"Return whether the other instance equals the current.\"\"\"\n if isinstance(other, str):\n return other == str(self)\n return (isinstance(other, self.__class__) and\n str(self) == str(other) and other.subreddit == self.subreddit)\n\n def __hash__(self):\n \"\"\"Return the hash of the current instance.\"\"\"\n return (hash(self.__class__.__name__) ^ hash(str(self)) ^\n hash(self.subreddit))\n\n def __init__(self, reddit, subreddit, name, _data=None):\n \"\"\"Construct an instance of the Emoji object.\"\"\"\n self.name = name\n self.subreddit = subreddit\n super(Emoji, self).__init__(reddit, _data)\n\n def _fetch(self):\n for emoji in self.subreddit.emoji:\n if emoji.name == self.name:\n self.__dict__.update(emoji.__dict__)\n self._fetched = True\n return\n raise ClientException('/r/{} does not have the emoji {}'\n .format(self.subreddit, self.name))\n\n def delete(self):\n \"\"\"Delete an emoji from this subreddit by Emoji.\n\n To delete ``'test'`` as an emoji on the subreddit ``'praw_test'`` try:\n\n .. code:: python\n\n reddit.subreddit('praw_test').emoji['test'].delete()\n\n \"\"\"\n url = API_PATH['emoji_delete'].format(\n emoji_name=self.name, subreddit=self.subreddit)\n self._reddit.request('DELETE', url)\n\n\nclass SubredditEmoji(RedditBase):\n \"\"\"Provides a set of functions to a Subreddit for emoji.\"\"\"\n\n __hash__ = RedditBase.__hash__\n\n def __getitem__(self, name):\n \"\"\"Lazily return the Emoji for the subreddit named ``name``.\n\n :param name: The name of the emoji\n\n This method is to be used to fetch a specific emoji url, like so:\n\n .. code:: python\n\n emoji = reddit.subreddit('praw_test').emoji['test']\n print(emoji)\n\n \"\"\"\n return Emoji(self._reddit, self.subreddit, name)\n\n def __init__(self, subreddit):\n \"\"\"Create a SubredditEmoji instance.\n\n :param subreddit: The subreddit whose emoji are affected.\n\n \"\"\"\n self.subreddit = subreddit\n super(SubredditEmoji, self).__init__(subreddit._reddit, None)\n\n def __iter__(self):\n \"\"\"Return a list of Emoji for the subreddit.\n\n This method is to be used to discover all emoji for a subreddit:\n\n .. 
code:: python\n\n for emoji in reddit.subreddit('praw_test').emoji:\n print(emoji)\n\n \"\"\"\n response = self.subreddit._reddit.get(\n API_PATH['emoji_list'].format(subreddit=self.subreddit))\n for emoji_name, emoji_data in \\\n response[self.subreddit.fullname].items():\n yield Emoji(self._reddit, self.subreddit, emoji_name,\n _data=emoji_data)\n\n def add(self, name, image_path):\n \"\"\"Add an emoji to this subreddit.\n\n :param name: The name of the emoji\n :param image_path: A path to a jpeg or png image.\n :returns: The Emoji added.\n\n To add ``'test'`` to the subreddit ``'praw_test'`` try:\n\n .. code:: python\n\n reddit.subreddit('praw_test').emoji.add('test','test.png')\n\n \"\"\"\n data = {'filepath': os.path.basename(image_path),\n 'mimetype': 'image/jpeg'}\n if image_path.lower().endswith('.png'):\n data['mimetype'] = 'image/png'\n url = API_PATH['emoji_lease'].format(subreddit=self.subreddit)\n\n # until we learn otherwise, assume this request always succeeds\n upload_lease = self._reddit.post(url, data=data)['s3UploadLease']\n upload_data = {item['name']: item['value']\n for item in upload_lease['fields']}\n upload_url = 'https:{}'.format(upload_lease['action'])\n\n with open(image_path, 'rb') as image:\n response = self._reddit._core._requestor._http.post(\n upload_url, data=upload_data, files={'file': image})\n response.raise_for_status()\n\n url = API_PATH['emoji_upload'].format(\n subreddit=self.subreddit)\n self._reddit.post(url,\n data={'name': name, 's3_key': upload_data['key']})\n return Emoji(self._reddit, self.subreddit, name)\n", "path": "praw/models/reddit/emoji.py"}], "after_files": [{"content": "\"\"\"Provide the Emoji class.\"\"\"\nimport os\n\nfrom ...const import API_PATH\nfrom ...exceptions import ClientException\nfrom .base import RedditBase\n\n\nclass Emoji(RedditBase):\n \"\"\"An individual Emoji object.\"\"\"\n\n STR_FIELD = 'name'\n\n def __eq__(self, other):\n \"\"\"Return whether the other instance equals the current.\"\"\"\n if isinstance(other, str):\n return other == str(self)\n return (isinstance(other, self.__class__) and\n str(self) == str(other) and other.subreddit == self.subreddit)\n\n def __hash__(self):\n \"\"\"Return the hash of the current instance.\"\"\"\n return (hash(self.__class__.__name__) ^ hash(str(self)) ^\n hash(self.subreddit))\n\n def __init__(self, reddit, subreddit, name, _data=None):\n \"\"\"Construct an instance of the Emoji object.\"\"\"\n self.name = name\n self.subreddit = subreddit\n super(Emoji, self).__init__(reddit, _data)\n\n def _fetch(self):\n for emoji in self.subreddit.emoji:\n if emoji.name == self.name:\n self.__dict__.update(emoji.__dict__)\n self._fetched = True\n return\n raise ClientException('/r/{} does not have the emoji {}'\n .format(self.subreddit, self.name))\n\n def delete(self):\n \"\"\"Delete an emoji from this subreddit by Emoji.\n\n To delete ``'test'`` as an emoji on the subreddit ``'praw_test'`` try:\n\n .. code:: python\n\n reddit.subreddit('praw_test').emoji['test'].delete()\n\n \"\"\"\n url = API_PATH['emoji_delete'].format(\n emoji_name=self.name, subreddit=self.subreddit)\n self._reddit.request('DELETE', url)\n\n\nclass SubredditEmoji(object):\n \"\"\"Provides a set of functions to a Subreddit for emoji.\"\"\"\n\n def __getitem__(self, name):\n \"\"\"Lazily return the Emoji for the subreddit named ``name``.\n\n :param name: The name of the emoji\n\n This method is to be used to fetch a specific emoji url, like so:\n\n .. 
code:: python\n\n emoji = reddit.subreddit('praw_test').emoji['test']\n print(emoji)\n\n \"\"\"\n return Emoji(self._reddit, self.subreddit, name)\n\n def __init__(self, subreddit):\n \"\"\"Create a SubredditEmoji instance.\n\n :param subreddit: The subreddit whose emoji are affected.\n\n \"\"\"\n self.subreddit = subreddit\n self._reddit = subreddit._reddit\n\n def __iter__(self):\n \"\"\"Return a list of Emoji for the subreddit.\n\n This method is to be used to discover all emoji for a subreddit:\n\n .. code:: python\n\n for emoji in reddit.subreddit('praw_test').emoji:\n print(emoji)\n\n \"\"\"\n response = self.subreddit._reddit.get(\n API_PATH['emoji_list'].format(subreddit=self.subreddit))\n for emoji_name, emoji_data in \\\n response[self.subreddit.fullname].items():\n yield Emoji(self._reddit, self.subreddit, emoji_name,\n _data=emoji_data)\n\n def add(self, name, image_path):\n \"\"\"Add an emoji to this subreddit.\n\n :param name: The name of the emoji\n :param image_path: A path to a jpeg or png image.\n :returns: The Emoji added.\n\n To add ``'test'`` to the subreddit ``'praw_test'`` try:\n\n .. code:: python\n\n reddit.subreddit('praw_test').emoji.add('test','test.png')\n\n \"\"\"\n data = {'filepath': os.path.basename(image_path),\n 'mimetype': 'image/jpeg'}\n if image_path.lower().endswith('.png'):\n data['mimetype'] = 'image/png'\n url = API_PATH['emoji_lease'].format(subreddit=self.subreddit)\n\n # until we learn otherwise, assume this request always succeeds\n upload_lease = self._reddit.post(url, data=data)['s3UploadLease']\n upload_data = {item['name']: item['value']\n for item in upload_lease['fields']}\n upload_url = 'https:{}'.format(upload_lease['action'])\n\n with open(image_path, 'rb') as image:\n response = self._reddit._core._requestor._http.post(\n upload_url, data=upload_data, files={'file': image})\n response.raise_for_status()\n\n url = API_PATH['emoji_upload'].format(\n subreddit=self.subreddit)\n self._reddit.post(url,\n data={'name': name, 's3_key': upload_data['key']})\n return Emoji(self._reddit, self.subreddit, name)\n", "path": "praw/models/reddit/emoji.py"}]} | 2,227 | 205 |
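The fix for this record is purely structural: `SubredditEmoji` stops inheriting `RedditBase` and keeps only the `_reddit` reference it needs. A toy illustration of why inheriting a lazy-fetching base can exceed the recursion limit follows; it is a simplified sketch in the spirit of `__getattr__`-driven lazy loading, not PRAW's actual classes:

```python
class LazyBase:
    def __getattr__(self, name):
        self._fetch()                  # lazy-load on first unknown attribute
        return self.__dict__[name]

class BadEmojiSet(LazyBase):
    def _fetch(self):
        # Touching another unknown attribute re-enters __getattr__,
        # which calls _fetch again, and so on: RecursionError.
        return self.subreddit_fullname

class GoodEmojiSet:                    # mirrors the fix: a plain container
    def __init__(self, reddit):
        self._reddit = reddit

# BadEmojiSet().anything      -> RecursionError (maximum recursion depth exceeded)
# GoodEmojiSet(None).anything -> ordinary AttributeError
```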
gh_patches_debug_12953 | rasdani/github-patches | git_diff | OpenCTI-Platform__connectors-1415 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
restore-files.py crashes when missing find_element returns None
## Description
When using restore-files to incrementally update a second instance of OpenCTI the process crashes whenever a referenced element is not in the data being restored (normally because it's already in the system). The return from find_element in the resolve_missing function (line 79 currently) is not checked for "None" (as is done in the restore_files function at line 121).
## Environment
1. OS (where OpenCTI server runs): Ubuntu 20.4
2. OpenCTI version: 5.3.11
3. OpenCTI client: python
4. Other environment details:
## Reproducible Steps
Steps to create the smallest reproducible scenario:
1. Run backup-files on system 1
2. copy output to system 2
3. Run restore-files on system 2
## Expected Output
Ingest of all elements.
## Actual Output
INFO:root:Restore run directory @ 20220314T200100Z
Traceback (most recent call last):
File "/opt/opencti-highside-sync/connectors-master/external-import/restore-files/src/restore-files.py", line 188, in <module>
RestoreFilesInstance.start()
File "/opt/opencti-highside-sync/connectors-master/external-import/restore-files/src/restore-files.py", line 181, in start
self.restore_files()
File "/opt/opencti-highside-sync/connectors-master/external-import/restore-files/src/restore-files.py", line 133, in restore_files
self.resolve_missing(dir_date, ids, missing_element, acc)
File "/opt/opencti-highside-sync/connectors-master/external-import/restore-files/src/restore-files.py", line 87, in resolve_missing
self.resolve_missing(dir_date, element_ids, missing_element, acc)
File "/opt/opencti-highside-sync/connectors-master/external-import/restore-files/src/restore-files.py", line 80, in resolve_missing
refs = ref_extractors([data])
File "/opt/opencti-highside-sync/connectors-master/external-import/restore-files/src/restore-files.py", line 17, in ref_extractors
for key in data.keys():
AttributeError: 'NoneType' object has no attribute 'keys'
Killed
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `external-import/restore-files/src/restore-files.py`
Content:
```
1 ################################
2 # OpenCTI Restore Files #
3 ################################
4 import datetime
5 import json
6 import os
7 import sys
8 from pathlib import Path
9
10 import yaml
11 from pycti import OpenCTIConnectorHelper, OpenCTIStix2Splitter, get_config_variable
12
13
14 def ref_extractors(objects):
15 ids = []
16 for data in objects:
17 for key in data.keys():
18 if key.startswith("x_") is False:
19 if key.endswith("_ref"):
20 ids.append(data[key])
21 if key.endswith("_refs"):
22 ids.extend(data[key])
23 return set(ids)
24
25
26 def fetch_stix_data(file):
27 # Open a file: file
28 file = open(file, mode="r")
29 file_content = file.read()
30 file.close()
31 file_json = json.loads(file_content)
32 return file_json["objects"]
33
34
35 def date_convert(name):
36 return datetime.datetime.strptime(name, "%Y%m%dT%H%M%SZ")
37
38
39 class RestoreFilesConnector:
40 def __init__(self, conf_data):
41 config_file_path = os.path.dirname(os.path.abspath(__file__)) + "/config.yml"
42 config = (
43 yaml.load(open(config_file_path), Loader=yaml.FullLoader)
44 if os.path.isfile(config_file_path)
45 else conf_data
46 )
47 self.helper = OpenCTIConnectorHelper(config)
48 # Extra config
49 self.direct_creation = get_config_variable(
50 "DIRECT_CREATION",
51 ["backup", "direct_creation"],
52 config,
53 default=False,
54 )
55 self.backup_protocol = get_config_variable(
56 "BACKUP_PROTOCOL", ["backup", "protocol"], config
57 )
58 self.backup_path = get_config_variable(
59 "BACKUP_PATH", ["backup", "path"], config
60 )
61
62 def find_element(self, dir_date, id):
63 name = id + ".json"
64 path = self.backup_path + "/opencti_data"
65 for root, dirs, files in os.walk(path):
66 if name in files:
67 # If find dir is before, no need to process the element as missing
68 path = os.path.basename(root)
69 if date_convert(path) > dir_date:
70 return fetch_stix_data(os.path.join(root, name))[0]
71 return None
72
73 def resolve_missing(self, dir_date, element_ids, data, acc=[]):
74 refs = ref_extractors([data])
75 for ref in refs:
76 if ref not in element_ids:
77 not_in = next((x for x in acc if x["id"] == ref), None)
78 if not_in is None:
79 missing_element = self.find_element(dir_date, ref)
80 acc.insert(0, missing_element)
81 self.resolve_missing(dir_date, element_ids, missing_element, acc)
82
83 def restore_files(self):
84 stix2_splitter = OpenCTIStix2Splitter()
85 state = self.helper.get_state()
86 start_directory = (
87 state["current"] if state is not None and "current" in state else None
88 )
89 start_date = (
90 date_convert(start_directory) if start_directory is not None else None
91 )
92 path = self.backup_path + "/opencti_data"
93 dirs = sorted(Path(path).iterdir(), key=lambda d: date_convert(d.name))
94 for entry in dirs:
95 friendly_name = "Restore run directory @ " + entry.name
96 self.helper.log_info(friendly_name)
97 dir_date = date_convert(entry.name)
98 if start_date is not None and dir_date <= start_date:
99 continue
100 # 00 - Create a bundle for the directory
101 files_data = []
102 element_ids = []
103 # 01 - build all _ref / _refs contained in the bundle
104 element_refs = []
105 for file in os.scandir(entry):
106 if file.is_file():
107 objects = fetch_stix_data(file)
108 object_ids = set(map(lambda x: x["id"], objects))
109 element_refs.extend(ref_extractors(objects))
110 files_data.extend(objects)
111 element_ids.extend(object_ids)
112 # Ensure the bundle is consistent (include meta elements)
113 # 02 - Scan bundle to detect missing elements
114 acc = []
115 ids = set(element_ids)
116 refs = set(element_refs)
117 for ref in refs:
118 if ref not in ids:
119 # 03 - If missing, scan the other dir/files to find the elements
120 missing_element = self.find_element(dir_date, ref)
121 if missing_element is not None:
122 acc.insert(0, missing_element)
123 # 04 - Restart the process to handle recursive resolution
124 self.resolve_missing(dir_date, ids, missing_element, acc)
125 # 05 - Add elements to the bundle
126 objects_with_missing = acc + files_data
127 if len(objects_with_missing) > 0:
128 # Create the work
129 work_id = self.helper.api.work.initiate_work(
130 self.helper.connect_id, friendly_name
131 )
132 # 06 - Send the bundle to the worker queue
133 stix_bundle = {
134 "type": "bundle",
135 "objects": objects_with_missing,
136 }
137 if self.direct_creation:
138 # Bundle must be split for reordering
139 bundles = stix2_splitter.split_bundle(stix_bundle, False)
140 self.helper.log_info(
141 "restore dir "
142 + entry.name
143 + " with "
144 + str(len(bundles))
145 + " bundles (direct creation)"
146 )
147 for bundle in bundles:
148 self.helper.api.stix2.import_bundle_from_json(
149 json.dumps(bundle), True
150 )
151 # 06 - Save the state
152 self.helper.set_state({"current": entry.name})
153 else:
154 self.helper.log_info("restore dir (worker bundles):" + entry.name)
155 self.helper.send_stix2_bundle(
156 json.dumps(stix_bundle), work_id=work_id
157 )
158 message = "Restore dir run, storing last_run as {0}".format(
159 entry.name
160 )
161 self.helper.api.work.to_processed(work_id, message)
162 # 06 - Save the state
163 self.helper.set_state({"current": entry.name})
164 self.helper.log_info("restore run completed")
165
166 def start(self):
167 # Check if the directory exists
168 if not os.path.exists(self.backup_path + "/opencti_data"):
169 raise ValueError(
170 "Backup path does not exist - " + self.backup_path + "/opencti_data"
171 )
172 self.restore_files()
173
174
175 if __name__ == "__main__":
176 json_conf = sys.argv[1] if len(sys.argv) > 1 else None
177 conf = json.loads(json_conf) if json_conf is not None else {}
178 RestoreFilesInstance = RestoreFilesConnector(conf)
179 RestoreFilesInstance.start()
180
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/external-import/restore-files/src/restore-files.py b/external-import/restore-files/src/restore-files.py
--- a/external-import/restore-files/src/restore-files.py
+++ b/external-import/restore-files/src/restore-files.py
@@ -77,8 +77,9 @@
not_in = next((x for x in acc if x["id"] == ref), None)
if not_in is None:
missing_element = self.find_element(dir_date, ref)
- acc.insert(0, missing_element)
- self.resolve_missing(dir_date, element_ids, missing_element, acc)
+ if missing_element is not None:
+ acc.insert(0, missing_element)
+ self.resolve_missing(dir_date, element_ids, missing_element, acc)
def restore_files(self):
stix2_splitter = OpenCTIStix2Splitter()
| {"golden_diff": "diff --git a/external-import/restore-files/src/restore-files.py b/external-import/restore-files/src/restore-files.py\n--- a/external-import/restore-files/src/restore-files.py\n+++ b/external-import/restore-files/src/restore-files.py\n@@ -77,8 +77,9 @@\n not_in = next((x for x in acc if x[\"id\"] == ref), None)\n if not_in is None:\n missing_element = self.find_element(dir_date, ref)\n- acc.insert(0, missing_element)\n- self.resolve_missing(dir_date, element_ids, missing_element, acc)\n+ if missing_element is not None:\n+ acc.insert(0, missing_element)\n+ self.resolve_missing(dir_date, element_ids, missing_element, acc)\n \n def restore_files(self):\n stix2_splitter = OpenCTIStix2Splitter()\n", "issue": "restore-files.py crashes when missing find_element returns None\n## Description\r\n\r\nWhen using restore-files to incrementally update a second instance of OpenCTI the process crashes whenever a referenced element is not in the data being restored (normally because it's already in the system). The return from find_element in the resolve_missing function (line 79 currently) is not checked for \"None\" (as is done in the restore_files function at line 121).\r\n\r\n## Environment\r\n\r\n1. OS (where OpenCTI server runs): Ubuntu 20.4\r\n2. OpenCTI version: 5.3.11\r\n3. OpenCTI client: python\r\n4. Other environment details:\r\n\r\n## Reproducible Steps\r\n\r\nSteps to create the smallest reproducible scenario:\r\n1. Run backup-files on system 1\r\n2. copy output to system 2\r\n3. Run restore-files on system 2\r\n\r\n## Expected Output\r\n\r\nIngest of all elements.\r\n\r\n## Actual Output\r\n\r\nINFO:root:Restore run directory @ 20220314T200100Z\r\nTraceback (most recent call last):\r\n File \"/opt/opencti-highside-sync/connectors-master/external-import/restore-files/src/restore-files.py\", line 188, in <module>\r\n RestoreFilesInstance.start()\r\n File \"/opt/opencti-highside-sync/connectors-master/external-import/restore-files/src/restore-files.py\", line 181, in start\r\n self.restore_files()\r\n File \"/opt/opencti-highside-sync/connectors-master/external-import/restore-files/src/restore-files.py\", line 133, in restore_files\r\n self.resolve_missing(dir_date, ids, missing_element, acc)\r\n File \"/opt/opencti-highside-sync/connectors-master/external-import/restore-files/src/restore-files.py\", line 87, in resolve_missing\r\n self.resolve_missing(dir_date, element_ids, missing_element, acc)\r\n File \"/opt/opencti-highside-sync/connectors-master/external-import/restore-files/src/restore-files.py\", line 80, in resolve_missing\r\n refs = ref_extractors([data])\r\n File \"/opt/opencti-highside-sync/connectors-master/external-import/restore-files/src/restore-files.py\", line 17, in ref_extractors\r\n for key in data.keys():\r\nAttributeError: 'NoneType' object has no attribute 'keys'\r\nKilled\r\n\n", "before_files": [{"content": "################################\n# OpenCTI Restore Files #\n################################\nimport datetime\nimport json\nimport os\nimport sys\nfrom pathlib import Path\n\nimport yaml\nfrom pycti import OpenCTIConnectorHelper, OpenCTIStix2Splitter, get_config_variable\n\n\ndef ref_extractors(objects):\n ids = []\n for data in objects:\n for key in data.keys():\n if key.startswith(\"x_\") is False:\n if key.endswith(\"_ref\"):\n ids.append(data[key])\n if key.endswith(\"_refs\"):\n ids.extend(data[key])\n return set(ids)\n\n\ndef fetch_stix_data(file):\n # Open a file: file\n file = open(file, mode=\"r\")\n file_content = file.read()\n 
file.close()\n file_json = json.loads(file_content)\n return file_json[\"objects\"]\n\n\ndef date_convert(name):\n return datetime.datetime.strptime(name, \"%Y%m%dT%H%M%SZ\")\n\n\nclass RestoreFilesConnector:\n def __init__(self, conf_data):\n config_file_path = os.path.dirname(os.path.abspath(__file__)) + \"/config.yml\"\n config = (\n yaml.load(open(config_file_path), Loader=yaml.FullLoader)\n if os.path.isfile(config_file_path)\n else conf_data\n )\n self.helper = OpenCTIConnectorHelper(config)\n # Extra config\n self.direct_creation = get_config_variable(\n \"DIRECT_CREATION\",\n [\"backup\", \"direct_creation\"],\n config,\n default=False,\n )\n self.backup_protocol = get_config_variable(\n \"BACKUP_PROTOCOL\", [\"backup\", \"protocol\"], config\n )\n self.backup_path = get_config_variable(\n \"BACKUP_PATH\", [\"backup\", \"path\"], config\n )\n\n def find_element(self, dir_date, id):\n name = id + \".json\"\n path = self.backup_path + \"/opencti_data\"\n for root, dirs, files in os.walk(path):\n if name in files:\n # If find dir is before, no need to process the element as missing\n path = os.path.basename(root)\n if date_convert(path) > dir_date:\n return fetch_stix_data(os.path.join(root, name))[0]\n return None\n\n def resolve_missing(self, dir_date, element_ids, data, acc=[]):\n refs = ref_extractors([data])\n for ref in refs:\n if ref not in element_ids:\n not_in = next((x for x in acc if x[\"id\"] == ref), None)\n if not_in is None:\n missing_element = self.find_element(dir_date, ref)\n acc.insert(0, missing_element)\n self.resolve_missing(dir_date, element_ids, missing_element, acc)\n\n def restore_files(self):\n stix2_splitter = OpenCTIStix2Splitter()\n state = self.helper.get_state()\n start_directory = (\n state[\"current\"] if state is not None and \"current\" in state else None\n )\n start_date = (\n date_convert(start_directory) if start_directory is not None else None\n )\n path = self.backup_path + \"/opencti_data\"\n dirs = sorted(Path(path).iterdir(), key=lambda d: date_convert(d.name))\n for entry in dirs:\n friendly_name = \"Restore run directory @ \" + entry.name\n self.helper.log_info(friendly_name)\n dir_date = date_convert(entry.name)\n if start_date is not None and dir_date <= start_date:\n continue\n # 00 - Create a bundle for the directory\n files_data = []\n element_ids = []\n # 01 - build all _ref / _refs contained in the bundle\n element_refs = []\n for file in os.scandir(entry):\n if file.is_file():\n objects = fetch_stix_data(file)\n object_ids = set(map(lambda x: x[\"id\"], objects))\n element_refs.extend(ref_extractors(objects))\n files_data.extend(objects)\n element_ids.extend(object_ids)\n # Ensure the bundle is consistent (include meta elements)\n # 02 - Scan bundle to detect missing elements\n acc = []\n ids = set(element_ids)\n refs = set(element_refs)\n for ref in refs:\n if ref not in ids:\n # 03 - If missing, scan the other dir/files to find the elements\n missing_element = self.find_element(dir_date, ref)\n if missing_element is not None:\n acc.insert(0, missing_element)\n # 04 - Restart the process to handle recursive resolution\n self.resolve_missing(dir_date, ids, missing_element, acc)\n # 05 - Add elements to the bundle\n objects_with_missing = acc + files_data\n if len(objects_with_missing) > 0:\n # Create the work\n work_id = self.helper.api.work.initiate_work(\n self.helper.connect_id, friendly_name\n )\n # 06 - Send the bundle to the worker queue\n stix_bundle = {\n \"type\": \"bundle\",\n \"objects\": objects_with_missing,\n }\n if 
self.direct_creation:\n # Bundle must be split for reordering\n bundles = stix2_splitter.split_bundle(stix_bundle, False)\n self.helper.log_info(\n \"restore dir \"\n + entry.name\n + \" with \"\n + str(len(bundles))\n + \" bundles (direct creation)\"\n )\n for bundle in bundles:\n self.helper.api.stix2.import_bundle_from_json(\n json.dumps(bundle), True\n )\n # 06 - Save the state\n self.helper.set_state({\"current\": entry.name})\n else:\n self.helper.log_info(\"restore dir (worker bundles):\" + entry.name)\n self.helper.send_stix2_bundle(\n json.dumps(stix_bundle), work_id=work_id\n )\n message = \"Restore dir run, storing last_run as {0}\".format(\n entry.name\n )\n self.helper.api.work.to_processed(work_id, message)\n # 06 - Save the state\n self.helper.set_state({\"current\": entry.name})\n self.helper.log_info(\"restore run completed\")\n\n def start(self):\n # Check if the directory exists\n if not os.path.exists(self.backup_path + \"/opencti_data\"):\n raise ValueError(\n \"Backup path does not exist - \" + self.backup_path + \"/opencti_data\"\n )\n self.restore_files()\n\n\nif __name__ == \"__main__\":\n json_conf = sys.argv[1] if len(sys.argv) > 1 else None\n conf = json.loads(json_conf) if json_conf is not None else {}\n RestoreFilesInstance = RestoreFilesConnector(conf)\n RestoreFilesInstance.start()\n", "path": "external-import/restore-files/src/restore-files.py"}], "after_files": [{"content": "################################\n# OpenCTI Restore Files #\n################################\nimport datetime\nimport json\nimport os\nimport sys\nfrom pathlib import Path\n\nimport yaml\nfrom pycti import OpenCTIConnectorHelper, OpenCTIStix2Splitter, get_config_variable\n\n\ndef ref_extractors(objects):\n ids = []\n for data in objects:\n for key in data.keys():\n if key.startswith(\"x_\") is False:\n if key.endswith(\"_ref\"):\n ids.append(data[key])\n if key.endswith(\"_refs\"):\n ids.extend(data[key])\n return set(ids)\n\n\ndef fetch_stix_data(file):\n # Open a file: file\n file = open(file, mode=\"r\")\n file_content = file.read()\n file.close()\n file_json = json.loads(file_content)\n return file_json[\"objects\"]\n\n\ndef date_convert(name):\n return datetime.datetime.strptime(name, \"%Y%m%dT%H%M%SZ\")\n\n\nclass RestoreFilesConnector:\n def __init__(self, conf_data):\n config_file_path = os.path.dirname(os.path.abspath(__file__)) + \"/config.yml\"\n config = (\n yaml.load(open(config_file_path), Loader=yaml.FullLoader)\n if os.path.isfile(config_file_path)\n else conf_data\n )\n self.helper = OpenCTIConnectorHelper(config)\n # Extra config\n self.direct_creation = get_config_variable(\n \"DIRECT_CREATION\",\n [\"backup\", \"direct_creation\"],\n config,\n default=False,\n )\n self.backup_protocol = get_config_variable(\n \"BACKUP_PROTOCOL\", [\"backup\", \"protocol\"], config\n )\n self.backup_path = get_config_variable(\n \"BACKUP_PATH\", [\"backup\", \"path\"], config\n )\n\n def find_element(self, dir_date, id):\n name = id + \".json\"\n path = self.backup_path + \"/opencti_data\"\n for root, dirs, files in os.walk(path):\n if name in files:\n # If find dir is before, no need to process the element as missing\n path = os.path.basename(root)\n if date_convert(path) > dir_date:\n return fetch_stix_data(os.path.join(root, name))[0]\n return None\n\n def resolve_missing(self, dir_date, element_ids, data, acc=[]):\n refs = ref_extractors([data])\n for ref in refs:\n if ref not in element_ids:\n not_in = next((x for x in acc if x[\"id\"] == ref), None)\n if not_in is None:\n 
missing_element = self.find_element(dir_date, ref)\n if missing_element is not None:\n acc.insert(0, missing_element)\n self.resolve_missing(dir_date, element_ids, missing_element, acc)\n\n def restore_files(self):\n stix2_splitter = OpenCTIStix2Splitter()\n state = self.helper.get_state()\n start_directory = (\n state[\"current\"] if state is not None and \"current\" in state else None\n )\n start_date = (\n date_convert(start_directory) if start_directory is not None else None\n )\n path = self.backup_path + \"/opencti_data\"\n dirs = sorted(Path(path).iterdir(), key=lambda d: date_convert(d.name))\n for entry in dirs:\n friendly_name = \"Restore run directory @ \" + entry.name\n self.helper.log_info(friendly_name)\n dir_date = date_convert(entry.name)\n if start_date is not None and dir_date <= start_date:\n continue\n # 00 - Create a bundle for the directory\n files_data = []\n element_ids = []\n # 01 - build all _ref / _refs contained in the bundle\n element_refs = []\n for file in os.scandir(entry):\n if file.is_file():\n objects = fetch_stix_data(file)\n object_ids = set(map(lambda x: x[\"id\"], objects))\n element_refs.extend(ref_extractors(objects))\n files_data.extend(objects)\n element_ids.extend(object_ids)\n # Ensure the bundle is consistent (include meta elements)\n # 02 - Scan bundle to detect missing elements\n acc = []\n ids = set(element_ids)\n refs = set(element_refs)\n for ref in refs:\n if ref not in ids:\n # 03 - If missing, scan the other dir/files to find the elements\n missing_element = self.find_element(dir_date, ref)\n if missing_element is not None:\n acc.insert(0, missing_element)\n # 04 - Restart the process to handle recursive resolution\n self.resolve_missing(dir_date, ids, missing_element, acc)\n # 05 - Add elements to the bundle\n objects_with_missing = acc + files_data\n if len(objects_with_missing) > 0:\n # Create the work\n work_id = self.helper.api.work.initiate_work(\n self.helper.connect_id, friendly_name\n )\n # 06 - Send the bundle to the worker queue\n stix_bundle = {\n \"type\": \"bundle\",\n \"objects\": objects_with_missing,\n }\n if self.direct_creation:\n # Bundle must be split for reordering\n bundles = stix2_splitter.split_bundle(stix_bundle, False)\n self.helper.log_info(\n \"restore dir \"\n + entry.name\n + \" with \"\n + str(len(bundles))\n + \" bundles (direct creation)\"\n )\n for bundle in bundles:\n self.helper.api.stix2.import_bundle_from_json(\n json.dumps(bundle), True\n )\n # 06 - Save the state\n self.helper.set_state({\"current\": entry.name})\n else:\n self.helper.log_info(\"restore dir (worker bundles):\" + entry.name)\n self.helper.send_stix2_bundle(\n json.dumps(stix_bundle), work_id=work_id\n )\n message = \"Restore dir run, storing last_run as {0}\".format(\n entry.name\n )\n self.helper.api.work.to_processed(work_id, message)\n # 06 - Save the state\n self.helper.set_state({\"current\": entry.name})\n self.helper.log_info(\"restore run completed\")\n\n def start(self):\n # Check if the directory exists\n if not os.path.exists(self.backup_path + \"/opencti_data\"):\n raise ValueError(\n \"Backup path does not exist - \" + self.backup_path + \"/opencti_data\"\n )\n self.restore_files()\n\n\nif __name__ == \"__main__\":\n json_conf = sys.argv[1] if len(sys.argv) > 1 else None\n conf = json.loads(json_conf) if json_conf is not None else {}\n RestoreFilesInstance = RestoreFilesConnector(conf)\n RestoreFilesInstance.start()\n", "path": "external-import/restore-files/src/restore-files.py"}]} | 2,651 | 192 |
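The guard added in this record's diff follows the usual pattern for recursive resolvers: check the lookup result before descending, so a reference that cannot be found in the backup (for example because it already exists in the target system) is skipped instead of crashing the walk. A generic sketch of that pattern, independent of the connector's code:

```python
def resolve_refs(ref_id, find, known_ids, acc):
    """Recursively collect referenced elements, skipping unresolvable refs."""
    if ref_id in known_ids or any(e["id"] == ref_id for e in acc):
        return
    element = find(ref_id)     # may legitimately return None
    if element is None:        # the missing check behind the reported crash
        return
    acc.insert(0, element)
    for child_ref in element.get("refs", []):
        resolve_refs(child_ref, find, known_ids, acc)
```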
gh_patches_debug_56935 | rasdani/github-patches | git_diff | quantopian__zipline-1707 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
latest tutorial.ipynb has non-working examples 
Dear Zipline Maintainers,
Before I tell you about my issue, let me describe my environment:
# Environment
- Operating System: (MAC OS X El Capitan`)
- Python Version: `$ python --3.4`
- Python Bitness: `$ python -c 'import math, sys;print(int(math.log(sys.maxsize + 1, 2) + 1))'`
- How did you install Zipline: (`pip`)
- Python packages: `$ pip freeze` or `$ conda list`
Now that you know a little about me, let me tell you about the issue I am
having
# Description of Issue
While going through the latest tutorial.ipynb it throws an error:
TypeError: a float is required
- What did you expect to happen?
I ran the notebook and expected to see the same results as in your notebook
- What happened instead?
An error:
TypeError: a float is required
Here is how you can reproduce this issue on your machine:
## Reproduction Steps
1. Run the last cell in the tutorial
...
## What steps have you taken to resolve this already?
I was trying to identify where the error comes from by commenting out lines of code. I'm a beginner, so I don't know how to solve it yet. It seems like the error is thrown when accessing the line:
short_mavg = history(100, '1d', 'price').mean()
...
# Anything else?
...
Sincerely,
`$ whoami`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zipline/examples/buyapple.py`
Content:
```
1 #!/usr/bin/env python
2 #
3 # Copyright 2014 Quantopian, Inc.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 from zipline.api import order, record, symbol
18
19
20 def initialize(context):
21 pass
22
23
24 def handle_data(context, data):
25 order(symbol('AAPL'), 10)
26 record(AAPL=data.current(symbol('AAPL'), 'price'))
27
28
29 # Note: this function can be removed if running
30 # this algorithm on quantopian.com
31 def analyze(context=None, results=None):
32 import matplotlib.pyplot as plt
33 # Plot the portfolio and asset data.
34 ax1 = plt.subplot(211)
35 results.portfolio_value.plot(ax=ax1)
36 ax1.set_ylabel('Portfolio value (USD)')
37 ax2 = plt.subplot(212, sharex=ax1)
38 results.AAPL.plot(ax=ax2)
39 ax2.set_ylabel('AAPL price (USD)')
40
41 # Show the plot.
42 plt.gcf().set_size_inches(18, 8)
43 plt.show()
44
45
46 def _test_args():
47 """Extra arguments to use when zipline's automated tests run this example.
48 """
49 import pandas as pd
50
51 return {
52 'start': pd.Timestamp('2014-01-01', tz='utc'),
53 'end': pd.Timestamp('2014-11-01', tz='utc'),
54 }
55
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zipline/examples/buyapple.py b/zipline/examples/buyapple.py
--- a/zipline/examples/buyapple.py
+++ b/zipline/examples/buyapple.py
@@ -18,12 +18,12 @@
def initialize(context):
- pass
+ context.asset = symbol('AAPL')
def handle_data(context, data):
- order(symbol('AAPL'), 10)
- record(AAPL=data.current(symbol('AAPL'), 'price'))
+ order(context.asset, 10)
+ record(AAPL=data.current(context.asset, 'price'))
# Note: this function can be removed if running
| {"golden_diff": "diff --git a/zipline/examples/buyapple.py b/zipline/examples/buyapple.py\n--- a/zipline/examples/buyapple.py\n+++ b/zipline/examples/buyapple.py\n@@ -18,12 +18,12 @@\n \n \n def initialize(context):\n- pass\n+ context.asset = symbol('AAPL')\n \n \n def handle_data(context, data):\n- order(symbol('AAPL'), 10)\n- record(AAPL=data.current(symbol('AAPL'), 'price'))\n+ order(context.asset, 10)\n+ record(AAPL=data.current(context.asset, 'price'))\n \n \n # Note: this function can be removed if running\n", "issue": "latest tutorial.ipynb has non working examples \nDear Zipline Maintainers,\n\nBefore I tell you about my issue, let me describe my environment:\n# Environment\n- Operating System: (MAC OS X El Capitan`)\n- Python Version: `$ python --3.4`\n- Python Bitness: `$ python -c 'import math, sys;print(int(math.log(sys.maxsize + 1, 2) + 1))'`\n- How did you install Zipline: (`pip`)\n- Python packages: `$ pip freeze` or `$ conda list`\n\nNow that you know a little about me, let me tell you about the issue I am\nhaving\n# Description of Issue\n\nWhile going through the latest tutorial.ipynb it throws an error:\nTypeError: a float is required\n- What did you expect to happen?\n I ran the notebook and expected to see the same results as in your notebook\n- What happened instead?\n An error:\n TypeError: a float is required\n\nHere is how you can reproduce this issue on your machine:\n## Reproduction Steps\n\n1.Run the last cell in the tutorial\n\n...\n## What steps have you taken to resolve this already?\n\nI was trying to identify where the errors belongs to by commenting the lines of code. I'm a beginner , so I don't know how to solve it yet. It seems like the error is thrown when accessing the line:\nshort_mavg = history(100, '1d', 'price').mean()\n...\n# Anything else?\n\n...\n\nSincerely,\n`$ whoami`\n\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# Copyright 2014 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom zipline.api import order, record, symbol\n\n\ndef initialize(context):\n pass\n\n\ndef handle_data(context, data):\n order(symbol('AAPL'), 10)\n record(AAPL=data.current(symbol('AAPL'), 'price'))\n\n\n# Note: this function can be removed if running\n# this algorithm on quantopian.com\ndef analyze(context=None, results=None):\n import matplotlib.pyplot as plt\n # Plot the portfolio and asset data.\n ax1 = plt.subplot(211)\n results.portfolio_value.plot(ax=ax1)\n ax1.set_ylabel('Portfolio value (USD)')\n ax2 = plt.subplot(212, sharex=ax1)\n results.AAPL.plot(ax=ax2)\n ax2.set_ylabel('AAPL price (USD)')\n\n # Show the plot.\n plt.gcf().set_size_inches(18, 8)\n plt.show()\n\n\ndef _test_args():\n \"\"\"Extra arguments to use when zipline's automated tests run this example.\n \"\"\"\n import pandas as pd\n\n return {\n 'start': pd.Timestamp('2014-01-01', tz='utc'),\n 'end': pd.Timestamp('2014-11-01', tz='utc'),\n }\n", "path": "zipline/examples/buyapple.py"}], "after_files": [{"content": "#!/usr/bin/env python\n#\n# Copyright 2014 
Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom zipline.api import order, record, symbol\n\n\ndef initialize(context):\n context.asset = symbol('AAPL')\n\n\ndef handle_data(context, data):\n order(context.asset, 10)\n record(AAPL=data.current(context.asset, 'price'))\n\n\n# Note: this function can be removed if running\n# this algorithm on quantopian.com\ndef analyze(context=None, results=None):\n import matplotlib.pyplot as plt\n # Plot the portfolio and asset data.\n ax1 = plt.subplot(211)\n results.portfolio_value.plot(ax=ax1)\n ax1.set_ylabel('Portfolio value (USD)')\n ax2 = plt.subplot(212, sharex=ax1)\n results.AAPL.plot(ax=ax2)\n ax2.set_ylabel('AAPL price (USD)')\n\n # Show the plot.\n plt.gcf().set_size_inches(18, 8)\n plt.show()\n\n\ndef _test_args():\n \"\"\"Extra arguments to use when zipline's automated tests run this example.\n \"\"\"\n import pandas as pd\n\n return {\n 'start': pd.Timestamp('2014-01-01', tz='utc'),\n 'end': pd.Timestamp('2014-11-01', tz='utc'),\n }\n", "path": "zipline/examples/buyapple.py"}]} | 1,102 | 149 |
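The change in this record is the standard Zipline idiom: resolve the asset once in `initialize` and reuse `context.asset` on every bar instead of calling `symbol()` repeatedly. Below is a short sketch of the same idiom applied to the moving-average line quoted in the issue, using `data.history` (the `BarData` method that replaced the old global `history()`); exact signatures can vary between Zipline versions, so treat the call shown as an assumption:

```python
from zipline.api import order, record, symbol

def initialize(context):
    context.asset = symbol('AAPL')   # look the asset up once

def handle_data(context, data):
    # data.history(assets, fields, bar_count, frequency) returns a Series here
    short_mavg = data.history(context.asset, 'price', 100, '1d').mean()
    order(context.asset, 10)
    record(AAPL=data.current(context.asset, 'price'), short_mavg=short_mavg)
```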
gh_patches_debug_1217 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-2857 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ValueError: Missing staticfiles manifest entry for 'js/select_dropdown_init.js'
https://sentry.liqd.net/sentry/meinberlin-dev/issues/1032/
```
ValueError: Missing staticfiles manifest entry for 'js/select_dropdown_init.js'
(35 additional frame(s) were not displayed)
...
File "django/templatetags/static.py", line 118, in handle_simple
return staticfiles_storage.url(path)
File "django_cloudflare_push/middleware.py", line 47, in url
return super(DebugStaticFilesStorage, self).url(path)
File "django/contrib/staticfiles/storage.py", line 153, in url
return self._url(self.stored_name, name, force)
File "django/contrib/staticfiles/storage.py", line 132, in _url
hashed_name = hashed_name_func(*args)
File "django/contrib/staticfiles/storage.py", line 420, in stored_name
raise ValueError("Missing staticfiles manifest entry for '%s'" % clean_name)
Internal Server Error: /kiezkasse/create/module/kiezkasse-2/
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/mapideas/forms.py`
Content:
```
1 from django import forms
2 from django.utils.translation import ugettext_lazy as _
3
4 from adhocracy4.categories.forms import CategorizableFieldMixin
5 from adhocracy4.labels.mixins import LabelsAddableFieldMixin
6 from adhocracy4.maps import widgets as maps_widgets
7 from meinberlin.apps.contrib.mixins import ImageRightOfUseMixin
8
9 from . import models
10
11
12 class MapIdeaForm(CategorizableFieldMixin,
13 LabelsAddableFieldMixin,
14 ImageRightOfUseMixin):
15
16 def __init__(self, *args, **kwargs):
17 self.settings = kwargs.pop('settings_instance')
18 super().__init__(*args, **kwargs)
19 self.fields['point'].widget = maps_widgets.MapChoosePointWidget(
20 polygon=self.settings.polygon)
21 self.fields['point'].error_messages['required'] = _(
22 'Please locate your proposal on the map.')
23
24 class Media:
25 js = ('js/select_dropdown_init.js',)
26
27 class Meta:
28 model = models.MapIdea
29 fields = ['name', 'description', 'image', 'category',
30 'labels', 'point', 'point_label']
31
32
33 class MapIdeaModerateForm(forms.ModelForm):
34 class Meta:
35 model = models.MapIdea
36 fields = ['moderator_feedback']
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/meinberlin/apps/mapideas/forms.py b/meinberlin/apps/mapideas/forms.py
--- a/meinberlin/apps/mapideas/forms.py
+++ b/meinberlin/apps/mapideas/forms.py
@@ -22,7 +22,7 @@
'Please locate your proposal on the map.')
class Media:
- js = ('js/select_dropdown_init.js',)
+ js = ('select_dropdown_init.js',)
class Meta:
model = models.MapIdea
| {"golden_diff": "diff --git a/meinberlin/apps/mapideas/forms.py b/meinberlin/apps/mapideas/forms.py\n--- a/meinberlin/apps/mapideas/forms.py\n+++ b/meinberlin/apps/mapideas/forms.py\n@@ -22,7 +22,7 @@\n 'Please locate your proposal on the map.')\n \n class Media:\n- js = ('js/select_dropdown_init.js',)\n+ js = ('select_dropdown_init.js',)\n \n class Meta:\n model = models.MapIdea\n", "issue": "ValueError: Missing staticfiles manifest entry for 'js/select_dropdown_init.js'\nhttps://sentry.liqd.net/sentry/meinberlin-dev/issues/1032/\n\n```\nValueError: Missing staticfiles manifest entry for 'js/select_dropdown_init.js'\n(35 additional frame(s) were not displayed)\n...\n File \"django/templatetags/static.py\", line 118, in handle_simple\n return staticfiles_storage.url(path)\n File \"django_cloudflare_push/middleware.py\", line 47, in url\n return super(DebugStaticFilesStorage, self).url(path)\n File \"django/contrib/staticfiles/storage.py\", line 153, in url\n return self._url(self.stored_name, name, force)\n File \"django/contrib/staticfiles/storage.py\", line 132, in _url\n hashed_name = hashed_name_func(*args)\n File \"django/contrib/staticfiles/storage.py\", line 420, in stored_name\n raise ValueError(\"Missing staticfiles manifest entry for '%s'\" % clean_name)\n\nInternal Server Error: /kiezkasse/create/module/kiezkasse-2/\n```\n", "before_files": [{"content": "from django import forms\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.categories.forms import CategorizableFieldMixin\nfrom adhocracy4.labels.mixins import LabelsAddableFieldMixin\nfrom adhocracy4.maps import widgets as maps_widgets\nfrom meinberlin.apps.contrib.mixins import ImageRightOfUseMixin\n\nfrom . import models\n\n\nclass MapIdeaForm(CategorizableFieldMixin,\n LabelsAddableFieldMixin,\n ImageRightOfUseMixin):\n\n def __init__(self, *args, **kwargs):\n self.settings = kwargs.pop('settings_instance')\n super().__init__(*args, **kwargs)\n self.fields['point'].widget = maps_widgets.MapChoosePointWidget(\n polygon=self.settings.polygon)\n self.fields['point'].error_messages['required'] = _(\n 'Please locate your proposal on the map.')\n\n class Media:\n js = ('js/select_dropdown_init.js',)\n\n class Meta:\n model = models.MapIdea\n fields = ['name', 'description', 'image', 'category',\n 'labels', 'point', 'point_label']\n\n\nclass MapIdeaModerateForm(forms.ModelForm):\n class Meta:\n model = models.MapIdea\n fields = ['moderator_feedback']\n", "path": "meinberlin/apps/mapideas/forms.py"}], "after_files": [{"content": "from django import forms\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.categories.forms import CategorizableFieldMixin\nfrom adhocracy4.labels.mixins import LabelsAddableFieldMixin\nfrom adhocracy4.maps import widgets as maps_widgets\nfrom meinberlin.apps.contrib.mixins import ImageRightOfUseMixin\n\nfrom . 
import models\n\n\nclass MapIdeaForm(CategorizableFieldMixin,\n LabelsAddableFieldMixin,\n ImageRightOfUseMixin):\n\n def __init__(self, *args, **kwargs):\n self.settings = kwargs.pop('settings_instance')\n super().__init__(*args, **kwargs)\n self.fields['point'].widget = maps_widgets.MapChoosePointWidget(\n polygon=self.settings.polygon)\n self.fields['point'].error_messages['required'] = _(\n 'Please locate your proposal on the map.')\n\n class Media:\n js = ('select_dropdown_init.js',)\n\n class Meta:\n model = models.MapIdea\n fields = ['name', 'description', 'image', 'category',\n 'labels', 'point', 'point_label']\n\n\nclass MapIdeaModerateForm(forms.ModelForm):\n class Meta:\n model = models.MapIdea\n fields = ['moderator_feedback']\n", "path": "meinberlin/apps/mapideas/forms.py"}]} | 850 | 109 |
gh_patches_debug_27981 | rasdani/github-patches | git_diff | ocadotechnology__codeforlife-portal-641 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Postcode / Zipcode error message at School creation is misleading
Since most web browsers autocomplete the postcode in the class creation form as the email address, teachers receive a badly worded AND badly positioned error message (the message makes it sound like the error is in the name)
It should mention something like, please input a valid postcode / zipcode
and be below the postcode field.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `portal/forms/organisation.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Code for Life
3 #
4 # Copyright (C) 2017, Ocado Innovation Limited
5 #
6 # This program is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU Affero General Public License as
8 # published by the Free Software Foundation, either version 3 of the
9 # License, or (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU Affero General Public License for more details.
15 #
16 # You should have received a copy of the GNU Affero General Public License
17 # along with this program. If not, see <http://www.gnu.org/licenses/>.
18 #
19 # ADDITIONAL TERMS – Section 7 GNU General Public Licence
20 #
21 # This licence does not grant any right, title or interest in any “Ocado” logos,
22 # trade names or the trademark “Ocado” or any other trademarks or domain names
23 # owned by Ocado Innovation Limited or the Ocado group of companies or any other
24 # distinctive brand features of “Ocado” as may be secured from time to time. You
25 # must not distribute any modification of this program using the trademark
26 # “Ocado” or claim any affiliation or association with Ocado or its employees.
27 #
28 # You are not authorised to use the name Ocado (or any of its trade names) or
29 # the names of any author or contributor in advertising or for publicity purposes
30 # pertaining to the distribution of this program, without the prior written
31 # authorisation of Ocado.
32 #
33 # Any propagation, distribution or conveyance of this program must include this
34 # copyright notice and these terms. You must not misrepresent the origins of this
35 # program; modified versions of the program must be marked as such and not
36 # identified as the original program.
37 from django import forms
38
39 from portal.models import School
40
41 from django_countries.widgets import CountrySelectWidget
42 from django.core.exceptions import ObjectDoesNotExist
43
44
45 class OrganisationForm(forms.ModelForm):
46
47 current_password = forms.CharField(
48 label='Enter your password',
49 widget=forms.PasswordInput(attrs={'autocomplete': "off"}))
50
51 class Meta:
52 model = School
53 fields = ['name', 'postcode', 'country']
54 labels = {
55 'name' : "Name of your school or club",
56 'postcode' : 'Postcode',
57 'country' : 'Country',
58 }
59 widgets = {
60 'name' : forms.TextInput(attrs={'autocomplete': "off", 'placeholder': 'Name of your school or club'}),
61 'postcode' : forms.TextInput(attrs={'autocomplete': "off", 'placeholder': 'Postcode'}),
62 'country' : CountrySelectWidget(attrs={'class': 'wide'}),
63 }
64
65 def __init__(self, *args, **kwargs):
66 self.user = kwargs.pop('user', None)
67 self.current_school = kwargs.pop('current_school', None)
68 super(OrganisationForm, self).__init__(*args, **kwargs)
69 if self.current_school:
70 del self.fields['current_password']
71
72 def clean(self):
73 name = self.cleaned_data.get('name', None)
74 postcode = self.cleaned_data.get('postcode', None)
75
76 if name and postcode:
77 try:
78 school = School.objects.get(name=name, postcode=postcode)
79 except ObjectDoesNotExist:
80 return self.cleaned_data
81
82 if not self.current_school or self.current_school.id != school.id:
83 raise forms.ValidationError(
84 "There is already a school or club registered with that name and postcode")
85
86 return self.cleaned_data
87
88 def clean_postcode(self):
89 postcode = self.cleaned_data.get('postcode', None)
90
91 if postcode:
92 # Basic postcode check for now
93 if not len(postcode.replace(' ', '')) > 0:
94 raise forms.ValidationError("That postcode was not recognised")
95
96 return postcode
97
98 def clean_current_password(self):
99 current_password = self.cleaned_data.get('current_password', None)
100 if not self.user.check_password(current_password):
101 raise forms.ValidationError("Your password was incorrect")
102
103
104 class OrganisationJoinForm(forms.Form):
105 fuzzy_name = forms.CharField(
106 label="Search for school or club by name or postcode",
107 widget=forms.TextInput(
108 attrs={'placeholder': "Enrico Fermi High School"}))
109
110 # Note: the reason this is a CharField rather than a ChoiceField is to avoid having to
111 # provide choices which was problematic given that the options are dynamically generated.
112 chosen_org = forms.CharField(
113 label='Select school or club',
114 widget=forms.Select(attrs={'class': 'wide'}))
115
116 def clean_chosen_org(self):
117 chosen_org = self.cleaned_data.get('chosen_org', None)
118
119 if chosen_org and not School.objects.filter(id=int(chosen_org)).exists():
120 raise forms.ValidationError("That school or club was not recognised")
121
122 return chosen_org
123
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/portal/forms/organisation.py b/portal/forms/organisation.py
--- a/portal/forms/organisation.py
+++ b/portal/forms/organisation.py
@@ -52,14 +52,14 @@
model = School
fields = ['name', 'postcode', 'country']
labels = {
- 'name' : "Name of your school or club",
- 'postcode' : 'Postcode',
- 'country' : 'Country',
+ 'name': "Name of your school or club",
+ 'postcode': 'Postcode',
+ 'country': 'Country',
}
widgets = {
- 'name' : forms.TextInput(attrs={'autocomplete': "off", 'placeholder': 'Name of your school or club'}),
- 'postcode' : forms.TextInput(attrs={'autocomplete': "off", 'placeholder': 'Postcode'}),
- 'country' : CountrySelectWidget(attrs={'class': 'wide'}),
+ 'name': forms.TextInput(attrs={'autocomplete': "off", 'placeholder': 'Name of your school or club'}),
+ 'postcode': forms.TextInput(attrs={'autocomplete': "off", 'placeholder': 'Postcode'}),
+ 'country': CountrySelectWidget(attrs={'class': 'wide'}),
}
def __init__(self, *args, **kwargs):
@@ -89,9 +89,8 @@
postcode = self.cleaned_data.get('postcode', None)
if postcode:
- # Basic postcode check for now
- if not len(postcode.replace(' ', '')) > 0:
- raise forms.ValidationError("That postcode was not recognised")
+ if len(postcode.replace(' ', '')) > 10 or len(postcode.replace(' ', '')) == 0:
+ raise forms.ValidationError("Please enter a valid postcode or ZIP code")
return postcode
| {"golden_diff": "diff --git a/portal/forms/organisation.py b/portal/forms/organisation.py\n--- a/portal/forms/organisation.py\n+++ b/portal/forms/organisation.py\n@@ -52,14 +52,14 @@\n model = School\n fields = ['name', 'postcode', 'country']\n labels = {\n- 'name' : \"Name of your school or club\",\n- 'postcode' : 'Postcode',\n- 'country' : 'Country',\n+ 'name': \"Name of your school or club\",\n+ 'postcode': 'Postcode',\n+ 'country': 'Country',\n }\n widgets = {\n- 'name' : forms.TextInput(attrs={'autocomplete': \"off\", 'placeholder': 'Name of your school or club'}),\n- 'postcode' : forms.TextInput(attrs={'autocomplete': \"off\", 'placeholder': 'Postcode'}),\n- 'country' : CountrySelectWidget(attrs={'class': 'wide'}),\n+ 'name': forms.TextInput(attrs={'autocomplete': \"off\", 'placeholder': 'Name of your school or club'}),\n+ 'postcode': forms.TextInput(attrs={'autocomplete': \"off\", 'placeholder': 'Postcode'}),\n+ 'country': CountrySelectWidget(attrs={'class': 'wide'}),\n }\n \n def __init__(self, *args, **kwargs):\n@@ -89,9 +89,8 @@\n postcode = self.cleaned_data.get('postcode', None)\n \n if postcode:\n- # Basic postcode check for now\n- if not len(postcode.replace(' ', '')) > 0:\n- raise forms.ValidationError(\"That postcode was not recognised\")\n+ if len(postcode.replace(' ', '')) > 10 or len(postcode.replace(' ', '')) == 0:\n+ raise forms.ValidationError(\"Please enter a valid postcode or ZIP code\")\n \n return postcode\n", "issue": "Postcode / Zipcode error message at School creation is misleading\nSince most web browsers autocomplete the postcode in the class creation form as the email address, teachers receive a badly worded AND badly positioned error message (the message makes it sound like the error is in the name)\r\nIt should mention something like, please input a valid postcode / zipcode \r\nand be below the postcode field.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Code for Life\n#\n# Copyright (C) 2017, Ocado Innovation Limited\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n# ADDITIONAL TERMS \u2013 Section 7 GNU General Public Licence\n#\n# This licence does not grant any right, title or interest in any \u201cOcado\u201d logos,\n# trade names or the trademark \u201cOcado\u201d or any other trademarks or domain names\n# owned by Ocado Innovation Limited or the Ocado group of companies or any other\n# distinctive brand features of \u201cOcado\u201d as may be secured from time to time. 
You\n# must not distribute any modification of this program using the trademark\n# \u201cOcado\u201d or claim any affiliation or association with Ocado or its employees.\n#\n# You are not authorised to use the name Ocado (or any of its trade names) or\n# the names of any author or contributor in advertising or for publicity purposes\n# pertaining to the distribution of this program, without the prior written\n# authorisation of Ocado.\n#\n# Any propagation, distribution or conveyance of this program must include this\n# copyright notice and these terms. You must not misrepresent the origins of this\n# program; modified versions of the program must be marked as such and not\n# identified as the original program.\nfrom django import forms\n\nfrom portal.models import School\n\nfrom django_countries.widgets import CountrySelectWidget\nfrom django.core.exceptions import ObjectDoesNotExist\n\n\nclass OrganisationForm(forms.ModelForm):\n\n current_password = forms.CharField(\n label='Enter your password',\n widget=forms.PasswordInput(attrs={'autocomplete': \"off\"}))\n\n class Meta:\n model = School\n fields = ['name', 'postcode', 'country']\n labels = {\n 'name' : \"Name of your school or club\",\n 'postcode' : 'Postcode',\n 'country' : 'Country',\n }\n widgets = {\n 'name' : forms.TextInput(attrs={'autocomplete': \"off\", 'placeholder': 'Name of your school or club'}),\n 'postcode' : forms.TextInput(attrs={'autocomplete': \"off\", 'placeholder': 'Postcode'}),\n 'country' : CountrySelectWidget(attrs={'class': 'wide'}),\n }\n\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user', None)\n self.current_school = kwargs.pop('current_school', None)\n super(OrganisationForm, self).__init__(*args, **kwargs)\n if self.current_school:\n del self.fields['current_password']\n\n def clean(self):\n name = self.cleaned_data.get('name', None)\n postcode = self.cleaned_data.get('postcode', None)\n\n if name and postcode:\n try:\n school = School.objects.get(name=name, postcode=postcode)\n except ObjectDoesNotExist:\n return self.cleaned_data\n\n if not self.current_school or self.current_school.id != school.id:\n raise forms.ValidationError(\n \"There is already a school or club registered with that name and postcode\")\n\n return self.cleaned_data\n\n def clean_postcode(self):\n postcode = self.cleaned_data.get('postcode', None)\n\n if postcode:\n # Basic postcode check for now\n if not len(postcode.replace(' ', '')) > 0:\n raise forms.ValidationError(\"That postcode was not recognised\")\n\n return postcode\n\n def clean_current_password(self):\n current_password = self.cleaned_data.get('current_password', None)\n if not self.user.check_password(current_password):\n raise forms.ValidationError(\"Your password was incorrect\")\n\n\nclass OrganisationJoinForm(forms.Form):\n fuzzy_name = forms.CharField(\n label=\"Search for school or club by name or postcode\",\n widget=forms.TextInput(\n attrs={'placeholder': \"Enrico Fermi High School\"}))\n\n # Note: the reason this is a CharField rather than a ChoiceField is to avoid having to\n # provide choices which was problematic given that the options are dynamically generated.\n chosen_org = forms.CharField(\n label='Select school or club',\n widget=forms.Select(attrs={'class': 'wide'}))\n\n def clean_chosen_org(self):\n chosen_org = self.cleaned_data.get('chosen_org', None)\n\n if chosen_org and not School.objects.filter(id=int(chosen_org)).exists():\n raise forms.ValidationError(\"That school or club was not recognised\")\n\n return 
chosen_org\n\n", "path": "portal/forms/organisation.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Code for Life\n#\n# Copyright (C) 2017, Ocado Innovation Limited\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n# ADDITIONAL TERMS \u2013 Section 7 GNU General Public Licence\n#\n# This licence does not grant any right, title or interest in any \u201cOcado\u201d logos,\n# trade names or the trademark \u201cOcado\u201d or any other trademarks or domain names\n# owned by Ocado Innovation Limited or the Ocado group of companies or any other\n# distinctive brand features of \u201cOcado\u201d as may be secured from time to time. You\n# must not distribute any modification of this program using the trademark\n# \u201cOcado\u201d or claim any affiliation or association with Ocado or its employees.\n#\n# You are not authorised to use the name Ocado (or any of its trade names) or\n# the names of any author or contributor in advertising or for publicity purposes\n# pertaining to the distribution of this program, without the prior written\n# authorisation of Ocado.\n#\n# Any propagation, distribution or conveyance of this program must include this\n# copyright notice and these terms. 
You must not misrepresent the origins of this\n# program; modified versions of the program must be marked as such and not\n# identified as the original program.\nfrom django import forms\n\nfrom portal.models import School\n\nfrom django_countries.widgets import CountrySelectWidget\nfrom django.core.exceptions import ObjectDoesNotExist\n\n\nclass OrganisationForm(forms.ModelForm):\n\n current_password = forms.CharField(\n label='Enter your password',\n widget=forms.PasswordInput(attrs={'autocomplete': \"off\"}))\n\n class Meta:\n model = School\n fields = ['name', 'postcode', 'country']\n labels = {\n 'name': \"Name of your school or club\",\n 'postcode': 'Postcode',\n 'country': 'Country',\n }\n widgets = {\n 'name': forms.TextInput(attrs={'autocomplete': \"off\", 'placeholder': 'Name of your school or club'}),\n 'postcode': forms.TextInput(attrs={'autocomplete': \"off\", 'placeholder': 'Postcode'}),\n 'country': CountrySelectWidget(attrs={'class': 'wide'}),\n }\n\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user', None)\n self.current_school = kwargs.pop('current_school', None)\n super(OrganisationForm, self).__init__(*args, **kwargs)\n if self.current_school:\n del self.fields['current_password']\n\n def clean(self):\n name = self.cleaned_data.get('name', None)\n postcode = self.cleaned_data.get('postcode', None)\n\n if name and postcode:\n try:\n school = School.objects.get(name=name, postcode=postcode)\n except ObjectDoesNotExist:\n return self.cleaned_data\n\n if not self.current_school or self.current_school.id != school.id:\n raise forms.ValidationError(\n \"There is already a school or club registered with that name and postcode\")\n\n return self.cleaned_data\n\n def clean_postcode(self):\n postcode = self.cleaned_data.get('postcode', None)\n\n if postcode:\n if len(postcode.replace(' ', '')) > 10 or len(postcode.replace(' ', '')) == 0:\n raise forms.ValidationError(\"Please enter a valid postcode or ZIP code\")\n\n return postcode\n\n def clean_current_password(self):\n current_password = self.cleaned_data.get('current_password', None)\n if not self.user.check_password(current_password):\n raise forms.ValidationError(\"Your password was incorrect\")\n\n\nclass OrganisationJoinForm(forms.Form):\n fuzzy_name = forms.CharField(\n label=\"Search for school or club by name or postcode\",\n widget=forms.TextInput(\n attrs={'placeholder': \"Enrico Fermi High School\"}))\n\n # Note: the reason this is a CharField rather than a ChoiceField is to avoid having to\n # provide choices which was problematic given that the options are dynamically generated.\n chosen_org = forms.CharField(\n label='Select school or club',\n widget=forms.Select(attrs={'class': 'wide'}))\n\n def clean_chosen_org(self):\n chosen_org = self.cleaned_data.get('chosen_org', None)\n\n if chosen_org and not School.objects.filter(id=int(chosen_org)).exists():\n raise forms.ValidationError(\"That school or club was not recognised\")\n\n return chosen_org\n\n", "path": "portal/forms/organisation.py"}]} | 1,663 | 395 |
gh_patches_debug_20643 | rasdani/github-patches | git_diff | nvaccess__nvda-7494 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Nvda not announcing all the speeches when using the windows onecore synthesizer
I installed nvda next 14285 and noticed a problem using the windows onecore synthesizer.
By unchecking and selecting a checkbox several more times then nvda does not announce whether the box is unchecked or checked.
It is a problem in the windows onecore synthesizer because when using another synthesizer as nuance vocaliser exprecive and a voice sap5 the synthesizer announces quickly and accurately if the box has been selected or cleared.
Steps:
Go to a checkbox such as:
Save setting when exiting
Within the general options dialog of nvda.
Now press space to uncheck and check this box.
Do this several times.
The expected is that every time nvda announces the status of the box.
The thing is that the nvda announces some times the state of the box.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `source/synthDrivers/oneCore.py`
Content:
```
1 #synthDrivers/oneCore.py
2 #A part of NonVisual Desktop Access (NVDA)
3 #Copyright (C) 2016-2017 Tyler Spivey, NV Access Limited
4 #This file is covered by the GNU General Public License.
5 #See the file COPYING for more details.
6
7 """Synth driver for Windows OneCore voices.
8 """
9
10 import os
11 import sys
12 from collections import OrderedDict
13 import ctypes
14 import _winreg
15 from synthDriverHandler import SynthDriver, VoiceInfo
16 from logHandler import log
17 import config
18 import nvwave
19 import speech
20 import speechXml
21 import languageHandler
22 import winVersion
23 import NVDAHelper
24
25 SAMPLES_PER_SEC = 22050
26 BITS_PER_SAMPLE = 16
27 BYTES_PER_SEC = SAMPLES_PER_SEC * (BITS_PER_SAMPLE / 8)
28 #: The number of 100-nanosecond units in 1 second.
29 HUNDRED_NS_PER_SEC = 10000000 # 1000000000 ns per sec / 100 ns
30 WAV_HEADER_LEN = 44
31 ocSpeech_Callback = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_int, ctypes.c_wchar_p)
32
33 class _OcSsmlConverter(speechXml.SsmlConverter):
34
35 def __init__(self, defaultLanguage, rate, pitch, volume):
36 super(_OcSsmlConverter, self).__init__(defaultLanguage)
37 self._rate = rate
38 self._pitch = pitch
39 self._volume = volume
40
41 def generateBalancerCommands(self, speechSequence):
42 commands = super(_OcSsmlConverter, self).generateBalancerCommands(speechSequence)
43 # The EncloseAllCommand from SSML must be first.
44 yield next(commands)
45 # OneCore doesn't provide a way to set base prosody values.
46 # Therefore, the base values need to be set using SSML.
47 yield self.convertRateCommand(speech.RateCommand(multiplier=1))
48 yield self.convertVolumeCommand(speech.VolumeCommand(multiplier=1))
49 yield self.convertPitchCommand(speech.PitchCommand(multiplier=1))
50 for command in commands:
51 yield command
52
53 def _convertProsody(self, command, attr, default, base):
54 if command.multiplier == 1 and base == default:
55 # Returning to synth default.
56 return speechXml.DelAttrCommand("prosody", attr)
57 else:
58 # Multiplication isn't supported, only addition/subtraction.
59 # The final value must therefore be relative to the synthesizer's default.
60 val = base * command.multiplier - default
61 return speechXml.SetAttrCommand("prosody", attr, "%d%%" % val)
62
63 def convertRateCommand(self, command):
64 return self._convertProsody(command, "rate", 50, self._rate)
65 def convertPitchCommand(self, command):
66 return self._convertProsody(command, "pitch", 50, self._pitch)
67 def convertVolumeCommand(self, command):
68 return self._convertProsody(command, "volume", 100, self._volume)
69
70 def convertCharacterModeCommand(self, command):
71 # OneCore's character speech sounds weird and doesn't support pitch alteration.
72 # Therefore, we don't use it.
73 return None
74
75 def convertLangChangeCommand(self, command):
76 lcid = languageHandler.localeNameToWindowsLCID(command.lang)
77 if lcid is languageHandler.LCID_NONE:
78 log.debugWarning("Invalid language: %s" % command.lang)
79 return None
80 return super(_OcSsmlConverter, self).convertLangChangeCommand(command)
81
82 class SynthDriver(SynthDriver):
83 name = "oneCore"
84 # Translators: Description for a speech synthesizer.
85 description = _("Windows OneCore voices")
86 supportedSettings = (
87 SynthDriver.VoiceSetting(),
88 SynthDriver.RateSetting(),
89 SynthDriver.PitchSetting(),
90 SynthDriver.VolumeSetting(),
91 )
92 # These are all controlled via SSML, so we only need attributes, not properties.
93 rate = None
94 pitch = None
95 volume = None
96
97 @classmethod
98 def check(cls):
99 if not hasattr(sys, "frozen"):
100 # #3793: Source copies don't report the correct version on Windows 10 because Python isn't manifested for higher versions.
101 # We want this driver to work for source copies on Windows 10, so just return True here.
102 # If this isn't in fact Windows 10, it will fail when constructed, which is okay.
103 return True
104 # For binary copies, only present this as an available synth if this is Windows 10.
105 return winVersion.winVersion.major >= 10
106
107 def __init__(self):
108 super(SynthDriver, self).__init__()
109 self._dll = NVDAHelper.getHelperLocalWin10Dll()
110 self._dll.ocSpeech_getCurrentVoiceLanguage.restype = ctypes.c_wchar_p
111 self._handle = self._dll.ocSpeech_initialize()
112 self._callbackInst = ocSpeech_Callback(self._callback)
113 self._dll.ocSpeech_setCallback(self._handle, self._callbackInst)
114 self._dll.ocSpeech_getVoices.restype = NVDAHelper.bstrReturn
115 self._dll.ocSpeech_getCurrentVoiceId.restype = ctypes.c_wchar_p
116 self._player = nvwave.WavePlayer(1, SAMPLES_PER_SEC, BITS_PER_SAMPLE, outputDevice=config.conf["speech"]["outputDevice"])
117 # Initialize state.
118 self._queuedSpeech = []
119 self._wasCancelled = False
120 self._isProcessing = False
121 # Set initial values for parameters that can't be queried.
122 # This initialises our cache for the value.
123 self.rate = 50
124 self.pitch = 50
125 self.volume = 100
126
127 def terminate(self):
128 super(SynthDriver, self).terminate()
129 self._dll.ocSpeech_terminate(self._handle)
130 # Drop the ctypes function instance for the callback,
131 # as it is holding a reference to an instance method, which causes a reference cycle.
132 self._callbackInst = None
133
134 def cancel(self):
135 # Set a flag to tell the callback not to push more audio.
136 self._wasCancelled = True
137 log.debug("Cancelling")
138 # There might be more text pending. Throw it away.
139 self._queuedSpeech = []
140 self._player.stop()
141
142 def speak(self, speechSequence):
143 conv = _OcSsmlConverter(self.language, self.rate, self.pitch, self.volume)
144 text = conv.convertToXml(speechSequence)
145 self._queueSpeech(text)
146
147 def _queueSpeech(self, item):
148 self._queuedSpeech.append(item)
149 # We only process the queue here if it isn't already being processed.
150 if not self._isProcessing:
151 self._processQueue()
152
153 def _processQueue(self):
154 if self._queuedSpeech:
155 item = self._queuedSpeech.pop(0)
156 self._wasCancelled = False
157 log.debug("Begin processing speech")
158 self._isProcessing = True
159 # ocSpeech_speak is async.
160 # It will call _callback in a background thread once done,
161 # which will eventually process the queue again.
162 self._dll.ocSpeech_speak(self._handle, item)
163 return
164 self._player.idle()
165 log.debug("Queue empty, done processing")
166 self._isProcessing = False
167
168 def _callback(self, bytes, len, markers):
169 if len == 0:
170 # The C++ code will log an error with details.
171 log.debugWarning("ocSpeech_speak failed!")
172 self._processQueue()
173 return
174 # This gets called in a background thread.
175 # Strip the wav header.
176 assert len > WAV_HEADER_LEN
177 bytes += WAV_HEADER_LEN
178 len -= WAV_HEADER_LEN
179 data = ctypes.string_at(bytes, len)
180 if markers:
181 markers = markers.split('|')
182 else:
183 markers = []
184 prevMarker = None
185 prevPos = 0
186
187 # Push audio up to each marker so we can sync the audio with the markers.
188 for marker in markers:
189 if self._wasCancelled:
190 break
191 name, pos = marker.split(':')
192 pos = int(pos)
193 # pos is a time offset in 100-nanosecond units.
194 # Convert this to a byte offset.
195 # Order the equation so we don't have to do floating point.
196 pos = pos * BYTES_PER_SEC / HUNDRED_NS_PER_SEC
197 # Push audio up to this marker.
198 self._player.feed(data[prevPos:pos])
199 # _player.feed blocks until the previous chunk of audio is complete, not the chunk we just pushed.
200 # Therefore, indicate that we've reached the previous marker.
201 if prevMarker:
202 self.lastIndex = prevMarker
203 prevMarker = int(name)
204 prevPos = pos
205 if self._wasCancelled:
206 log.debug("Cancelled, stopped pushing audio")
207 else:
208 self._player.feed(data[prevPos:])
209 if prevMarker:
210 self.lastIndex = prevMarker
211 log.debug("Done pushing audio")
212 self._processQueue()
213
214 def _getAvailableVoices(self, onlyValid=True):
215 voices = OrderedDict()
216 voicesStr = self._dll.ocSpeech_getVoices(self._handle).split('|')
217 for voiceStr in voicesStr:
218 id, name = voiceStr.split(":")
219 if onlyValid and not self._isVoiceValid(id):
220 continue
221 voices[id] = VoiceInfo(id, name)
222 return voices
223
224 def _isVoiceValid(self, id):
225 idParts = id.split('\\')
226 rootKey = getattr(_winreg, idParts[0])
227 subkey = "\\".join(idParts[1:])
228 try:
229 hkey = _winreg.OpenKey(rootKey, subkey)
230 except WindowsError as e:
231 log.debugWarning("Could not open registry key %s, %s" % (id, e))
232 return False
233 try:
234 langDataPath = _winreg.QueryValueEx(hkey, 'langDataPath')
235 except WindowsError as e:
236 log.debugWarning("Could not open registry value 'langDataPath', %s" % e)
237 return False
238 if not langDataPath or not isinstance(langDataPath[0], basestring):
239 log.debugWarning("Invalid langDataPath value")
240 return False
241 if not os.path.isfile(os.path.expandvars(langDataPath[0])):
242 log.debugWarning("Missing language data file: %s" % langDataPath[0])
243 return False
244 try:
245 voicePath = _winreg.QueryValueEx(hkey, 'voicePath')
246 except WindowsError as e:
247 log.debugWarning("Could not open registry value 'langDataPath', %s" % e)
248 return False
249 if not voicePath or not isinstance(voicePath[0],basestring):
250 log.debugWarning("Invalid voicePath value")
251 return False
252 if not os.path.isfile(os.path.expandvars(voicePath[0] + '.apm')):
253 log.debugWarning("Missing voice file: %s" % voicePath[0] + ".apm")
254 return False
255 return True
256
257 def _get_voice(self):
258 return self._dll.ocSpeech_getCurrentVoiceId(self._handle)
259
260 def _set_voice(self, id):
261 voices = self._getAvailableVoices(onlyValid=False)
262 for index, voice in enumerate(voices):
263 if voice == id:
264 break
265 else:
266 raise LookupError("No such voice: %s" % id)
267 self._dll.ocSpeech_setVoice(self._handle, index)
268
269 def _get_language(self):
270 return self._dll.ocSpeech_getCurrentVoiceLanguage(self._handle)
271
272 def pause(self, switch):
273 self._player.pause(switch)
274
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/source/synthDrivers/oneCore.py b/source/synthDrivers/oneCore.py
--- a/source/synthDrivers/oneCore.py
+++ b/source/synthDrivers/oneCore.py
@@ -161,7 +161,6 @@
# which will eventually process the queue again.
self._dll.ocSpeech_speak(self._handle, item)
return
- self._player.idle()
log.debug("Queue empty, done processing")
self._isProcessing = False
@@ -209,6 +208,13 @@
if prevMarker:
self.lastIndex = prevMarker
log.debug("Done pushing audio")
+ if not self._queuedSpeech:
+ # There are no more queued utterances at this point, so call idle.
+ # This blocks while waiting for the final chunk to play,
+ # so by the time this is done, there might be something queued.
+ # The call to _processQueue will take care of this.
+ log.debug("Calling idle on audio player")
+ self._player.idle()
self._processQueue()
def _getAvailableVoices(self, onlyValid=True):
| {"golden_diff": "diff --git a/source/synthDrivers/oneCore.py b/source/synthDrivers/oneCore.py\n--- a/source/synthDrivers/oneCore.py\n+++ b/source/synthDrivers/oneCore.py\n@@ -161,7 +161,6 @@\n \t\t\t# which will eventually process the queue again.\r\n \t\t\tself._dll.ocSpeech_speak(self._handle, item)\r\n \t\t\treturn\r\n-\t\tself._player.idle()\r\n \t\tlog.debug(\"Queue empty, done processing\")\r\n \t\tself._isProcessing = False\r\n \r\n@@ -209,6 +208,13 @@\n \t\t\tif prevMarker:\r\n \t\t\t\tself.lastIndex = prevMarker\r\n \t\t\tlog.debug(\"Done pushing audio\")\r\n+\t\t\tif not self._queuedSpeech:\r\n+\t\t\t\t# There are no more queued utterances at this point, so call idle.\r\n+\t\t\t\t# This blocks while waiting for the final chunk to play,\r\n+\t\t\t\t# so by the time this is done, there might be something queued.\r\n+\t\t\t\t# The call to _processQueue will take care of this.\r\n+\t\t\t\tlog.debug(\"Calling idle on audio player\")\r\n+\t\t\t\tself._player.idle()\r\n \t\tself._processQueue()\r\n \r\n \tdef _getAvailableVoices(self, onlyValid=True):\n", "issue": "Nvda not announcing all the speeches when using the windows onecore synthesizer\nI installed nvda next 14285 and noticed a problem using the windows onecore synthesizer.\r\nBy unchecking and selecting a checkbox several more times then nvda does not announce whether the box is unchecked or checked.\r\nIt is a problem in the windows onecore synthesizer because when using another synthesizer as nuance vocaliser exprecive and a voice sap5 the synthesizer announces quickly and accurately if the box has been selected or cleared.\r\nSteps:\r\nGo to a checkbox such as:\r\nSave setting when exiting\r\nWithin the general options dialog of nvda.\r\nNow press space to uncheck and check this box.\r\nDo this several times.\r\nThe expected is that every time nvda announces the status of the box.\r\nThe thing is that the nvda announces some times the state of the box.\n", "before_files": [{"content": "#synthDrivers/oneCore.py\r\n#A part of NonVisual Desktop Access (NVDA)\r\n#Copyright (C) 2016-2017 Tyler Spivey, NV Access Limited\r\n#This file is covered by the GNU General Public License.\r\n#See the file COPYING for more details.\r\n\r\n\"\"\"Synth driver for Windows OneCore voices.\r\n\"\"\"\r\n\r\nimport os\r\nimport sys\r\nfrom collections import OrderedDict\r\nimport ctypes\r\nimport _winreg\r\nfrom synthDriverHandler import SynthDriver, VoiceInfo\r\nfrom logHandler import log\r\nimport config\r\nimport nvwave\r\nimport speech\r\nimport speechXml\r\nimport languageHandler\r\nimport winVersion\r\nimport NVDAHelper\r\n\r\nSAMPLES_PER_SEC = 22050\r\nBITS_PER_SAMPLE = 16\r\nBYTES_PER_SEC = SAMPLES_PER_SEC * (BITS_PER_SAMPLE / 8)\r\n#: The number of 100-nanosecond units in 1 second.\r\nHUNDRED_NS_PER_SEC = 10000000 # 1000000000 ns per sec / 100 ns\r\nWAV_HEADER_LEN = 44\r\nocSpeech_Callback = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_int, ctypes.c_wchar_p)\r\n\r\nclass _OcSsmlConverter(speechXml.SsmlConverter):\r\n\r\n\tdef __init__(self, defaultLanguage, rate, pitch, volume):\r\n\t\tsuper(_OcSsmlConverter, self).__init__(defaultLanguage)\r\n\t\tself._rate = rate\r\n\t\tself._pitch = pitch\r\n\t\tself._volume = volume\r\n\r\n\tdef generateBalancerCommands(self, speechSequence):\r\n\t\tcommands = super(_OcSsmlConverter, self).generateBalancerCommands(speechSequence)\r\n\t\t# The EncloseAllCommand from SSML must be first.\r\n\t\tyield next(commands)\r\n\t\t# OneCore doesn't provide a way to set base prosody values.\r\n\t\t# 
Therefore, the base values need to be set using SSML.\r\n\t\tyield self.convertRateCommand(speech.RateCommand(multiplier=1))\r\n\t\tyield self.convertVolumeCommand(speech.VolumeCommand(multiplier=1))\r\n\t\tyield self.convertPitchCommand(speech.PitchCommand(multiplier=1))\r\n\t\tfor command in commands:\r\n\t\t\tyield command\r\n\r\n\tdef _convertProsody(self, command, attr, default, base):\r\n\t\tif command.multiplier == 1 and base == default:\r\n\t\t\t# Returning to synth default.\r\n\t\t\treturn speechXml.DelAttrCommand(\"prosody\", attr)\r\n\t\telse:\r\n\t\t\t# Multiplication isn't supported, only addition/subtraction.\r\n\t\t\t# The final value must therefore be relative to the synthesizer's default.\r\n\t\t\tval = base * command.multiplier - default\r\n\t\t\treturn speechXml.SetAttrCommand(\"prosody\", attr, \"%d%%\" % val)\r\n\r\n\tdef convertRateCommand(self, command):\r\n\t\treturn self._convertProsody(command, \"rate\", 50, self._rate)\r\n\tdef convertPitchCommand(self, command):\r\n\t\treturn self._convertProsody(command, \"pitch\", 50, self._pitch)\r\n\tdef convertVolumeCommand(self, command):\r\n\t\treturn self._convertProsody(command, \"volume\", 100, self._volume)\r\n\r\n\tdef convertCharacterModeCommand(self, command):\r\n\t\t# OneCore's character speech sounds weird and doesn't support pitch alteration.\r\n\t\t# Therefore, we don't use it.\r\n\t\treturn None\r\n\r\n\tdef convertLangChangeCommand(self, command):\r\n\t\tlcid = languageHandler.localeNameToWindowsLCID(command.lang)\r\n\t\tif lcid is languageHandler.LCID_NONE:\r\n\t\t\tlog.debugWarning(\"Invalid language: %s\" % command.lang)\r\n\t\t\treturn None\r\n\t\treturn super(_OcSsmlConverter, self).convertLangChangeCommand(command)\r\n\r\nclass SynthDriver(SynthDriver):\r\n\tname = \"oneCore\"\r\n\t# Translators: Description for a speech synthesizer.\r\n\tdescription = _(\"Windows OneCore voices\")\r\n\tsupportedSettings = (\r\n\t\tSynthDriver.VoiceSetting(),\r\n\t\tSynthDriver.RateSetting(),\r\n\t\tSynthDriver.PitchSetting(),\r\n\t\tSynthDriver.VolumeSetting(),\r\n\t)\r\n\t# These are all controlled via SSML, so we only need attributes, not properties.\r\n\trate = None\r\n\tpitch = None\r\n\tvolume = None\r\n\r\n\t@classmethod\r\n\tdef check(cls):\r\n\t\tif not hasattr(sys, \"frozen\"):\r\n\t\t\t# #3793: Source copies don't report the correct version on Windows 10 because Python isn't manifested for higher versions.\r\n\t\t\t# We want this driver to work for source copies on Windows 10, so just return True here.\r\n\t\t\t# If this isn't in fact Windows 10, it will fail when constructed, which is okay.\r\n\t\t\treturn True\r\n\t\t# For binary copies, only present this as an available synth if this is Windows 10.\r\n\t\treturn winVersion.winVersion.major >= 10\r\n\r\n\tdef __init__(self):\r\n\t\tsuper(SynthDriver, self).__init__()\r\n\t\tself._dll = NVDAHelper.getHelperLocalWin10Dll()\r\n\t\tself._dll.ocSpeech_getCurrentVoiceLanguage.restype = ctypes.c_wchar_p\r\n\t\tself._handle = self._dll.ocSpeech_initialize()\r\n\t\tself._callbackInst = ocSpeech_Callback(self._callback)\r\n\t\tself._dll.ocSpeech_setCallback(self._handle, self._callbackInst)\r\n\t\tself._dll.ocSpeech_getVoices.restype = NVDAHelper.bstrReturn\r\n\t\tself._dll.ocSpeech_getCurrentVoiceId.restype = ctypes.c_wchar_p\r\n\t\tself._player = nvwave.WavePlayer(1, SAMPLES_PER_SEC, BITS_PER_SAMPLE, outputDevice=config.conf[\"speech\"][\"outputDevice\"])\r\n\t\t# Initialize state.\r\n\t\tself._queuedSpeech = []\r\n\t\tself._wasCancelled = 
False\r\n\t\tself._isProcessing = False\r\n\t\t# Set initial values for parameters that can't be queried.\r\n\t\t# This initialises our cache for the value.\r\n\t\tself.rate = 50\r\n\t\tself.pitch = 50\r\n\t\tself.volume = 100\r\n\r\n\tdef terminate(self):\r\n\t\tsuper(SynthDriver, self).terminate()\r\n\t\tself._dll.ocSpeech_terminate(self._handle)\r\n\t\t# Drop the ctypes function instance for the callback,\r\n\t\t# as it is holding a reference to an instance method, which causes a reference cycle.\r\n\t\tself._callbackInst = None\r\n\r\n\tdef cancel(self):\r\n\t\t# Set a flag to tell the callback not to push more audio.\r\n\t\tself._wasCancelled = True\r\n\t\tlog.debug(\"Cancelling\")\r\n\t\t# There might be more text pending. Throw it away.\r\n\t\tself._queuedSpeech = []\r\n\t\tself._player.stop()\r\n\r\n\tdef speak(self, speechSequence):\r\n\t\tconv = _OcSsmlConverter(self.language, self.rate, self.pitch, self.volume)\r\n\t\ttext = conv.convertToXml(speechSequence)\r\n\t\tself._queueSpeech(text)\r\n\r\n\tdef _queueSpeech(self, item):\r\n\t\tself._queuedSpeech.append(item)\r\n\t\t# We only process the queue here if it isn't already being processed.\r\n\t\tif not self._isProcessing:\r\n\t\t\tself._processQueue()\r\n\r\n\tdef _processQueue(self):\r\n\t\tif self._queuedSpeech:\r\n\t\t\titem = self._queuedSpeech.pop(0)\r\n\t\t\tself._wasCancelled = False\r\n\t\t\tlog.debug(\"Begin processing speech\")\r\n\t\t\tself._isProcessing = True\r\n\t\t\t# ocSpeech_speak is async.\r\n\t\t\t# It will call _callback in a background thread once done,\r\n\t\t\t# which will eventually process the queue again.\r\n\t\t\tself._dll.ocSpeech_speak(self._handle, item)\r\n\t\t\treturn\r\n\t\tself._player.idle()\r\n\t\tlog.debug(\"Queue empty, done processing\")\r\n\t\tself._isProcessing = False\r\n\r\n\tdef _callback(self, bytes, len, markers):\r\n\t\tif len == 0:\r\n\t\t\t# The C++ code will log an error with details.\r\n\t\t\tlog.debugWarning(\"ocSpeech_speak failed!\")\r\n\t\t\tself._processQueue()\r\n\t\t\treturn\r\n\t\t# This gets called in a background thread.\r\n\t\t# Strip the wav header.\r\n\t\tassert len > WAV_HEADER_LEN\r\n\t\tbytes += WAV_HEADER_LEN\r\n\t\tlen -= WAV_HEADER_LEN\r\n\t\tdata = ctypes.string_at(bytes, len)\r\n\t\tif markers:\r\n\t\t\tmarkers = markers.split('|')\r\n\t\telse:\r\n\t\t\tmarkers = []\r\n\t\tprevMarker = None\r\n\t\tprevPos = 0\r\n\r\n\t\t# Push audio up to each marker so we can sync the audio with the markers.\r\n\t\tfor marker in markers:\r\n\t\t\tif self._wasCancelled:\r\n\t\t\t\tbreak\r\n\t\t\tname, pos = marker.split(':')\r\n\t\t\tpos = int(pos)\r\n\t\t\t# pos is a time offset in 100-nanosecond units.\r\n\t\t\t# Convert this to a byte offset.\r\n\t\t\t# Order the equation so we don't have to do floating point.\r\n\t\t\tpos = pos * BYTES_PER_SEC / HUNDRED_NS_PER_SEC\r\n\t\t\t# Push audio up to this marker.\r\n\t\t\tself._player.feed(data[prevPos:pos])\r\n\t\t\t# _player.feed blocks until the previous chunk of audio is complete, not the chunk we just pushed.\r\n\t\t\t# Therefore, indicate that we've reached the previous marker.\r\n\t\t\tif prevMarker:\r\n\t\t\t\tself.lastIndex = prevMarker\r\n\t\t\tprevMarker = int(name)\r\n\t\t\tprevPos = pos\r\n\t\tif self._wasCancelled:\r\n\t\t\tlog.debug(\"Cancelled, stopped pushing audio\")\r\n\t\telse:\r\n\t\t\tself._player.feed(data[prevPos:])\r\n\t\t\tif prevMarker:\r\n\t\t\t\tself.lastIndex = prevMarker\r\n\t\t\tlog.debug(\"Done pushing audio\")\r\n\t\tself._processQueue()\r\n\r\n\tdef _getAvailableVoices(self, 
onlyValid=True):\r\n\t\tvoices = OrderedDict()\r\n\t\tvoicesStr = self._dll.ocSpeech_getVoices(self._handle).split('|')\r\n\t\tfor voiceStr in voicesStr:\r\n\t\t\tid, name = voiceStr.split(\":\")\r\n\t\t\tif onlyValid and not self._isVoiceValid(id):\r\n\t\t\t\tcontinue\r\n\t\t\tvoices[id] = VoiceInfo(id, name)\r\n\t\treturn voices\r\n\r\n\tdef _isVoiceValid(self, id):\r\n\t\tidParts = id.split('\\\\')\r\n\t\trootKey = getattr(_winreg, idParts[0])\r\n\t\tsubkey = \"\\\\\".join(idParts[1:])\r\n\t\ttry:\r\n\t\t\thkey = _winreg.OpenKey(rootKey, subkey)\r\n\t\texcept WindowsError as e:\r\n\t\t\tlog.debugWarning(\"Could not open registry key %s, %s\" % (id, e))\r\n\t\t\treturn False\r\n\t\ttry:\r\n\t\t\tlangDataPath = _winreg.QueryValueEx(hkey, 'langDataPath')\r\n\t\texcept WindowsError as e:\r\n\t\t\tlog.debugWarning(\"Could not open registry value 'langDataPath', %s\" % e)\r\n\t\t\treturn False\r\n\t\tif not langDataPath or not isinstance(langDataPath[0], basestring):\r\n\t\t\tlog.debugWarning(\"Invalid langDataPath value\")\r\n\t\t\treturn False\r\n\t\tif not os.path.isfile(os.path.expandvars(langDataPath[0])):\r\n\t\t\tlog.debugWarning(\"Missing language data file: %s\" % langDataPath[0])\r\n\t\t\treturn False\r\n\t\ttry:\r\n\t\t\tvoicePath = _winreg.QueryValueEx(hkey, 'voicePath')\r\n\t\texcept WindowsError as e:\r\n\t\t\tlog.debugWarning(\"Could not open registry value 'langDataPath', %s\" % e)\r\n\t\t\treturn False\r\n\t\tif not voicePath or not isinstance(voicePath[0],basestring):\r\n\t\t\tlog.debugWarning(\"Invalid voicePath value\")\r\n\t\t\treturn False\r\n\t\tif not os.path.isfile(os.path.expandvars(voicePath[0] + '.apm')):\r\n\t\t\tlog.debugWarning(\"Missing voice file: %s\" % voicePath[0] + \".apm\")\r\n\t\t\treturn False\r\n\t\treturn True\r\n\r\n\tdef _get_voice(self):\r\n\t\treturn self._dll.ocSpeech_getCurrentVoiceId(self._handle)\r\n\r\n\tdef _set_voice(self, id):\r\n\t\tvoices = self._getAvailableVoices(onlyValid=False)\r\n\t\tfor index, voice in enumerate(voices):\r\n\t\t\tif voice == id:\r\n\t\t\t\tbreak\r\n\t\telse:\r\n\t\t\traise LookupError(\"No such voice: %s\" % id)\r\n\t\tself._dll.ocSpeech_setVoice(self._handle, index)\r\n\r\n\tdef _get_language(self):\r\n\t\treturn self._dll.ocSpeech_getCurrentVoiceLanguage(self._handle)\r\n\r\n\tdef pause(self, switch):\r\n\t\tself._player.pause(switch)\r\n", "path": "source/synthDrivers/oneCore.py"}], "after_files": [{"content": "#synthDrivers/oneCore.py\r\n#A part of NonVisual Desktop Access (NVDA)\r\n#Copyright (C) 2016-2017 Tyler Spivey, NV Access Limited\r\n#This file is covered by the GNU General Public License.\r\n#See the file COPYING for more details.\r\n\r\n\"\"\"Synth driver for Windows OneCore voices.\r\n\"\"\"\r\n\r\nimport os\r\nimport sys\r\nfrom collections import OrderedDict\r\nimport ctypes\r\nimport _winreg\r\nfrom synthDriverHandler import SynthDriver, VoiceInfo\r\nfrom logHandler import log\r\nimport config\r\nimport nvwave\r\nimport speech\r\nimport speechXml\r\nimport languageHandler\r\nimport winVersion\r\nimport NVDAHelper\r\n\r\nSAMPLES_PER_SEC = 22050\r\nBITS_PER_SAMPLE = 16\r\nBYTES_PER_SEC = SAMPLES_PER_SEC * (BITS_PER_SAMPLE / 8)\r\n#: The number of 100-nanosecond units in 1 second.\r\nHUNDRED_NS_PER_SEC = 10000000 # 1000000000 ns per sec / 100 ns\r\nWAV_HEADER_LEN = 44\r\nocSpeech_Callback = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_int, ctypes.c_wchar_p)\r\n\r\nclass _OcSsmlConverter(speechXml.SsmlConverter):\r\n\r\n\tdef __init__(self, defaultLanguage, rate, pitch, 
volume):\r\n\t\tsuper(_OcSsmlConverter, self).__init__(defaultLanguage)\r\n\t\tself._rate = rate\r\n\t\tself._pitch = pitch\r\n\t\tself._volume = volume\r\n\r\n\tdef generateBalancerCommands(self, speechSequence):\r\n\t\tcommands = super(_OcSsmlConverter, self).generateBalancerCommands(speechSequence)\r\n\t\t# The EncloseAllCommand from SSML must be first.\r\n\t\tyield next(commands)\r\n\t\t# OneCore doesn't provide a way to set base prosody values.\r\n\t\t# Therefore, the base values need to be set using SSML.\r\n\t\tyield self.convertRateCommand(speech.RateCommand(multiplier=1))\r\n\t\tyield self.convertVolumeCommand(speech.VolumeCommand(multiplier=1))\r\n\t\tyield self.convertPitchCommand(speech.PitchCommand(multiplier=1))\r\n\t\tfor command in commands:\r\n\t\t\tyield command\r\n\r\n\tdef _convertProsody(self, command, attr, default, base):\r\n\t\tif command.multiplier == 1 and base == default:\r\n\t\t\t# Returning to synth default.\r\n\t\t\treturn speechXml.DelAttrCommand(\"prosody\", attr)\r\n\t\telse:\r\n\t\t\t# Multiplication isn't supported, only addition/subtraction.\r\n\t\t\t# The final value must therefore be relative to the synthesizer's default.\r\n\t\t\tval = base * command.multiplier - default\r\n\t\t\treturn speechXml.SetAttrCommand(\"prosody\", attr, \"%d%%\" % val)\r\n\r\n\tdef convertRateCommand(self, command):\r\n\t\treturn self._convertProsody(command, \"rate\", 50, self._rate)\r\n\tdef convertPitchCommand(self, command):\r\n\t\treturn self._convertProsody(command, \"pitch\", 50, self._pitch)\r\n\tdef convertVolumeCommand(self, command):\r\n\t\treturn self._convertProsody(command, \"volume\", 100, self._volume)\r\n\r\n\tdef convertCharacterModeCommand(self, command):\r\n\t\t# OneCore's character speech sounds weird and doesn't support pitch alteration.\r\n\t\t# Therefore, we don't use it.\r\n\t\treturn None\r\n\r\n\tdef convertLangChangeCommand(self, command):\r\n\t\tlcid = languageHandler.localeNameToWindowsLCID(command.lang)\r\n\t\tif lcid is languageHandler.LCID_NONE:\r\n\t\t\tlog.debugWarning(\"Invalid language: %s\" % command.lang)\r\n\t\t\treturn None\r\n\t\treturn super(_OcSsmlConverter, self).convertLangChangeCommand(command)\r\n\r\nclass SynthDriver(SynthDriver):\r\n\tname = \"oneCore\"\r\n\t# Translators: Description for a speech synthesizer.\r\n\tdescription = _(\"Windows OneCore voices\")\r\n\tsupportedSettings = (\r\n\t\tSynthDriver.VoiceSetting(),\r\n\t\tSynthDriver.RateSetting(),\r\n\t\tSynthDriver.PitchSetting(),\r\n\t\tSynthDriver.VolumeSetting(),\r\n\t)\r\n\t# These are all controlled via SSML, so we only need attributes, not properties.\r\n\trate = None\r\n\tpitch = None\r\n\tvolume = None\r\n\r\n\t@classmethod\r\n\tdef check(cls):\r\n\t\tif not hasattr(sys, \"frozen\"):\r\n\t\t\t# #3793: Source copies don't report the correct version on Windows 10 because Python isn't manifested for higher versions.\r\n\t\t\t# We want this driver to work for source copies on Windows 10, so just return True here.\r\n\t\t\t# If this isn't in fact Windows 10, it will fail when constructed, which is okay.\r\n\t\t\treturn True\r\n\t\t# For binary copies, only present this as an available synth if this is Windows 10.\r\n\t\treturn winVersion.winVersion.major >= 10\r\n\r\n\tdef __init__(self):\r\n\t\tsuper(SynthDriver, self).__init__()\r\n\t\tself._dll = NVDAHelper.getHelperLocalWin10Dll()\r\n\t\tself._dll.ocSpeech_getCurrentVoiceLanguage.restype = ctypes.c_wchar_p\r\n\t\tself._handle = self._dll.ocSpeech_initialize()\r\n\t\tself._callbackInst = 
ocSpeech_Callback(self._callback)\r\n\t\tself._dll.ocSpeech_setCallback(self._handle, self._callbackInst)\r\n\t\tself._dll.ocSpeech_getVoices.restype = NVDAHelper.bstrReturn\r\n\t\tself._dll.ocSpeech_getCurrentVoiceId.restype = ctypes.c_wchar_p\r\n\t\tself._player = nvwave.WavePlayer(1, SAMPLES_PER_SEC, BITS_PER_SAMPLE, outputDevice=config.conf[\"speech\"][\"outputDevice\"])\r\n\t\t# Initialize state.\r\n\t\tself._queuedSpeech = []\r\n\t\tself._wasCancelled = False\r\n\t\tself._isProcessing = False\r\n\t\t# Set initial values for parameters that can't be queried.\r\n\t\t# This initialises our cache for the value.\r\n\t\tself.rate = 50\r\n\t\tself.pitch = 50\r\n\t\tself.volume = 100\r\n\r\n\tdef terminate(self):\r\n\t\tsuper(SynthDriver, self).terminate()\r\n\t\tself._dll.ocSpeech_terminate(self._handle)\r\n\t\t# Drop the ctypes function instance for the callback,\r\n\t\t# as it is holding a reference to an instance method, which causes a reference cycle.\r\n\t\tself._callbackInst = None\r\n\r\n\tdef cancel(self):\r\n\t\t# Set a flag to tell the callback not to push more audio.\r\n\t\tself._wasCancelled = True\r\n\t\tlog.debug(\"Cancelling\")\r\n\t\t# There might be more text pending. Throw it away.\r\n\t\tself._queuedSpeech = []\r\n\t\tself._player.stop()\r\n\r\n\tdef speak(self, speechSequence):\r\n\t\tconv = _OcSsmlConverter(self.language, self.rate, self.pitch, self.volume)\r\n\t\ttext = conv.convertToXml(speechSequence)\r\n\t\tself._queueSpeech(text)\r\n\r\n\tdef _queueSpeech(self, item):\r\n\t\tself._queuedSpeech.append(item)\r\n\t\t# We only process the queue here if it isn't already being processed.\r\n\t\tif not self._isProcessing:\r\n\t\t\tself._processQueue()\r\n\r\n\tdef _processQueue(self):\r\n\t\tif self._queuedSpeech:\r\n\t\t\titem = self._queuedSpeech.pop(0)\r\n\t\t\tself._wasCancelled = False\r\n\t\t\tlog.debug(\"Begin processing speech\")\r\n\t\t\tself._isProcessing = True\r\n\t\t\t# ocSpeech_speak is async.\r\n\t\t\t# It will call _callback in a background thread once done,\r\n\t\t\t# which will eventually process the queue again.\r\n\t\t\tself._dll.ocSpeech_speak(self._handle, item)\r\n\t\t\treturn\r\n\t\tlog.debug(\"Queue empty, done processing\")\r\n\t\tself._isProcessing = False\r\n\r\n\tdef _callback(self, bytes, len, markers):\r\n\t\tif len == 0:\r\n\t\t\t# The C++ code will log an error with details.\r\n\t\t\tlog.debugWarning(\"ocSpeech_speak failed!\")\r\n\t\t\tself._processQueue()\r\n\t\t\treturn\r\n\t\t# This gets called in a background thread.\r\n\t\t# Strip the wav header.\r\n\t\tassert len > WAV_HEADER_LEN\r\n\t\tbytes += WAV_HEADER_LEN\r\n\t\tlen -= WAV_HEADER_LEN\r\n\t\tdata = ctypes.string_at(bytes, len)\r\n\t\tif markers:\r\n\t\t\tmarkers = markers.split('|')\r\n\t\telse:\r\n\t\t\tmarkers = []\r\n\t\tprevMarker = None\r\n\t\tprevPos = 0\r\n\r\n\t\t# Push audio up to each marker so we can sync the audio with the markers.\r\n\t\tfor marker in markers:\r\n\t\t\tif self._wasCancelled:\r\n\t\t\t\tbreak\r\n\t\t\tname, pos = marker.split(':')\r\n\t\t\tpos = int(pos)\r\n\t\t\t# pos is a time offset in 100-nanosecond units.\r\n\t\t\t# Convert this to a byte offset.\r\n\t\t\t# Order the equation so we don't have to do floating point.\r\n\t\t\tpos = pos * BYTES_PER_SEC / HUNDRED_NS_PER_SEC\r\n\t\t\t# Push audio up to this marker.\r\n\t\t\tself._player.feed(data[prevPos:pos])\r\n\t\t\t# _player.feed blocks until the previous chunk of audio is complete, not the chunk we just pushed.\r\n\t\t\t# Therefore, indicate that we've reached the previous marker.\r\n\t\t\tif 
prevMarker:\r\n\t\t\t\tself.lastIndex = prevMarker\r\n\t\t\tprevMarker = int(name)\r\n\t\t\tprevPos = pos\r\n\t\tif self._wasCancelled:\r\n\t\t\tlog.debug(\"Cancelled, stopped pushing audio\")\r\n\t\telse:\r\n\t\t\tself._player.feed(data[prevPos:])\r\n\t\t\tif prevMarker:\r\n\t\t\t\tself.lastIndex = prevMarker\r\n\t\t\tlog.debug(\"Done pushing audio\")\r\n\t\t\tif not self._queuedSpeech:\r\n\t\t\t\t# There are no more queued utterances at this point, so call idle.\r\n\t\t\t\t# This blocks while waiting for the final chunk to play,\r\n\t\t\t\t# so by the time this is done, there might be something queued.\r\n\t\t\t\t# The call to _processQueue will take care of this.\r\n\t\t\t\tlog.debug(\"Calling idle on audio player\")\r\n\t\t\t\tself._player.idle()\r\n\t\tself._processQueue()\r\n\r\n\tdef _getAvailableVoices(self, onlyValid=True):\r\n\t\tvoices = OrderedDict()\r\n\t\tvoicesStr = self._dll.ocSpeech_getVoices(self._handle).split('|')\r\n\t\tfor voiceStr in voicesStr:\r\n\t\t\tid, name = voiceStr.split(\":\")\r\n\t\t\tif onlyValid and not self._isVoiceValid(id):\r\n\t\t\t\tcontinue\r\n\t\t\tvoices[id] = VoiceInfo(id, name)\r\n\t\treturn voices\r\n\r\n\tdef _isVoiceValid(self, id):\r\n\t\tidParts = id.split('\\\\')\r\n\t\trootKey = getattr(_winreg, idParts[0])\r\n\t\tsubkey = \"\\\\\".join(idParts[1:])\r\n\t\ttry:\r\n\t\t\thkey = _winreg.OpenKey(rootKey, subkey)\r\n\t\texcept WindowsError as e:\r\n\t\t\tlog.debugWarning(\"Could not open registry key %s, %s\" % (id, e))\r\n\t\t\treturn False\r\n\t\ttry:\r\n\t\t\tlangDataPath = _winreg.QueryValueEx(hkey, 'langDataPath')\r\n\t\texcept WindowsError as e:\r\n\t\t\tlog.debugWarning(\"Could not open registry value 'langDataPath', %s\" % e)\r\n\t\t\treturn False\r\n\t\tif not langDataPath or not isinstance(langDataPath[0], basestring):\r\n\t\t\tlog.debugWarning(\"Invalid langDataPath value\")\r\n\t\t\treturn False\r\n\t\tif not os.path.isfile(os.path.expandvars(langDataPath[0])):\r\n\t\t\tlog.debugWarning(\"Missing language data file: %s\" % langDataPath[0])\r\n\t\t\treturn False\r\n\t\ttry:\r\n\t\t\tvoicePath = _winreg.QueryValueEx(hkey, 'voicePath')\r\n\t\texcept WindowsError as e:\r\n\t\t\tlog.debugWarning(\"Could not open registry value 'langDataPath', %s\" % e)\r\n\t\t\treturn False\r\n\t\tif not voicePath or not isinstance(voicePath[0],basestring):\r\n\t\t\tlog.debugWarning(\"Invalid voicePath value\")\r\n\t\t\treturn False\r\n\t\tif not os.path.isfile(os.path.expandvars(voicePath[0] + '.apm')):\r\n\t\t\tlog.debugWarning(\"Missing voice file: %s\" % voicePath[0] + \".apm\")\r\n\t\t\treturn False\r\n\t\treturn True\r\n\r\n\tdef _get_voice(self):\r\n\t\treturn self._dll.ocSpeech_getCurrentVoiceId(self._handle)\r\n\r\n\tdef _set_voice(self, id):\r\n\t\tvoices = self._getAvailableVoices(onlyValid=False)\r\n\t\tfor index, voice in enumerate(voices):\r\n\t\t\tif voice == id:\r\n\t\t\t\tbreak\r\n\t\telse:\r\n\t\t\traise LookupError(\"No such voice: %s\" % id)\r\n\t\tself._dll.ocSpeech_setVoice(self._handle, index)\r\n\r\n\tdef _get_language(self):\r\n\t\treturn self._dll.ocSpeech_getCurrentVoiceLanguage(self._handle)\r\n\r\n\tdef pause(self, switch):\r\n\t\tself._player.pause(switch)\r\n", "path": "source/synthDrivers/oneCore.py"}]} | 3,820 | 265 |
gh_patches_debug_1263 | rasdani/github-patches | git_diff | aws__aws-cli-2892 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
- Support use of colorama up to 0.3.8
+ colorama bugfix release 0.3.8 is available and contains no incompatible
changes. There is no need to restrict use to less or equal 0.3.7
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 import codecs
3 import os.path
4 import re
5 import sys
6
7 from setuptools import setup, find_packages
8
9
10 here = os.path.abspath(os.path.dirname(__file__))
11
12
13 def read(*parts):
14 return codecs.open(os.path.join(here, *parts), 'r').read()
15
16
17 def find_version(*file_paths):
18 version_file = read(*file_paths)
19 version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
20 version_file, re.M)
21 if version_match:
22 return version_match.group(1)
23 raise RuntimeError("Unable to find version string.")
24
25
26 requires = ['botocore==1.10.19',
27 'colorama>=0.2.5,<=0.3.7',
28 'docutils>=0.10',
29 'rsa>=3.1.2,<=3.5.0',
30 's3transfer>=0.1.12,<0.2.0',
31 'PyYAML>=3.10,<=3.12']
32
33
34 if sys.version_info[:2] == (2, 6):
35 # For python2.6 we have to require argparse since it
36 # was not in stdlib until 2.7.
37 requires.append('argparse>=1.1')
38
39
40 setup_options = dict(
41 name='awscli',
42 version=find_version("awscli", "__init__.py"),
43 description='Universal Command Line Environment for AWS.',
44 long_description=open('README.rst').read(),
45 author='Amazon Web Services',
46 url='http://aws.amazon.com/cli/',
47 scripts=['bin/aws', 'bin/aws.cmd',
48 'bin/aws_completer', 'bin/aws_zsh_completer.sh',
49 'bin/aws_bash_completer'],
50 packages=find_packages(exclude=['tests*']),
51 package_data={'awscli': ['data/*.json', 'examples/*/*.rst',
52 'examples/*/*/*.rst', 'topics/*.rst',
53 'topics/*.json']},
54 install_requires=requires,
55 extras_require={
56 ':python_version=="2.6"': [
57 'argparse>=1.1',
58 ]
59 },
60 license="Apache License 2.0",
61 classifiers=(
62 'Development Status :: 5 - Production/Stable',
63 'Intended Audience :: Developers',
64 'Intended Audience :: System Administrators',
65 'Natural Language :: English',
66 'License :: OSI Approved :: Apache Software License',
67 'Programming Language :: Python',
68 'Programming Language :: Python :: 2.6',
69 'Programming Language :: Python :: 2.7',
70 'Programming Language :: Python :: 3',
71 'Programming Language :: Python :: 3.3',
72 'Programming Language :: Python :: 3.4',
73 'Programming Language :: Python :: 3.5',
74 'Programming Language :: Python :: 3.6',
75 ),
76 )
77
78 if 'py2exe' in sys.argv:
79 # This will actually give us a py2exe command.
80 import py2exe
81 # And we have some py2exe specific options.
82 setup_options['options'] = {
83 'py2exe': {
84 'optimize': 0,
85 'skip_archive': True,
86 'dll_excludes': ['crypt32.dll'],
87 'packages': ['docutils', 'urllib', 'httplib', 'HTMLParser',
88 'awscli', 'ConfigParser', 'xml.etree', 'pipes'],
89 }
90 }
91 setup_options['console'] = ['bin/aws']
92
93
94 setup(**setup_options)
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -24,7 +24,7 @@
requires = ['botocore==1.10.19',
- 'colorama>=0.2.5,<=0.3.7',
+ 'colorama>=0.2.5,<=0.3.9',
'docutils>=0.10',
'rsa>=3.1.2,<=3.5.0',
's3transfer>=0.1.12,<0.2.0',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -24,7 +24,7 @@\n \n \n requires = ['botocore==1.10.19',\n- 'colorama>=0.2.5,<=0.3.7',\n+ 'colorama>=0.2.5,<=0.3.9',\n 'docutils>=0.10',\n 'rsa>=3.1.2,<=3.5.0',\n 's3transfer>=0.1.12,<0.2.0',\n", "issue": "- Support use of colorama up to 0.3.8\n + colorama bugfix release 0.3.8 is available and contains no incompatible\r\n changes. There is no need to restrict use to less or equal 0.3.7\n", "before_files": [{"content": "#!/usr/bin/env python\nimport codecs\nimport os.path\nimport re\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(*parts):\n return codecs.open(os.path.join(here, *parts), 'r').read()\n\n\ndef find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nrequires = ['botocore==1.10.19',\n 'colorama>=0.2.5,<=0.3.7',\n 'docutils>=0.10',\n 'rsa>=3.1.2,<=3.5.0',\n 's3transfer>=0.1.12,<0.2.0',\n 'PyYAML>=3.10,<=3.12']\n\n\nif sys.version_info[:2] == (2, 6):\n # For python2.6 we have to require argparse since it\n # was not in stdlib until 2.7.\n requires.append('argparse>=1.1')\n\n\nsetup_options = dict(\n name='awscli',\n version=find_version(\"awscli\", \"__init__.py\"),\n description='Universal Command Line Environment for AWS.',\n long_description=open('README.rst').read(),\n author='Amazon Web Services',\n url='http://aws.amazon.com/cli/',\n scripts=['bin/aws', 'bin/aws.cmd',\n 'bin/aws_completer', 'bin/aws_zsh_completer.sh',\n 'bin/aws_bash_completer'],\n packages=find_packages(exclude=['tests*']),\n package_data={'awscli': ['data/*.json', 'examples/*/*.rst',\n 'examples/*/*/*.rst', 'topics/*.rst',\n 'topics/*.json']},\n install_requires=requires,\n extras_require={\n ':python_version==\"2.6\"': [\n 'argparse>=1.1',\n ]\n },\n license=\"Apache License 2.0\",\n classifiers=(\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ),\n)\n\nif 'py2exe' in sys.argv:\n # This will actually give us a py2exe command.\n import py2exe\n # And we have some py2exe specific options.\n setup_options['options'] = {\n 'py2exe': {\n 'optimize': 0,\n 'skip_archive': True,\n 'dll_excludes': ['crypt32.dll'],\n 'packages': ['docutils', 'urllib', 'httplib', 'HTMLParser',\n 'awscli', 'ConfigParser', 'xml.etree', 'pipes'],\n }\n }\n setup_options['console'] = ['bin/aws']\n\n\nsetup(**setup_options)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport codecs\nimport os.path\nimport re\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(*parts):\n return codecs.open(os.path.join(here, *parts), 'r').read()\n\n\ndef find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = 
['\\\"]([^'\\\"]*)['\\\"]\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nrequires = ['botocore==1.10.19',\n 'colorama>=0.2.5,<=0.3.9',\n 'docutils>=0.10',\n 'rsa>=3.1.2,<=3.5.0',\n 's3transfer>=0.1.12,<0.2.0',\n 'PyYAML>=3.10,<=3.12']\n\n\nif sys.version_info[:2] == (2, 6):\n # For python2.6 we have to require argparse since it\n # was not in stdlib until 2.7.\n requires.append('argparse>=1.1')\n\n\nsetup_options = dict(\n name='awscli',\n version=find_version(\"awscli\", \"__init__.py\"),\n description='Universal Command Line Environment for AWS.',\n long_description=open('README.rst').read(),\n author='Amazon Web Services',\n url='http://aws.amazon.com/cli/',\n scripts=['bin/aws', 'bin/aws.cmd',\n 'bin/aws_completer', 'bin/aws_zsh_completer.sh',\n 'bin/aws_bash_completer'],\n packages=find_packages(exclude=['tests*']),\n package_data={'awscli': ['data/*.json', 'examples/*/*.rst',\n 'examples/*/*/*.rst', 'topics/*.rst',\n 'topics/*.json']},\n install_requires=requires,\n extras_require={\n ':python_version==\"2.6\"': [\n 'argparse>=1.1',\n ]\n },\n license=\"Apache License 2.0\",\n classifiers=(\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ),\n)\n\nif 'py2exe' in sys.argv:\n # This will actually give us a py2exe command.\n import py2exe\n # And we have some py2exe specific options.\n setup_options['options'] = {\n 'py2exe': {\n 'optimize': 0,\n 'skip_archive': True,\n 'dll_excludes': ['crypt32.dll'],\n 'packages': ['docutils', 'urllib', 'httplib', 'HTMLParser',\n 'awscli', 'ConfigParser', 'xml.etree', 'pipes'],\n }\n }\n setup_options['console'] = ['bin/aws']\n\n\nsetup(**setup_options)\n", "path": "setup.py"}]} | 1,261 | 132 |
gh_patches_debug_1540 | rasdani/github-patches | git_diff | WeblateOrg__weblate-2262 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improvement: show location for texts in screenshot management
### Steps to reproduce
1. Add a new screenshot
2. Search for a text string
3. See multiple identical results
### Actual behaviour
Sometimes you have the exact same text in multiple places in your project, but the translations can differ depending on the context. This is currently not supported very well in screenshot management, resulting in impossible to make choices like this:

### Expected behaviour
There should be some way to see differentiate the otherwise identical results in the source string list. Showing the context (where possible; I'm not sure any format other than gettext supports it?) and comments would help. A tooltip or something to show locations would also be very useful.
### Server configuration and status
Currently running the `weblate/weblate:2.20-1` docker image:
```
Postgres is up
* Weblate 2.20
* Python 2.7.13
* Django 1.11.12
* six 1.10.0
* social-auth-core 1.7.0
* social-auth-app-django 2.1.0
* django-appconf 1.0.2
* Translate Toolkit 2.3.0
* Whoosh 2.7.4
* defusedxml 0.5.0
* Git 2.11.0
* Pillow (PIL) 1.1.7
* dateutil 2.5.3
* lxml 3.7.1
* django-crispy-forms 1.7.2
* compressor 2.2
* djangorestframework 3.8.1
* user-agents 1.1.0
* pytz 2018.3
* pyuca N/A
* pyLibravatar N/A
* PyYAML 3.12
* tesserocr 2.2.2
* Mercurial 4.0
* git-svn 2.11.0
* Database backends: django.db.backends.postgresql_psycopg2
```
<bountysource-plugin>
---
Want to back this issue? **[Post a bounty on it!](https://www.bountysource.com/issues/61401631-improvement-show-location-for-texts-in-screenshot-management?utm_campaign=plugin&utm_content=tracker%2F253393&utm_medium=issues&utm_source=github)** We accept bounties via [Bountysource](https://www.bountysource.com/?utm_campaign=plugin&utm_content=tracker%2F253393&utm_medium=issues&utm_source=github).
</bountysource-plugin>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `weblate/screenshots/views.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright © 2012 - 2018 Michal Čihař <[email protected]>
4 #
5 # This file is part of Weblate <https://weblate.org/>
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with this program. If not, see <https://www.gnu.org/licenses/>.
19 #
20
21 import difflib
22
23 from django.contrib.auth.decorators import login_required
24 from django.core.exceptions import PermissionDenied
25 from django.http import JsonResponse
26 from django.utils.translation import ugettext as _
27 from django.views.decorators.http import require_POST
28 from django.views.generic import ListView, DetailView
29 from django.shortcuts import get_object_or_404, redirect, render
30
31 from PIL import Image
32
33 try:
34 from tesserocr import PyTessBaseAPI, RIL
35 HAS_OCR = True
36 except ImportError:
37 HAS_OCR = False
38
39 from weblate.screenshots.forms import ScreenshotForm
40 from weblate.screenshots.models import Screenshot
41 from weblate.trans.models import Source
42 from weblate.utils import messages
43 from weblate.utils.views import ComponentViewMixin
44
45
46 def try_add_source(request, obj):
47 if 'source' not in request.POST or not request.POST['source'].isdigit():
48 return False
49
50 try:
51 source = Source.objects.get(
52 pk=request.POST['source'],
53 component=obj.component
54 )
55 obj.sources.add(source)
56 return True
57 except Source.DoesNotExist:
58 return False
59
60
61 class ScreenshotList(ListView, ComponentViewMixin):
62 paginate_by = 25
63 model = Screenshot
64 _add_form = None
65
66 def get_queryset(self):
67 self.kwargs['component'] = self.get_component()
68 return Screenshot.objects.filter(component=self.kwargs['component'])
69
70 def get_context_data(self):
71 result = super(ScreenshotList, self).get_context_data()
72 component = self.kwargs['component']
73 result['object'] = component
74 if self.request.user.has_perm('screenshot.add', component):
75 if self._add_form is not None:
76 result['add_form'] = self._add_form
77 else:
78 result['add_form'] = ScreenshotForm()
79 return result
80
81 def post(self, request, **kwargs):
82 component = self.get_component()
83 if not request.user.has_perm('screenshot.add', component):
84 raise PermissionDenied()
85 self._add_form = ScreenshotForm(request.POST, request.FILES)
86 if self._add_form.is_valid():
87 obj = Screenshot.objects.create(
88 component=component,
89 user=request.user,
90 **self._add_form.cleaned_data
91 )
92 request.user.profile.uploaded += 1
93 request.user.profile.save(update_fields=['uploaded'])
94
95 try_add_source(request, obj)
96 messages.success(
97 request,
98 _(
99 'Screenshot has been uploaded, '
100 'you can now assign it to source strings.'
101 )
102 )
103 return redirect(obj)
104 messages.error(
105 request,
106 _('Failed to upload screenshot, please fix errors below.')
107 )
108 return self.get(request, **kwargs)
109
110
111 class ScreenshotDetail(DetailView):
112 model = Screenshot
113 _edit_form = None
114
115 def get_object(self, *args, **kwargs):
116 obj = super(ScreenshotDetail, self).get_object(*args, **kwargs)
117 self.request.user.check_access(obj.component.project)
118 return obj
119
120 def get_context_data(self, **kwargs):
121 result = super(ScreenshotDetail, self).get_context_data(**kwargs)
122 component = result['object'].component
123 if self.request.user.has_perm('screenshot.edit', component):
124 if self._edit_form is not None:
125 result['edit_form'] = self._edit_form
126 else:
127 result['edit_form'] = ScreenshotForm(instance=result['object'])
128 return result
129
130 def post(self, request, **kwargs):
131 obj = self.get_object()
132 if request.user.has_perm('screenshot.edit', obj.component):
133 self._edit_form = ScreenshotForm(
134 request.POST, request.FILES, instance=obj
135 )
136 if self._edit_form.is_valid():
137 if request.FILES:
138 obj.user = request.user
139 request.user.profile.uploaded += 1
140 request.user.profile.save(update_fields=['uploaded'])
141 self._edit_form.save()
142 else:
143 return self.get(request, **kwargs)
144 return redirect(obj)
145
146
147 @require_POST
148 @login_required
149 def delete_screenshot(request, pk):
150 obj = get_object_or_404(Screenshot, pk=pk)
151 request.user.check_access(obj.component.project)
152 if not request.user.has_perm('screenshot.delete', obj.component):
153 raise PermissionDenied()
154
155 kwargs = {
156 'project': obj.component.project.slug,
157 'component': obj.component.slug,
158 }
159
160 obj.delete()
161
162 messages.success(request, _('Screenshot %s has been deleted.') % obj.name)
163
164 return redirect('screenshots', **kwargs)
165
166
167 def get_screenshot(request, pk):
168 obj = get_object_or_404(Screenshot, pk=pk)
169 request.user.check_access(obj.component.project)
170 if not request.user.has_perm('screenshot.edit', obj.component):
171 raise PermissionDenied()
172 return obj
173
174
175 @require_POST
176 @login_required
177 def remove_source(request, pk):
178 obj = get_screenshot(request, pk)
179
180 obj.sources.remove(request.POST['source'])
181
182 messages.success(request, _('Source has been removed.'))
183
184 return redirect(obj)
185
186
187 def search_results(code, obj, units=None):
188 if units is None:
189 units = []
190 else:
191 units = units.exclude(
192 id_hash__in=obj.sources.values_list('id_hash', flat=True)
193 )
194
195 results = [
196 {'text': unit.get_source_plurals()[0], 'pk': unit.source_info.pk}
197 for unit in units
198 ]
199
200 return JsonResponse(
201 data={'responseCode': code, 'results': results}
202 )
203
204
205 @login_required
206 @require_POST
207 def search_source(request, pk):
208 obj = get_screenshot(request, pk)
209 try:
210 translation = obj.component.translation_set.all()[0]
211 except IndexError:
212 return search_results(500, obj)
213
214 units = translation.unit_set.search(
215 {
216 'search': 'substring',
217 'q': request.POST.get('q', ''),
218 'type': 'all',
219 'source': True,
220 'context': True,
221 },
222 translation=translation,
223 )
224
225 return search_results(200, obj, units)
226
227
228 def ocr_extract(api, image, strings):
229 """Extract closes matches from an image"""
230 api.SetImage(image)
231 for item in api.GetComponentImages(RIL.TEXTLINE, True):
232 api.SetRectangle(
233 item[1]['x'], item[1]['y'], item[1]['w'], item[1]['h']
234 )
235 ocr_result = api.GetUTF8Text()
236 parts = [ocr_result] + ocr_result.split('|') + ocr_result.split()
237 for part in parts:
238 for match in difflib.get_close_matches(part, strings, cutoff=0.9):
239 yield match
240
241
242 @login_required
243 @require_POST
244 def ocr_search(request, pk):
245 obj = get_screenshot(request, pk)
246 if not HAS_OCR:
247 return search_results(500, obj)
248 try:
249 translation = obj.component.translation_set.all()[0]
250 except IndexError:
251 return search_results(500, obj)
252
253 # Load image
254 original_image = Image.open(obj.image.path)
255 # Convert to greyscale
256 original_image = original_image.convert("L")
257 # Resize image (tesseract works best around 300dpi)
258 scaled_image = original_image.copy().resize(
259 [size * 4 for size in original_image.size],
260 Image.BICUBIC
261 )
262
263 # Find all our strings
264 sources = dict(translation.unit_set.values_list('source', 'pk'))
265 strings = tuple(sources.keys())
266
267 results = set()
268
269 # Extract and match strings
270 with PyTessBaseAPI() as api:
271 for image in (original_image, scaled_image):
272 for match in ocr_extract(api, image, strings):
273 results.add(sources[match])
274
275 return search_results(
276 200,
277 obj,
278 translation.unit_set.filter(pk__in=results)
279 )
280
281
282 @login_required
283 @require_POST
284 def add_source(request, pk):
285 obj = get_screenshot(request, pk)
286 result = try_add_source(request, obj)
287 return JsonResponse(
288 data={'responseCode': 200, 'status': result}
289 )
290
291
292 @login_required
293 def get_sources(request, pk):
294 obj = get_screenshot(request, pk)
295 return render(
296 request, 'screenshots/screenshot_sources_body.html',
297 {'sources': obj.sources.all(), 'object': obj}
298 )
299
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/weblate/screenshots/views.py b/weblate/screenshots/views.py
--- a/weblate/screenshots/views.py
+++ b/weblate/screenshots/views.py
@@ -193,7 +193,11 @@
)
results = [
- {'text': unit.get_source_plurals()[0], 'pk': unit.source_info.pk}
+ {'text': unit.get_source_plurals()[0],
+ 'pk': unit.source_info.pk,
+ 'context': unit.context,
+ 'location': unit.location}
+
for unit in units
]
| {"golden_diff": "diff --git a/weblate/screenshots/views.py b/weblate/screenshots/views.py\n--- a/weblate/screenshots/views.py\n+++ b/weblate/screenshots/views.py\n@@ -193,7 +193,11 @@\n )\n \n results = [\n- {'text': unit.get_source_plurals()[0], 'pk': unit.source_info.pk}\n+ {'text': unit.get_source_plurals()[0],\n+ 'pk': unit.source_info.pk,\n+ 'context': unit.context,\n+ 'location': unit.location}\n+\n for unit in units\n ]\n", "issue": "Improvement: show location for texts in screenshot management\n### Steps to reproduce\r\n\r\n1. Add a new screenshot\r\n2. Search for a text string\r\n3. See multiple identical results\r\n\r\n### Actual behaviour\r\n\r\nSometimes you have the exact same text in multiple places in your project, but the translations can differ depending on the context. This is currently not supported very well in screenshot management, resulting in impossible to make choices like this:\r\n\r\n\r\n\r\n### Expected behaviour\r\n\r\nThere should be some way to see differentiate the otherwise identical results in the source string list. Showing the context (where possible; I'm not sure any format other than gettext supports it?) and comments would help. A tooltip or something to show locations would also be very useful.\r\n\r\n### Server configuration and status\r\n\r\nCurrently running the `weblate/weblate:2.20-1` docker image:\r\n\r\n```\r\nPostgres is up\r\n * Weblate 2.20\r\n * Python 2.7.13\r\n * Django 1.11.12\r\n * six 1.10.0\r\n * social-auth-core 1.7.0\r\n * social-auth-app-django 2.1.0\r\n * django-appconf 1.0.2\r\n * Translate Toolkit 2.3.0\r\n * Whoosh 2.7.4\r\n * defusedxml 0.5.0\r\n * Git 2.11.0\r\n * Pillow (PIL) 1.1.7\r\n * dateutil 2.5.3\r\n * lxml 3.7.1\r\n * django-crispy-forms 1.7.2\r\n * compressor 2.2\r\n * djangorestframework 3.8.1\r\n * user-agents 1.1.0\r\n * pytz 2018.3\r\n * pyuca N/A\r\n * pyLibravatar N/A\r\n * PyYAML 3.12\r\n * tesserocr 2.2.2\r\n * Mercurial 4.0\r\n * git-svn 2.11.0\r\n * Database backends: django.db.backends.postgresql_psycopg2\r\n```\r\n\r\n\r\n\r\n\r\n<bountysource-plugin>\r\n\r\n---\r\nWant to back this issue? **[Post a bounty on it!](https://www.bountysource.com/issues/61401631-improvement-show-location-for-texts-in-screenshot-management?utm_campaign=plugin&utm_content=tracker%2F253393&utm_medium=issues&utm_source=github)** We accept bounties via [Bountysource](https://www.bountysource.com/?utm_campaign=plugin&utm_content=tracker%2F253393&utm_medium=issues&utm_source=github).\r\n</bountysource-plugin>\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright \u00a9 2012 - 2018 Michal \u010ciha\u0159 <[email protected]>\n#\n# This file is part of Weblate <https://weblate.org/>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <https://www.gnu.org/licenses/>.\n#\n\nimport difflib\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import JsonResponse\nfrom django.utils.translation import ugettext as _\nfrom django.views.decorators.http import require_POST\nfrom django.views.generic import ListView, DetailView\nfrom django.shortcuts import get_object_or_404, redirect, render\n\nfrom PIL import Image\n\ntry:\n from tesserocr import PyTessBaseAPI, RIL\n HAS_OCR = True\nexcept ImportError:\n HAS_OCR = False\n\nfrom weblate.screenshots.forms import ScreenshotForm\nfrom weblate.screenshots.models import Screenshot\nfrom weblate.trans.models import Source\nfrom weblate.utils import messages\nfrom weblate.utils.views import ComponentViewMixin\n\n\ndef try_add_source(request, obj):\n if 'source' not in request.POST or not request.POST['source'].isdigit():\n return False\n\n try:\n source = Source.objects.get(\n pk=request.POST['source'],\n component=obj.component\n )\n obj.sources.add(source)\n return True\n except Source.DoesNotExist:\n return False\n\n\nclass ScreenshotList(ListView, ComponentViewMixin):\n paginate_by = 25\n model = Screenshot\n _add_form = None\n\n def get_queryset(self):\n self.kwargs['component'] = self.get_component()\n return Screenshot.objects.filter(component=self.kwargs['component'])\n\n def get_context_data(self):\n result = super(ScreenshotList, self).get_context_data()\n component = self.kwargs['component']\n result['object'] = component\n if self.request.user.has_perm('screenshot.add', component):\n if self._add_form is not None:\n result['add_form'] = self._add_form\n else:\n result['add_form'] = ScreenshotForm()\n return result\n\n def post(self, request, **kwargs):\n component = self.get_component()\n if not request.user.has_perm('screenshot.add', component):\n raise PermissionDenied()\n self._add_form = ScreenshotForm(request.POST, request.FILES)\n if self._add_form.is_valid():\n obj = Screenshot.objects.create(\n component=component,\n user=request.user,\n **self._add_form.cleaned_data\n )\n request.user.profile.uploaded += 1\n request.user.profile.save(update_fields=['uploaded'])\n\n try_add_source(request, obj)\n messages.success(\n request,\n _(\n 'Screenshot has been uploaded, '\n 'you can now assign it to source strings.'\n )\n )\n return redirect(obj)\n messages.error(\n request,\n _('Failed to upload screenshot, please fix errors below.')\n )\n return self.get(request, **kwargs)\n\n\nclass ScreenshotDetail(DetailView):\n model = Screenshot\n _edit_form = None\n\n def get_object(self, *args, **kwargs):\n obj = super(ScreenshotDetail, self).get_object(*args, **kwargs)\n self.request.user.check_access(obj.component.project)\n return obj\n\n def get_context_data(self, **kwargs):\n result = super(ScreenshotDetail, self).get_context_data(**kwargs)\n component = result['object'].component\n if self.request.user.has_perm('screenshot.edit', component):\n if self._edit_form is not None:\n result['edit_form'] = self._edit_form\n else:\n result['edit_form'] = ScreenshotForm(instance=result['object'])\n return result\n\n def post(self, request, **kwargs):\n obj = self.get_object()\n if request.user.has_perm('screenshot.edit', obj.component):\n self._edit_form = ScreenshotForm(\n request.POST, request.FILES, instance=obj\n )\n if self._edit_form.is_valid():\n if request.FILES:\n obj.user = request.user\n request.user.profile.uploaded += 1\n request.user.profile.save(update_fields=['uploaded'])\n 
self._edit_form.save()\n else:\n return self.get(request, **kwargs)\n return redirect(obj)\n\n\n@require_POST\n@login_required\ndef delete_screenshot(request, pk):\n obj = get_object_or_404(Screenshot, pk=pk)\n request.user.check_access(obj.component.project)\n if not request.user.has_perm('screenshot.delete', obj.component):\n raise PermissionDenied()\n\n kwargs = {\n 'project': obj.component.project.slug,\n 'component': obj.component.slug,\n }\n\n obj.delete()\n\n messages.success(request, _('Screenshot %s has been deleted.') % obj.name)\n\n return redirect('screenshots', **kwargs)\n\n\ndef get_screenshot(request, pk):\n obj = get_object_or_404(Screenshot, pk=pk)\n request.user.check_access(obj.component.project)\n if not request.user.has_perm('screenshot.edit', obj.component):\n raise PermissionDenied()\n return obj\n\n\n@require_POST\n@login_required\ndef remove_source(request, pk):\n obj = get_screenshot(request, pk)\n\n obj.sources.remove(request.POST['source'])\n\n messages.success(request, _('Source has been removed.'))\n\n return redirect(obj)\n\n\ndef search_results(code, obj, units=None):\n if units is None:\n units = []\n else:\n units = units.exclude(\n id_hash__in=obj.sources.values_list('id_hash', flat=True)\n )\n\n results = [\n {'text': unit.get_source_plurals()[0], 'pk': unit.source_info.pk}\n for unit in units\n ]\n\n return JsonResponse(\n data={'responseCode': code, 'results': results}\n )\n\n\n@login_required\n@require_POST\ndef search_source(request, pk):\n obj = get_screenshot(request, pk)\n try:\n translation = obj.component.translation_set.all()[0]\n except IndexError:\n return search_results(500, obj)\n\n units = translation.unit_set.search(\n {\n 'search': 'substring',\n 'q': request.POST.get('q', ''),\n 'type': 'all',\n 'source': True,\n 'context': True,\n },\n translation=translation,\n )\n\n return search_results(200, obj, units)\n\n\ndef ocr_extract(api, image, strings):\n \"\"\"Extract closes matches from an image\"\"\"\n api.SetImage(image)\n for item in api.GetComponentImages(RIL.TEXTLINE, True):\n api.SetRectangle(\n item[1]['x'], item[1]['y'], item[1]['w'], item[1]['h']\n )\n ocr_result = api.GetUTF8Text()\n parts = [ocr_result] + ocr_result.split('|') + ocr_result.split()\n for part in parts:\n for match in difflib.get_close_matches(part, strings, cutoff=0.9):\n yield match\n\n\n@login_required\n@require_POST\ndef ocr_search(request, pk):\n obj = get_screenshot(request, pk)\n if not HAS_OCR:\n return search_results(500, obj)\n try:\n translation = obj.component.translation_set.all()[0]\n except IndexError:\n return search_results(500, obj)\n\n # Load image\n original_image = Image.open(obj.image.path)\n # Convert to greyscale\n original_image = original_image.convert(\"L\")\n # Resize image (tesseract works best around 300dpi)\n scaled_image = original_image.copy().resize(\n [size * 4 for size in original_image.size],\n Image.BICUBIC\n )\n\n # Find all our strings\n sources = dict(translation.unit_set.values_list('source', 'pk'))\n strings = tuple(sources.keys())\n\n results = set()\n\n # Extract and match strings\n with PyTessBaseAPI() as api:\n for image in (original_image, scaled_image):\n for match in ocr_extract(api, image, strings):\n results.add(sources[match])\n\n return search_results(\n 200,\n obj,\n translation.unit_set.filter(pk__in=results)\n )\n\n\n@login_required\n@require_POST\ndef add_source(request, pk):\n obj = get_screenshot(request, pk)\n result = try_add_source(request, obj)\n return JsonResponse(\n data={'responseCode': 200, 
'status': result}\n )\n\n\n@login_required\ndef get_sources(request, pk):\n obj = get_screenshot(request, pk)\n return render(\n request, 'screenshots/screenshot_sources_body.html',\n {'sources': obj.sources.all(), 'object': obj}\n )\n", "path": "weblate/screenshots/views.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright \u00a9 2012 - 2018 Michal \u010ciha\u0159 <[email protected]>\n#\n# This file is part of Weblate <https://weblate.org/>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <https://www.gnu.org/licenses/>.\n#\n\nimport difflib\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import JsonResponse\nfrom django.utils.translation import ugettext as _\nfrom django.views.decorators.http import require_POST\nfrom django.views.generic import ListView, DetailView\nfrom django.shortcuts import get_object_or_404, redirect, render\n\nfrom PIL import Image\n\ntry:\n from tesserocr import PyTessBaseAPI, RIL\n HAS_OCR = True\nexcept ImportError:\n HAS_OCR = False\n\nfrom weblate.screenshots.forms import ScreenshotForm\nfrom weblate.screenshots.models import Screenshot\nfrom weblate.trans.models import Source\nfrom weblate.utils import messages\nfrom weblate.utils.views import ComponentViewMixin\n\n\ndef try_add_source(request, obj):\n if 'source' not in request.POST or not request.POST['source'].isdigit():\n return False\n\n try:\n source = Source.objects.get(\n pk=request.POST['source'],\n component=obj.component\n )\n obj.sources.add(source)\n return True\n except Source.DoesNotExist:\n return False\n\n\nclass ScreenshotList(ListView, ComponentViewMixin):\n paginate_by = 25\n model = Screenshot\n _add_form = None\n\n def get_queryset(self):\n self.kwargs['component'] = self.get_component()\n return Screenshot.objects.filter(component=self.kwargs['component'])\n\n def get_context_data(self):\n result = super(ScreenshotList, self).get_context_data()\n component = self.kwargs['component']\n result['object'] = component\n if self.request.user.has_perm('screenshot.add', component):\n if self._add_form is not None:\n result['add_form'] = self._add_form\n else:\n result['add_form'] = ScreenshotForm()\n return result\n\n def post(self, request, **kwargs):\n component = self.get_component()\n if not request.user.has_perm('screenshot.add', component):\n raise PermissionDenied()\n self._add_form = ScreenshotForm(request.POST, request.FILES)\n if self._add_form.is_valid():\n obj = Screenshot.objects.create(\n component=component,\n user=request.user,\n **self._add_form.cleaned_data\n )\n request.user.profile.uploaded += 1\n request.user.profile.save(update_fields=['uploaded'])\n\n try_add_source(request, obj)\n messages.success(\n request,\n _(\n 'Screenshot has been uploaded, '\n 'you can now assign it to source strings.'\n )\n )\n return redirect(obj)\n messages.error(\n request,\n _('Failed to upload screenshot, please fix errors 
below.')\n )\n return self.get(request, **kwargs)\n\n\nclass ScreenshotDetail(DetailView):\n model = Screenshot\n _edit_form = None\n\n def get_object(self, *args, **kwargs):\n obj = super(ScreenshotDetail, self).get_object(*args, **kwargs)\n self.request.user.check_access(obj.component.project)\n return obj\n\n def get_context_data(self, **kwargs):\n result = super(ScreenshotDetail, self).get_context_data(**kwargs)\n component = result['object'].component\n if self.request.user.has_perm('screenshot.edit', component):\n if self._edit_form is not None:\n result['edit_form'] = self._edit_form\n else:\n result['edit_form'] = ScreenshotForm(instance=result['object'])\n return result\n\n def post(self, request, **kwargs):\n obj = self.get_object()\n if request.user.has_perm('screenshot.edit', obj.component):\n self._edit_form = ScreenshotForm(\n request.POST, request.FILES, instance=obj\n )\n if self._edit_form.is_valid():\n if request.FILES:\n obj.user = request.user\n request.user.profile.uploaded += 1\n request.user.profile.save(update_fields=['uploaded'])\n self._edit_form.save()\n else:\n return self.get(request, **kwargs)\n return redirect(obj)\n\n\n@require_POST\n@login_required\ndef delete_screenshot(request, pk):\n obj = get_object_or_404(Screenshot, pk=pk)\n request.user.check_access(obj.component.project)\n if not request.user.has_perm('screenshot.delete', obj.component):\n raise PermissionDenied()\n\n kwargs = {\n 'project': obj.component.project.slug,\n 'component': obj.component.slug,\n }\n\n obj.delete()\n\n messages.success(request, _('Screenshot %s has been deleted.') % obj.name)\n\n return redirect('screenshots', **kwargs)\n\n\ndef get_screenshot(request, pk):\n obj = get_object_or_404(Screenshot, pk=pk)\n request.user.check_access(obj.component.project)\n if not request.user.has_perm('screenshot.edit', obj.component):\n raise PermissionDenied()\n return obj\n\n\n@require_POST\n@login_required\ndef remove_source(request, pk):\n obj = get_screenshot(request, pk)\n\n obj.sources.remove(request.POST['source'])\n\n messages.success(request, _('Source has been removed.'))\n\n return redirect(obj)\n\n\ndef search_results(code, obj, units=None):\n if units is None:\n units = []\n else:\n units = units.exclude(\n id_hash__in=obj.sources.values_list('id_hash', flat=True)\n )\n\n results = [\n {'text': unit.get_source_plurals()[0],\n 'pk': unit.source_info.pk,\n 'context': unit.context,\n 'location': unit.location}\n\n for unit in units\n ]\n\n return JsonResponse(\n data={'responseCode': code, 'results': results}\n )\n\n\n@login_required\n@require_POST\ndef search_source(request, pk):\n obj = get_screenshot(request, pk)\n try:\n translation = obj.component.translation_set.all()[0]\n except IndexError:\n return search_results(500, obj)\n\n units = translation.unit_set.search(\n {\n 'search': 'substring',\n 'q': request.POST.get('q', ''),\n 'type': 'all',\n 'source': True,\n 'context': True,\n },\n translation=translation,\n )\n\n return search_results(200, obj, units)\n\n\ndef ocr_extract(api, image, strings):\n \"\"\"Extract closes matches from an image\"\"\"\n api.SetImage(image)\n for item in api.GetComponentImages(RIL.TEXTLINE, True):\n api.SetRectangle(\n item[1]['x'], item[1]['y'], item[1]['w'], item[1]['h']\n )\n ocr_result = api.GetUTF8Text()\n parts = [ocr_result] + ocr_result.split('|') + ocr_result.split()\n for part in parts:\n for match in difflib.get_close_matches(part, strings, cutoff=0.9):\n yield match\n\n\n@login_required\n@require_POST\ndef ocr_search(request, pk):\n 
obj = get_screenshot(request, pk)\n if not HAS_OCR:\n return search_results(500, obj)\n try:\n translation = obj.component.translation_set.all()[0]\n except IndexError:\n return search_results(500, obj)\n\n # Load image\n original_image = Image.open(obj.image.path)\n # Convert to greyscale\n original_image = original_image.convert(\"L\")\n # Resize image (tesseract works best around 300dpi)\n scaled_image = original_image.copy().resize(\n [size * 4 for size in original_image.size],\n Image.BICUBIC\n )\n\n # Find all our strings\n sources = dict(translation.unit_set.values_list('source', 'pk'))\n strings = tuple(sources.keys())\n\n results = set()\n\n # Extract and match strings\n with PyTessBaseAPI() as api:\n for image in (original_image, scaled_image):\n for match in ocr_extract(api, image, strings):\n results.add(sources[match])\n\n return search_results(\n 200,\n obj,\n translation.unit_set.filter(pk__in=results)\n )\n\n\n@login_required\n@require_POST\ndef add_source(request, pk):\n obj = get_screenshot(request, pk)\n result = try_add_source(request, obj)\n return JsonResponse(\n data={'responseCode': 200, 'status': result}\n )\n\n\n@login_required\ndef get_sources(request, pk):\n obj = get_screenshot(request, pk)\n return render(\n request, 'screenshots/screenshot_sources_body.html',\n {'sources': obj.sources.all(), 'object': obj}\n )\n", "path": "weblate/screenshots/views.py"}]} | 3,755 | 133 |
gh_patches_debug_1955 | rasdani/github-patches | git_diff | OpenMined__PySyft-676 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Implement rsqrt Functionality in FloatTensor with CPU/GPU Backend Support
### User Story:
As a Data Scientist using PySyft's FloatTensor type, I want to leverage a wide range of methods which use our new Unity backend. For this ticket to be complete, the rsqrt() should be added to our FloatTensor class with the appropriate functionality, returning a new tensor.
Furthermore, the function should automatically determine which backend to use (CPU/GPU) based on where the data is located. If the data is located on the CPU, a performant CPU implementation should run but if the data for a given FloatTensor is located on a GPU, it should be run using an HLSL kernel where appropriate. Obviously, if no GPU is available, it should automatically fall back to the CPU implementation.
### Every Reference You Might Need for this Issue:
- For a reference on the operation this performs check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation.
- For a reference on how to program in Unity, check out [this basic tutorial](https://unity3d.com/learn/tutorials/projects/roll-ball-tutorial)
- For a reference on how to write HLSL code, check out [this basic tutorial](http://kylehalladay.com/blog/tutorial/2014/06/27/Compute-Shaders-Are-Nifty.html)
- For a complete tutorial on how to add functions to FloatTensor (step by step guide) see [this Google Document](https://docs.google.com/document/d/1WRd7gGLFN0Awtf86AICYIHtg3gfFWLBa5wYTthsB3i0/edit)
- For a reference on how other functions like this have been implemented check out the functions in [this notebook](https://github.com/OpenMined/OpenMined/blob/master/notebooks/Syft%20Tensor%20Example%20Notebook.ipynb) as well as the corresponding files that made it possible ([SyftController](https://github.com/OpenMined/OpenMined/blob/master/Assets/OpenMined/Network/Controllers/SyftController.cs), [FloatTensor.Ops](https://github.com/OpenMined/OpenMined/blob/master/Assets/OpenMined/Syft/Tensor/FloatTensor.Ops.cs), [FloatTensor.ShaderOps](https://github.com/OpenMined/OpenMined/blob/master/Assets/OpenMined/Syft/Tensor/FloatTensor.ShaderOps.cs), [FloatTensorShaders](https://github.com/OpenMined/OpenMined/blob/master/Assets/OpenMined/Syft/Math/Shaders/FloatTensorShaders.compute), and [FloatTensorTest](https://github.com/OpenMined/OpenMined/blob/master/Assets/OpenMined.Tests/Editor/FloatTensorTest.cs)).
- And of course, please consider our [Contributor Guidelines](https://github.com/OpenMined/Docs/blob/master/contributing/guidelines.md) for all contributions.
### Acceptance Criteria:
- [ ] an integration test in PySyft demonstrating the correct CPU and GPU operation implemented over a FloatTensor while connected to a Unity backend
- [ ] a Unit Test in OpenMined/OpenMined demonstrating the correct operation on a FloatTensor
- [ ] [inline](http://pytorch.org/docs/master/tensors.html) documentation in the python code. For inspiration on inline documentation, please check out PyTorch's documentation for this operator.
- [ ] Link your Pull Request back to this Issue so that it gets closed appropriately when the PR is merged.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `syft/syft.py`
Content:
```
1 import zmq
2 import uuid
3
4
5 class FloatTensor():
6
7 def __init__(self, controller, data, data_is_pointer=False, verbose=False):
8 self.verbose = verbose
9 self.controller = controller
10 if(data is not None and not data_is_pointer):
11 data = data.astype('float')
12 controller.socket.send_json({"objectType": "tensor",
13 "functionCall": "create",
14 "data": list(data.flatten()),
15 "shape": data.shape})
16 self.id = int(controller.socket.recv_string())
17 if(verbose):
18 print("FloatTensor.__init__: " + str(self.id))
19
20 elif(data_is_pointer):
21 self.id = int(data)
22
23 def __del__(self):
24 self.delete_tensor()
25
26 def abs(self):
27 return self.no_params_func("abs", return_response=True)
28
29 def abs_(self):
30 return self.no_params_func("abs_")
31
32 def acos(self):
33 return self.no_params_func("acos", return_response=True)
34
35 def acos_(self):
36 return self.no_params_func("acos_")
37
38 def asin(self):
39 return self.no_params_func("asin", return_response=True)
40
41 def asin_(self):
42 return self.no_params_func("asin_")
43
44 def atan(self):
45 return self.no_params_func("atan", return_response=True)
46
47 def atan_(self):
48 return self.no_params_func("atan_")
49
50 def addmm_(self, x, y):
51 return self.params_func("addmm_", [x.id, y.id])
52
53 def addmm(self, x, y):
54 copy = self.copy()
55 copy.params_func("addmm_", [x.id, y.id])
56 return copy
57
58 def addmv_(self, x, y):
59 return self.params_func("addmv_", [x.id, y.id])
60
61 def addmv(self, x, y):
62 copy = self.copy()
63 copy.params_func("addmv_", [x.id, y.id])
64 return copy
65
66 def __add__(self, x):
67 return self.arithmetic_operation(x, "add", False)
68
69 def __iadd__(self, x):
70 return self.arithmetic_operation(x, "add", True)
71
72 def copy(self):
73 return self.no_params_func("copy", return_response=True)
74
75 def cos(self):
76 return self.no_params_func("cos", return_response=True)
77
78 def cos_(self):
79 return self.no_params_func("cos_")
80
81 def cosh(self):
82 return self.no_params_func("cosh", return_response=True)
83
84 def cosh_(self):
85 return self.no_params_func("cosh_")
86
87 def __truediv__(self, x):
88 return self.arithmetic_operation(x, "div", False)
89
90 def __itruediv__(self, x):
91 return self.arithmetic_operation(x, "div", True)
92
93 def floor_(self):
94 return self.no_params_func("floor_")
95
96 def __mul__(self, x):
97 return self.arithmetic_operation(x, "mul", False)
98
99 def __imul__(self, x):
100 return self.arithmetic_operation(x, "mul", True)
101
102 def neg(self):
103 return self.no_params_func("neg", return_response=True)
104
105 def sigmoid_(self):
106 return self.no_params_func("sigmoid_")
107
108 def sign(self):
109 return self.no_params_func("sign", return_response=True)
110
111 def sin(self):
112 return self.no_params_func("sin", return_response=True)
113
114 def sin_(self):
115 return self.no_params_func("sin_")
116
117 def size(self):
118 """
119 Returns the size of the self tensor as a FloatTensor.
120
121 Note:
122 The returned value currently is a FloatTensor because it leverages
123 the messaging mechanism with Unity.
124 """
125 return self.no_params_func("size", return_response=True)
126
127 def sqrt(self):
128 return self.no_params_func("sqrt", return_response=True)
129
130 def trunc(self):
131 return self.no_params_func("trunc", return_response=True)
132
133 def __sub__(self, x):
134 return self.arithmetic_operation(x, "sub", False)
135
136 def __isub__(self,x):
137 return self.arithmetic_operation(x,"sub",True)
138
139 def sum(self,dim):
140 assert type(dim) == int
141 return self.arithmetic_operation(dim, "sum", False)
142
143 def view(self, *args):
144 new_dim = list(args)
145 assert type(new_dim) == list
146 assert type(new_dim[0]) == int
147 return self.params_func("view", new_dim, return_response=True)
148
149 def view_(self, *args):
150 new_dim = list(args)
151 assert type(new_dim) == list
152 assert type(new_dim[0]) == int
153 self.params_func("view_", new_dim, return_response=False)
154 return self
155
156 def T(self):
157 return self.no_params_func("transpose", return_response=True)
158
159 def triu(self, k=0):
160 return self.params_func("triu", [k], return_response=True)
161
162 def triu_(self, k=0):
163 return self.params_func("triu_", [k])
164
165 # Fills this tensor with zeros.
166 def zero_(self):
167 return self.no_params_func("zero_")
168
169 def __repr__(self):
170 return self.no_params_func("print", True, False)
171
172 def __str__(self):
173 return self.no_params_func("print", True, False)
174
175 def cpu(self):
176 return self.no_params_func("cpu")
177
178 def gpu(self):
179 return self.no_params_func("gpu")
180
181 def cmd(self, functionCall, tensorIndexParams=[]):
182 cmd = {
183 'functionCall': functionCall,
184 'objectType': 'tensor',
185 'objectIndex': self.id,
186 'tensorIndexParams': tensorIndexParams}
187 return cmd
188
189 def params_func(self, name, params, return_response=False, return_as_tensor=True):
190 # send the command
191 self.controller.socket.send_json(
192 self.cmd(name, tensorIndexParams=params))
193 # receive output from command
194 res = self.controller.socket.recv_string()
195
196 if(self.verbose):
197 print(res)
198
199 if(return_response):
200 if(return_as_tensor):
201 if(self.verbose):
202 print("FloatTensor.__init__: " + res)
203 return FloatTensor(self.controller,int(res),True)
204 else:
205 return res
206 return self
207
208 def no_params_func(self, name, return_response=False, return_as_tensor=True):
209 return(self.params_func(name, [], return_response, return_as_tensor))
210
211 def arithmetic_operation(self, x, name, inline=False):
212
213 operation_cmd = name
214
215 if(type(x) == FloatTensor):
216 operation_cmd += "_elem"
217 parameter = x.id
218 else:
219 operation_cmd += "_scalar"
220 parameter = str(x)
221
222 if(inline):
223 operation_cmd += "_"
224
225 self.controller.socket.send_json(
226 self.cmd(operation_cmd, [parameter])) # sends the command
227 return FloatTensor(self.controller, int(self.controller.socket.recv_string()), True)
228
229 def delete_tensor(self):
230 if(self.id is not None):
231 self.no_params_func("delete")
232 self.verbose = None
233 self.controller = None
234 self.id = None
235
236 def T(self):
237 return self.no_params_func("transpose", return_response=True)
238
239 def is_contiguous(self):
240 return self.no_params_func("is_contiguous", return_response=True, return_as_tensor=False)
241
242 def sinh(self):
243 return self.no_params_func("sinh", return_response=True)
244
245 def sinh_(self):
246 return self.no_params_func("sinh_")
247
248 def tan(self):
249 return self.no_params_func("tan", return_response=True)
250
251 def tan_(self):
252 return self.no_params_func("tan_")
253
254 def tanh(self):
255 return self.no_params_func("tanh", return_response=True)
256
257
258 class SyftController():
259
260 def __init__(self,verbose=True):
261
262 self.identity = str(uuid.uuid4())
263
264 context = zmq.Context()
265 self.socket = context.socket(zmq.DEALER)
266 self.socket.setsockopt_string(zmq.IDENTITY, self.identity)
267 self.socket.connect("tcp://localhost:5555")
268 self.verbose=verbose
269
270 def FloatTensor(self, data):
271 verbose = self.verbose
272 return FloatTensor(self, data,verbose=verbose)
273
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/syft/syft.py b/syft/syft.py
--- a/syft/syft.py
+++ b/syft/syft.py
@@ -102,6 +102,9 @@
def neg(self):
return self.no_params_func("neg", return_response=True)
+ def rsqrt(self):
+ return self.no_params_func("rsqrt",return_response=True)
+
def sigmoid_(self):
return self.no_params_func("sigmoid_")
| {"golden_diff": "diff --git a/syft/syft.py b/syft/syft.py\n--- a/syft/syft.py\n+++ b/syft/syft.py\n@@ -102,6 +102,9 @@\n def neg(self):\n return self.no_params_func(\"neg\", return_response=True)\n \n+ def rsqrt(self):\n+ return self.no_params_func(\"rsqrt\",return_response=True)\n+\n def sigmoid_(self):\n return self.no_params_func(\"sigmoid_\")\n", "issue": "Implement rsqrt Functionality in FloatTensor with CPU/GPU Backend Support\n### User Story: \r\nAs a Data Scientist using PySyft's FloatTensor type, I want to leverage a wide range of methods which use our new Unity backend. For this ticket to be complete, the rsqrt() should be added to our FloatTensor class with the appropriate functionality, returning a new tensor.\r\n\r\nFurthermore, the function should automatically determine which backend to use (CPU/GPU) based on where the data is located. If the data is located on the CPU, a performant CPU implementation should run but if the data for a given FloatTensor is located on a GPU, it should be run using an HLSL kernel where appropriate. Obviously, if no GPU is available, it should automatically fall back to the CPU implementation.\r\n\r\n### Every Reference You Might Need for this Issue:\r\n\r\n- For a reference on the operation this performs check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation.\r\n- For a reference on how to program in Unity, check out [this basic tutorial](https://unity3d.com/learn/tutorials/projects/roll-ball-tutorial)\r\n- For a reference on how to write HLSL code, check out [this basic tutorial](http://kylehalladay.com/blog/tutorial/2014/06/27/Compute-Shaders-Are-Nifty.html)\r\n- For a complete tutorial on how to add functions to FloatTensor (step by step guide) see [this Google Document](https://docs.google.com/document/d/1WRd7gGLFN0Awtf86AICYIHtg3gfFWLBa5wYTthsB3i0/edit)\r\n- For a reference on how other functions like this have been implemented check out the functions in [this notebook](https://github.com/OpenMined/OpenMined/blob/master/notebooks/Syft%20Tensor%20Example%20Notebook.ipynb) as well as the corresponding files that made it possible ([SyftController](https://github.com/OpenMined/OpenMined/blob/master/Assets/OpenMined/Network/Controllers/SyftController.cs), [FloatTensor.Ops](https://github.com/OpenMined/OpenMined/blob/master/Assets/OpenMined/Syft/Tensor/FloatTensor.Ops.cs), [FloatTensor.ShaderOps](https://github.com/OpenMined/OpenMined/blob/master/Assets/OpenMined/Syft/Tensor/FloatTensor.ShaderOps.cs), [FloatTensorShaders](https://github.com/OpenMined/OpenMined/blob/master/Assets/OpenMined/Syft/Math/Shaders/FloatTensorShaders.compute), and [FloatTensorTest](https://github.com/OpenMined/OpenMined/blob/master/Assets/OpenMined.Tests/Editor/FloatTensorTest.cs)).\r\n- And of course, please consider our [Contributor Guidelines](https://github.com/OpenMined/Docs/blob/master/contributing/guidelines.md) for all contributions.\r\n### Acceptance Criteria:\r\n\r\n- [ ] an integration test in PySyft demonstrating the correct CPU and GPU operation implemented over a FloatTensor while connected to a Unity backend\r\n- [ ] a Unit Test in OpenMined/OpenMined demonstrating the correct operation on a FloatTensor\r\n- [ ] [inline](http://pytorch.org/docs/master/tensors.html) documentation in the python code. 
For inspiration on inline documentation, please check out PyTorch's documentation for this operator.\r\n- [ ] Link your Pull Request back to this Issue so that it gets closed appropriately when the PR is merged.\n", "before_files": [{"content": "import zmq\nimport uuid\n\n\nclass FloatTensor():\n\n def __init__(self, controller, data, data_is_pointer=False, verbose=False):\n self.verbose = verbose\n self.controller = controller\n if(data is not None and not data_is_pointer):\n data = data.astype('float')\n controller.socket.send_json({\"objectType\": \"tensor\",\n \"functionCall\": \"create\",\n \"data\": list(data.flatten()),\n \"shape\": data.shape})\n self.id = int(controller.socket.recv_string())\n if(verbose):\n print(\"FloatTensor.__init__: \" + str(self.id))\n\n elif(data_is_pointer):\n self.id = int(data)\n\n def __del__(self):\n self.delete_tensor()\n\n def abs(self):\n return self.no_params_func(\"abs\", return_response=True)\n\n def abs_(self):\n return self.no_params_func(\"abs_\")\n\n def acos(self):\n return self.no_params_func(\"acos\", return_response=True)\n\n def acos_(self):\n return self.no_params_func(\"acos_\")\n\n def asin(self):\n return self.no_params_func(\"asin\", return_response=True)\n\n def asin_(self):\n return self.no_params_func(\"asin_\")\n\n def atan(self):\n return self.no_params_func(\"atan\", return_response=True)\n\n def atan_(self):\n return self.no_params_func(\"atan_\")\n\n def addmm_(self, x, y):\n return self.params_func(\"addmm_\", [x.id, y.id])\n\n def addmm(self, x, y):\n copy = self.copy()\n copy.params_func(\"addmm_\", [x.id, y.id])\n return copy\n\n def addmv_(self, x, y):\n return self.params_func(\"addmv_\", [x.id, y.id])\n\n def addmv(self, x, y):\n copy = self.copy()\n copy.params_func(\"addmv_\", [x.id, y.id])\n return copy\n\n def __add__(self, x):\n return self.arithmetic_operation(x, \"add\", False)\n\n def __iadd__(self, x):\n return self.arithmetic_operation(x, \"add\", True)\n\n def copy(self):\n return self.no_params_func(\"copy\", return_response=True)\n\n def cos(self):\n return self.no_params_func(\"cos\", return_response=True)\n\n def cos_(self):\n return self.no_params_func(\"cos_\")\n\n def cosh(self):\n return self.no_params_func(\"cosh\", return_response=True)\n\n def cosh_(self):\n return self.no_params_func(\"cosh_\")\n\n def __truediv__(self, x):\n return self.arithmetic_operation(x, \"div\", False)\n\n def __itruediv__(self, x):\n return self.arithmetic_operation(x, \"div\", True)\n\n def floor_(self):\n return self.no_params_func(\"floor_\")\n\n def __mul__(self, x):\n return self.arithmetic_operation(x, \"mul\", False)\n\n def __imul__(self, x):\n return self.arithmetic_operation(x, \"mul\", True)\n\n def neg(self):\n return self.no_params_func(\"neg\", return_response=True)\n\n def sigmoid_(self):\n return self.no_params_func(\"sigmoid_\")\n\n def sign(self):\n return self.no_params_func(\"sign\", return_response=True)\n\n def sin(self):\n return self.no_params_func(\"sin\", return_response=True)\n\n def sin_(self):\n return self.no_params_func(\"sin_\")\n\n def size(self):\n \"\"\"\n Returns the size of the self tensor as a FloatTensor.\n\n Note:\n The returned value currently is a FloatTensor because it leverages\n the messaging mechanism with Unity.\n \"\"\"\n return self.no_params_func(\"size\", return_response=True)\n\n def sqrt(self):\n return self.no_params_func(\"sqrt\", return_response=True)\n\n def trunc(self):\n return self.no_params_func(\"trunc\", return_response=True)\n\n def __sub__(self, x):\n 
return self.arithmetic_operation(x, \"sub\", False)\n\n def __isub__(self,x):\n return self.arithmetic_operation(x,\"sub\",True)\n\n def sum(self,dim):\n assert type(dim) == int\n return self.arithmetic_operation(dim, \"sum\", False)\n\n def view(self, *args):\n new_dim = list(args)\n assert type(new_dim) == list\n assert type(new_dim[0]) == int\n return self.params_func(\"view\", new_dim, return_response=True)\n\n def view_(self, *args):\n new_dim = list(args)\n assert type(new_dim) == list\n assert type(new_dim[0]) == int\n self.params_func(\"view_\", new_dim, return_response=False)\n return self\n\n def T(self):\n return self.no_params_func(\"transpose\", return_response=True)\n\n def triu(self, k=0):\n return self.params_func(\"triu\", [k], return_response=True)\n\n def triu_(self, k=0):\n return self.params_func(\"triu_\", [k])\n\n # Fills this tensor with zeros.\n def zero_(self):\n return self.no_params_func(\"zero_\")\n\n def __repr__(self):\n return self.no_params_func(\"print\", True, False)\n\n def __str__(self):\n return self.no_params_func(\"print\", True, False)\n\n def cpu(self):\n return self.no_params_func(\"cpu\")\n\n def gpu(self):\n return self.no_params_func(\"gpu\")\n\n def cmd(self, functionCall, tensorIndexParams=[]):\n cmd = {\n 'functionCall': functionCall,\n 'objectType': 'tensor',\n 'objectIndex': self.id,\n 'tensorIndexParams': tensorIndexParams}\n return cmd\n\n def params_func(self, name, params, return_response=False, return_as_tensor=True):\n # send the command\n self.controller.socket.send_json(\n self.cmd(name, tensorIndexParams=params))\n # receive output from command\n res = self.controller.socket.recv_string()\n\n if(self.verbose):\n print(res)\n\n if(return_response):\n if(return_as_tensor):\n if(self.verbose):\n print(\"FloatTensor.__init__: \" + res)\n return FloatTensor(self.controller,int(res),True)\n else:\n return res\n return self\n\n def no_params_func(self, name, return_response=False, return_as_tensor=True):\n return(self.params_func(name, [], return_response, return_as_tensor))\n\n def arithmetic_operation(self, x, name, inline=False):\n\n operation_cmd = name\n\n if(type(x) == FloatTensor):\n operation_cmd += \"_elem\"\n parameter = x.id\n else:\n operation_cmd += \"_scalar\"\n parameter = str(x)\n\n if(inline):\n operation_cmd += \"_\"\n\n self.controller.socket.send_json(\n self.cmd(operation_cmd, [parameter])) # sends the command\n return FloatTensor(self.controller, int(self.controller.socket.recv_string()), True)\n\n def delete_tensor(self):\n if(self.id is not None):\n self.no_params_func(\"delete\")\n self.verbose = None\n self.controller = None\n self.id = None\n\n def T(self):\n return self.no_params_func(\"transpose\", return_response=True)\n\n def is_contiguous(self):\n return self.no_params_func(\"is_contiguous\", return_response=True, return_as_tensor=False)\n\n def sinh(self):\n return self.no_params_func(\"sinh\", return_response=True)\n\n def sinh_(self):\n return self.no_params_func(\"sinh_\")\n\n def tan(self):\n return self.no_params_func(\"tan\", return_response=True)\n\n def tan_(self):\n return self.no_params_func(\"tan_\")\n\n def tanh(self):\n return self.no_params_func(\"tanh\", return_response=True)\n\n\nclass SyftController():\n\n def __init__(self,verbose=True):\n\n self.identity = str(uuid.uuid4())\n\n context = zmq.Context()\n self.socket = context.socket(zmq.DEALER)\n self.socket.setsockopt_string(zmq.IDENTITY, self.identity)\n self.socket.connect(\"tcp://localhost:5555\")\n self.verbose=verbose\n\n def 
FloatTensor(self, data):\n verbose = self.verbose\n return FloatTensor(self, data,verbose=verbose)\n", "path": "syft/syft.py"}], "after_files": [{"content": "import zmq\nimport uuid\n\n\nclass FloatTensor():\n\n def __init__(self, controller, data, data_is_pointer=False, verbose=False):\n self.verbose = verbose\n self.controller = controller\n if(data is not None and not data_is_pointer):\n data = data.astype('float')\n controller.socket.send_json({\"objectType\": \"tensor\",\n \"functionCall\": \"create\",\n \"data\": list(data.flatten()),\n \"shape\": data.shape})\n self.id = int(controller.socket.recv_string())\n if(verbose):\n print(\"FloatTensor.__init__: \" + str(self.id))\n\n elif(data_is_pointer):\n self.id = int(data)\n\n def __del__(self):\n self.delete_tensor()\n\n def abs(self):\n return self.no_params_func(\"abs\", return_response=True)\n\n def abs_(self):\n return self.no_params_func(\"abs_\")\n\n def acos(self):\n return self.no_params_func(\"acos\", return_response=True)\n\n def acos_(self):\n return self.no_params_func(\"acos_\")\n\n def asin(self):\n return self.no_params_func(\"asin\", return_response=True)\n\n def asin_(self):\n return self.no_params_func(\"asin_\")\n\n def atan(self):\n return self.no_params_func(\"atan\", return_response=True)\n\n def atan_(self):\n return self.no_params_func(\"atan_\")\n\n def addmm_(self, x, y):\n return self.params_func(\"addmm_\", [x.id, y.id])\n\n def addmm(self, x, y):\n copy = self.copy()\n copy.params_func(\"addmm_\", [x.id, y.id])\n return copy\n\n def addmv_(self, x, y):\n return self.params_func(\"addmv_\", [x.id, y.id])\n\n def addmv(self, x, y):\n copy = self.copy()\n copy.params_func(\"addmv_\", [x.id, y.id])\n return copy\n\n def __add__(self, x):\n return self.arithmetic_operation(x, \"add\", False)\n\n def __iadd__(self, x):\n return self.arithmetic_operation(x, \"add\", True)\n\n def copy(self):\n return self.no_params_func(\"copy\", return_response=True)\n\n def cos(self):\n return self.no_params_func(\"cos\", return_response=True)\n\n def cos_(self):\n return self.no_params_func(\"cos_\")\n\n def cosh(self):\n return self.no_params_func(\"cosh\", return_response=True)\n\n def cosh_(self):\n return self.no_params_func(\"cosh_\")\n\n def __truediv__(self, x):\n return self.arithmetic_operation(x, \"div\", False)\n\n def __itruediv__(self, x):\n return self.arithmetic_operation(x, \"div\", True)\n\n def floor_(self):\n return self.no_params_func(\"floor_\")\n\n def __mul__(self, x):\n return self.arithmetic_operation(x, \"mul\", False)\n\n def __imul__(self, x):\n return self.arithmetic_operation(x, \"mul\", True)\n\n def neg(self):\n return self.no_params_func(\"neg\", return_response=True)\n\n def rsqrt(self):\n return self.no_params_func(\"rsqrt\",return_response=True)\n\n def sigmoid_(self):\n return self.no_params_func(\"sigmoid_\")\n\n def sign(self):\n return self.no_params_func(\"sign\", return_response=True)\n\n def sin(self):\n return self.no_params_func(\"sin\", return_response=True)\n\n def sin_(self):\n return self.no_params_func(\"sin_\")\n\n def size(self):\n \"\"\"\n Returns the size of the self tensor as a FloatTensor.\n\n Note:\n The returned value currently is a FloatTensor because it leverages\n the messaging mechanism with Unity.\n \"\"\"\n return self.no_params_func(\"size\", return_response=True)\n\n def sqrt(self):\n return self.no_params_func(\"sqrt\", return_response=True)\n\n def trunc(self):\n return self.no_params_func(\"trunc\", return_response=True)\n\n def __sub__(self, x):\n return 
self.arithmetic_operation(x, \"sub\", False)\n\n def __isub__(self,x):\n return self.arithmetic_operation(x,\"sub\",True)\n\n def sum(self,dim):\n assert type(dim) == int\n return self.arithmetic_operation(dim, \"sum\", False)\n\n def view(self, *args):\n new_dim = list(args)\n assert type(new_dim) == list\n assert type(new_dim[0]) == int\n return self.params_func(\"view\", new_dim, return_response=True)\n\n def view_(self, *args):\n new_dim = list(args)\n assert type(new_dim) == list\n assert type(new_dim[0]) == int\n self.params_func(\"view_\", new_dim, return_response=False)\n return self\n\n def T(self):\n return self.no_params_func(\"transpose\", return_response=True)\n\n def triu(self, k=0):\n return self.params_func(\"triu\", [k], return_response=True)\n\n def triu_(self, k=0):\n return self.params_func(\"triu_\", [k])\n\n # Fills this tensor with zeros.\n def zero_(self):\n return self.no_params_func(\"zero_\")\n\n def __repr__(self):\n return self.no_params_func(\"print\", True, False)\n\n def __str__(self):\n return self.no_params_func(\"print\", True, False)\n\n def cpu(self):\n return self.no_params_func(\"cpu\")\n\n def gpu(self):\n return self.no_params_func(\"gpu\")\n\n def cmd(self, functionCall, tensorIndexParams=[]):\n cmd = {\n 'functionCall': functionCall,\n 'objectType': 'tensor',\n 'objectIndex': self.id,\n 'tensorIndexParams': tensorIndexParams}\n return cmd\n\n def params_func(self, name, params, return_response=False, return_as_tensor=True):\n # send the command\n self.controller.socket.send_json(\n self.cmd(name, tensorIndexParams=params))\n # receive output from command\n res = self.controller.socket.recv_string()\n\n if(self.verbose):\n print(res)\n\n if(return_response):\n if(return_as_tensor):\n if(self.verbose):\n print(\"FloatTensor.__init__: \" + res)\n return FloatTensor(self.controller,int(res),True)\n else:\n return res\n return self\n\n def no_params_func(self, name, return_response=False, return_as_tensor=True):\n return(self.params_func(name, [], return_response, return_as_tensor))\n\n def arithmetic_operation(self, x, name, inline=False):\n\n operation_cmd = name\n\n if(type(x) == FloatTensor):\n operation_cmd += \"_elem\"\n parameter = x.id\n else:\n operation_cmd += \"_scalar\"\n parameter = str(x)\n\n if(inline):\n operation_cmd += \"_\"\n\n self.controller.socket.send_json(\n self.cmd(operation_cmd, [parameter])) # sends the command\n return FloatTensor(self.controller, int(self.controller.socket.recv_string()), True)\n\n def delete_tensor(self):\n if(self.id is not None):\n self.no_params_func(\"delete\")\n self.verbose = None\n self.controller = None\n self.id = None\n\n def T(self):\n return self.no_params_func(\"transpose\", return_response=True)\n\n def is_contiguous(self):\n return self.no_params_func(\"is_contiguous\", return_response=True, return_as_tensor=False)\n\n def sinh(self):\n return self.no_params_func(\"sinh\", return_response=True)\n\n def sinh_(self):\n return self.no_params_func(\"sinh_\")\n\n def tan(self):\n return self.no_params_func(\"tan\", return_response=True)\n\n def tan_(self):\n return self.no_params_func(\"tan_\")\n\n def tanh(self):\n return self.no_params_func(\"tanh\", return_response=True)\n\n\nclass SyftController():\n\n def __init__(self,verbose=True):\n\n self.identity = str(uuid.uuid4())\n\n context = zmq.Context()\n self.socket = context.socket(zmq.DEALER)\n self.socket.setsockopt_string(zmq.IDENTITY, self.identity)\n self.socket.connect(\"tcp://localhost:5555\")\n self.verbose=verbose\n\n def 
FloatTensor(self, data):\n verbose = self.verbose\n return FloatTensor(self, data,verbose=verbose)\n", "path": "syft/syft.py"}]} | 3,553 | 112 |
gh_patches_debug_21124 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-3464 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
URL with Chinese query string returns 400
Using IE11, a URL with a Chinese query string returns a 400 error.
1. Mitmproxy version: 3.0.0 (release version)
Python version: 3.5.3
Platform: Windows-10-10.0.14393-SP0
SSL version: OpenSSL 1.1.0e 16 Feb 2017
Windows version: 10 10.0.14393 SP0 Multiprocessor Free
2. Chrome + mitmdump is fine.
3. But IE11 + mitmdump produces the error.
4. IE11 + Burp Suite is fine.
5. mitmdump --listen-host 127.0.0.1 --listen-port 8080
Mitmproxy gave no hint of an error, but the query string **lc_name** was submitted with a different charset,
and the request returned HTTP 400.
The HTML charset is gb2312.
IE11 developer tools see url http://wlpx.tax-edu.net/jsp/portal/PortalControl?flag=selectCourse&lc_id=42825&lc_name=�������
chrome developer tools see url http://wlpx.tax-edu.net/jsp/portal/PortalControl?flag=selectCourse&lc_id=42825&lc_name=%CD%A8%D3%C3%D6%AA%CA%B6%BA%CD%C4%DC%C1%A6

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/net/http/url.py`
Content:
```
1 import urllib.parse
2 from typing import Sequence
3 from typing import Tuple
4
5 from mitmproxy.net import check
6
7
8 def parse(url):
9 """
10 URL-parsing function that checks that
11 - port is an integer 0-65535
12 - host is a valid IDNA-encoded hostname with no null-bytes
13 - path is valid ASCII
14
15 Args:
16 A URL (as bytes or as unicode)
17
18 Returns:
19 A (scheme, host, port, path) tuple
20
21 Raises:
22 ValueError, if the URL is not properly formatted.
23 """
24 parsed = urllib.parse.urlparse(url)
25
26 if not parsed.hostname:
27 raise ValueError("No hostname given")
28
29 if isinstance(url, bytes):
30 host = parsed.hostname
31
32 # this should not raise a ValueError,
33 # but we try to be very forgiving here and accept just everything.
34 else:
35 host = parsed.hostname.encode("idna")
36 if isinstance(parsed, urllib.parse.ParseResult):
37 parsed = parsed.encode("ascii")
38
39 port = parsed.port # Returns None if port number invalid in Py3.5. Will throw ValueError in Py3.6
40 if not port:
41 port = 443 if parsed.scheme == b"https" else 80
42
43 full_path = urllib.parse.urlunparse(
44 (b"", b"", parsed.path, parsed.params, parsed.query, parsed.fragment)
45 )
46 if not full_path.startswith(b"/"):
47 full_path = b"/" + full_path
48
49 if not check.is_valid_host(host):
50 raise ValueError("Invalid Host")
51
52 return parsed.scheme, host, port, full_path
53
54
55 def unparse(scheme, host, port, path=""):
56 """
57 Returns a URL string, constructed from the specified components.
58
59 Args:
60 All args must be str.
61 """
62 if path == "*":
63 path = ""
64 return "%s://%s%s" % (scheme, hostport(scheme, host, port), path)
65
66
67 def encode(s: Sequence[Tuple[str, str]], similar_to: str=None) -> str:
68 """
69 Takes a list of (key, value) tuples and returns a urlencoded string.
70 If similar_to is passed, the output is formatted similar to the provided urlencoded string.
71 """
72
73 remove_trailing_equal = False
74 if similar_to:
75 remove_trailing_equal = any("=" not in param for param in similar_to.split("&"))
76
77 encoded = urllib.parse.urlencode(s, False, errors="surrogateescape")
78
79 if encoded and remove_trailing_equal:
80 encoded = encoded.replace("=&", "&")
81 if encoded[-1] == '=':
82 encoded = encoded[:-1]
83
84 return encoded
85
86
87 def decode(s):
88 """
89 Takes a urlencoded string and returns a list of surrogate-escaped (key, value) tuples.
90 """
91 return urllib.parse.parse_qsl(s, keep_blank_values=True, errors='surrogateescape')
92
93
94 def quote(b: str, safe: str="/") -> str:
95 """
96 Returns:
97 An ascii-encodable str.
98 """
99 return urllib.parse.quote(b, safe=safe, errors="surrogateescape")
100
101
102 def unquote(s: str) -> str:
103 """
104 Args:
105 s: A surrogate-escaped str
106 Returns:
107 A surrogate-escaped str
108 """
109 return urllib.parse.unquote(s, errors="surrogateescape")
110
111
112 def hostport(scheme, host, port):
113 """
114 Returns the host component, with a port specifcation if needed.
115 """
116 if (port, scheme) in [(80, "http"), (443, "https"), (80, b"http"), (443, b"https")]:
117 return host
118 else:
119 if isinstance(host, bytes):
120 return b"%s:%d" % (host, port)
121 else:
122 return "%s:%d" % (host, port)
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mitmproxy/net/http/url.py b/mitmproxy/net/http/url.py
--- a/mitmproxy/net/http/url.py
+++ b/mitmproxy/net/http/url.py
@@ -21,16 +21,25 @@
Raises:
ValueError, if the URL is not properly formatted.
"""
- parsed = urllib.parse.urlparse(url)
+ # Size of Ascii character after encoding is 1 byte which is same as its size
+ # But non-Ascii character's size after encoding will be more than its size
+ def ascii_check(l):
+ if len(l) == len(str(l).encode()):
+ return True
+ return False
+
+ if isinstance(url, bytes):
+ url = url.decode()
+ if not ascii_check(url):
+ url = urllib.parse.urlsplit(url)
+ url = list(url)
+ url[3] = urllib.parse.quote(url[3])
+ url = urllib.parse.urlunsplit(url)
+ parsed = urllib.parse.urlparse(url)
if not parsed.hostname:
raise ValueError("No hostname given")
- if isinstance(url, bytes):
- host = parsed.hostname
-
- # this should not raise a ValueError,
- # but we try to be very forgiving here and accept just everything.
else:
host = parsed.hostname.encode("idna")
if isinstance(parsed, urllib.parse.ParseResult):
| {"golden_diff": "diff --git a/mitmproxy/net/http/url.py b/mitmproxy/net/http/url.py\n--- a/mitmproxy/net/http/url.py\n+++ b/mitmproxy/net/http/url.py\n@@ -21,16 +21,25 @@\n Raises:\n ValueError, if the URL is not properly formatted.\n \"\"\"\n- parsed = urllib.parse.urlparse(url)\n+ # Size of Ascii character after encoding is 1 byte which is same as its size\n+ # But non-Ascii character's size after encoding will be more than its size\n+ def ascii_check(l):\n+ if len(l) == len(str(l).encode()):\n+ return True\n+ return False\n+\n+ if isinstance(url, bytes):\n+ url = url.decode()\n+ if not ascii_check(url):\n+ url = urllib.parse.urlsplit(url)\n+ url = list(url)\n+ url[3] = urllib.parse.quote(url[3])\n+ url = urllib.parse.urlunsplit(url)\n \n+ parsed = urllib.parse.urlparse(url)\n if not parsed.hostname:\n raise ValueError(\"No hostname given\")\n \n- if isinstance(url, bytes):\n- host = parsed.hostname\n-\n- # this should not raise a ValueError,\n- # but we try to be very forgiving here and accept just everything.\n else:\n host = parsed.hostname.encode(\"idna\")\n if isinstance(parsed, urllib.parse.ParseResult):\n", "issue": "url with Chinese query string return 400\nuse IE11,url with Chinese query string,return 400.\r\n\r\n1. Mitmproxy version: 3.0.0 (release version)\r\n Python version: 3.5.3\r\n Platform: Windows-10-10.0.14393-SP0\r\n SSL version: OpenSSL 1.1.0e 16 Feb 2017 \r\n Windows version: 10 10.0.14393 SP0 Multiprocessor Free\r\n2. chrome+mitmdump is fine.\r\n3. but use IE11+mitmdump is error. \r\n4. use IE11 + burpsuite is fine.\r\n5. mitmdump --listen-host 127.0.0.1 --listen-port 8080\r\n\r\nMitmproxy was no hint error, but query string **lc_name** was submitted to the charset difference.\r\nreturn HTTP 400.\r\nhtml charset is gb2312.\r\n\r\nIE11 developer tools see url http://wlpx.tax-edu.net/jsp/portal/PortalControl?flag=selectCourse&lc_id=42825&lc_name=\u00e9\ufffd\ufffd\u00e7\ufffd\u00a8\u00e7\ufffd\u00a5\u00e8\u00af\ufffd\u00e5\ufffd\ufffd\u00e8\ufffd\u00bd\u00e5\ufffd\ufffd\r\n\r\nchrome developer tools see url http://wlpx.tax-edu.net/jsp/portal/PortalControl?flag=selectCourse&lc_id=42825&lc_name=%CD%A8%D3%C3%D6%AA%CA%B6%BA%CD%C4%DC%C1%A6\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "import urllib.parse\nfrom typing import Sequence\nfrom typing import Tuple\n\nfrom mitmproxy.net import check\n\n\ndef parse(url):\n \"\"\"\n URL-parsing function that checks that\n - port is an integer 0-65535\n - host is a valid IDNA-encoded hostname with no null-bytes\n - path is valid ASCII\n\n Args:\n A URL (as bytes or as unicode)\n\n Returns:\n A (scheme, host, port, path) tuple\n\n Raises:\n ValueError, if the URL is not properly formatted.\n \"\"\"\n parsed = urllib.parse.urlparse(url)\n\n if not parsed.hostname:\n raise ValueError(\"No hostname given\")\n\n if isinstance(url, bytes):\n host = parsed.hostname\n\n # this should not raise a ValueError,\n # but we try to be very forgiving here and accept just everything.\n else:\n host = parsed.hostname.encode(\"idna\")\n if isinstance(parsed, urllib.parse.ParseResult):\n parsed = parsed.encode(\"ascii\")\n\n port = parsed.port # Returns None if port number invalid in Py3.5. 
Will throw ValueError in Py3.6\n if not port:\n port = 443 if parsed.scheme == b\"https\" else 80\n\n full_path = urllib.parse.urlunparse(\n (b\"\", b\"\", parsed.path, parsed.params, parsed.query, parsed.fragment)\n )\n if not full_path.startswith(b\"/\"):\n full_path = b\"/\" + full_path\n\n if not check.is_valid_host(host):\n raise ValueError(\"Invalid Host\")\n\n return parsed.scheme, host, port, full_path\n\n\ndef unparse(scheme, host, port, path=\"\"):\n \"\"\"\n Returns a URL string, constructed from the specified components.\n\n Args:\n All args must be str.\n \"\"\"\n if path == \"*\":\n path = \"\"\n return \"%s://%s%s\" % (scheme, hostport(scheme, host, port), path)\n\n\ndef encode(s: Sequence[Tuple[str, str]], similar_to: str=None) -> str:\n \"\"\"\n Takes a list of (key, value) tuples and returns a urlencoded string.\n If similar_to is passed, the output is formatted similar to the provided urlencoded string.\n \"\"\"\n\n remove_trailing_equal = False\n if similar_to:\n remove_trailing_equal = any(\"=\" not in param for param in similar_to.split(\"&\"))\n\n encoded = urllib.parse.urlencode(s, False, errors=\"surrogateescape\")\n\n if encoded and remove_trailing_equal:\n encoded = encoded.replace(\"=&\", \"&\")\n if encoded[-1] == '=':\n encoded = encoded[:-1]\n\n return encoded\n\n\ndef decode(s):\n \"\"\"\n Takes a urlencoded string and returns a list of surrogate-escaped (key, value) tuples.\n \"\"\"\n return urllib.parse.parse_qsl(s, keep_blank_values=True, errors='surrogateescape')\n\n\ndef quote(b: str, safe: str=\"/\") -> str:\n \"\"\"\n Returns:\n An ascii-encodable str.\n \"\"\"\n return urllib.parse.quote(b, safe=safe, errors=\"surrogateescape\")\n\n\ndef unquote(s: str) -> str:\n \"\"\"\n Args:\n s: A surrogate-escaped str\n Returns:\n A surrogate-escaped str\n \"\"\"\n return urllib.parse.unquote(s, errors=\"surrogateescape\")\n\n\ndef hostport(scheme, host, port):\n \"\"\"\n Returns the host component, with a port specifcation if needed.\n \"\"\"\n if (port, scheme) in [(80, \"http\"), (443, \"https\"), (80, b\"http\"), (443, b\"https\")]:\n return host\n else:\n if isinstance(host, bytes):\n return b\"%s:%d\" % (host, port)\n else:\n return \"%s:%d\" % (host, port)\n", "path": "mitmproxy/net/http/url.py"}], "after_files": [{"content": "import urllib.parse\nfrom typing import Sequence\nfrom typing import Tuple\n\nfrom mitmproxy.net import check\n\n\ndef parse(url):\n \"\"\"\n URL-parsing function that checks that\n - port is an integer 0-65535\n - host is a valid IDNA-encoded hostname with no null-bytes\n - path is valid ASCII\n\n Args:\n A URL (as bytes or as unicode)\n\n Returns:\n A (scheme, host, port, path) tuple\n\n Raises:\n ValueError, if the URL is not properly formatted.\n \"\"\"\n # Size of Ascii character after encoding is 1 byte which is same as its size\n # But non-Ascii character's size after encoding will be more than its size\n def ascii_check(l):\n if len(l) == len(str(l).encode()):\n return True\n return False\n\n if isinstance(url, bytes):\n url = url.decode()\n if not ascii_check(url):\n url = urllib.parse.urlsplit(url)\n url = list(url)\n url[3] = urllib.parse.quote(url[3])\n url = urllib.parse.urlunsplit(url)\n\n parsed = urllib.parse.urlparse(url)\n if not parsed.hostname:\n raise ValueError(\"No hostname given\")\n\n else:\n host = parsed.hostname.encode(\"idna\")\n if isinstance(parsed, urllib.parse.ParseResult):\n parsed = parsed.encode(\"ascii\")\n\n port = parsed.port # Returns None if port number invalid in Py3.5. 
Will throw ValueError in Py3.6\n if not port:\n port = 443 if parsed.scheme == b\"https\" else 80\n\n full_path = urllib.parse.urlunparse(\n (b\"\", b\"\", parsed.path, parsed.params, parsed.query, parsed.fragment)\n )\n if not full_path.startswith(b\"/\"):\n full_path = b\"/\" + full_path\n\n if not check.is_valid_host(host):\n raise ValueError(\"Invalid Host\")\n\n return parsed.scheme, host, port, full_path\n\n\ndef unparse(scheme, host, port, path=\"\"):\n \"\"\"\n Returns a URL string, constructed from the specified components.\n\n Args:\n All args must be str.\n \"\"\"\n if path == \"*\":\n path = \"\"\n return \"%s://%s%s\" % (scheme, hostport(scheme, host, port), path)\n\n\ndef encode(s: Sequence[Tuple[str, str]], similar_to: str=None) -> str:\n \"\"\"\n Takes a list of (key, value) tuples and returns a urlencoded string.\n If similar_to is passed, the output is formatted similar to the provided urlencoded string.\n \"\"\"\n\n remove_trailing_equal = False\n if similar_to:\n remove_trailing_equal = any(\"=\" not in param for param in similar_to.split(\"&\"))\n\n encoded = urllib.parse.urlencode(s, False, errors=\"surrogateescape\")\n\n if encoded and remove_trailing_equal:\n encoded = encoded.replace(\"=&\", \"&\")\n if encoded[-1] == '=':\n encoded = encoded[:-1]\n\n return encoded\n\n\ndef decode(s):\n \"\"\"\n Takes a urlencoded string and returns a list of surrogate-escaped (key, value) tuples.\n \"\"\"\n return urllib.parse.parse_qsl(s, keep_blank_values=True, errors='surrogateescape')\n\n\ndef quote(b: str, safe: str=\"/\") -> str:\n \"\"\"\n Returns:\n An ascii-encodable str.\n \"\"\"\n return urllib.parse.quote(b, safe=safe, errors=\"surrogateescape\")\n\n\ndef unquote(s: str) -> str:\n \"\"\"\n Args:\n s: A surrogate-escaped str\n Returns:\n A surrogate-escaped str\n \"\"\"\n return urllib.parse.unquote(s, errors=\"surrogateescape\")\n\n\ndef hostport(scheme, host, port):\n \"\"\"\n Returns the host component, with a port specifcation if needed.\n \"\"\"\n if (port, scheme) in [(80, \"http\"), (443, \"https\"), (80, b\"http\"), (443, b\"https\")]:\n return host\n else:\n if isinstance(host, bytes):\n return b\"%s:%d\" % (host, port)\n else:\n return \"%s:%d\" % (host, port)\n", "path": "mitmproxy/net/http/url.py"}]} | 1,788 | 307 |
gh_patches_debug_17447 | rasdani/github-patches | git_diff | wagtail__wagtail-10039 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
🎛️ Migrate site switcher to use Stimulus approach `ActionController`
> ℹ️ **Part of the [Stimulus 🎛️ RFC 78](https://github.com/wagtail/rfcs/pull/78)**
### Is your proposal related to a problem?
There is a custom JavaScript implementation to add behaviour to select drop-down that will update the location (URL) when changed.
This approach should be very close to what we are already doing with the `SubmitController`, so let's do a bit of clean-up to avoid too much ad-hoc JS.
### Describe the solution you'd like
* Update the implementation of `client/src/controllers/SubmitController.ts` to allow for a new [Stimulus Value](https://stimulus.hotwired.dev/reference/values) called `updateAction`.
* When in use, the existing method `submit` will update the form's action value before submitting from the source element's value. `form.setAttribute('action', this.element.value); // example`
* Essentially we want to use the form `get` submit to do the location change, instead of updating the `window.location.url`.
* However, we need to ensure the right page is loaded, hence we need to revise `action` dynamically when the user selects the option.
* Remove the jQuery implementation completely [`wagtail/contrib/settings/static_src/wagtailsettings/js/site-switcher.js`](https://github.com/wagtail/wagtail/blob/main/wagtail/contrib/settings/static_src/wagtailsettings/js/site-switcher.js)
* Update the select field to have the suitable data attributes [`wagtail/contrib/settings/forms.py`](https://github.com/wagtail/wagtail/blob/main/wagtail/contrib/settings/forms.py#L23).
* Unit tests in JavaScript **must** be included with a PR.
* Validate that the 'current' option in the select drop-down for the site switcher is still functional, so that selecting it will not do anything. See wagtail/contrib/settings/forms.py. (Update: this is not a huge problem; the browser will not trigger a `change` event if the value has not changed.)
#### Example HTML
```html
<form method="get" id="settings-site-switch" novalidate>
<select
name="site-switcher"
data-controller="w-submit"
data-action="change->w-submit#submit"
data-w-submit-update-action-value="true"
>
<option value="/path/to/current-site" selected>current.com</option>
<option value="/path/to/other-site">other.com</option>
</select>
</form>
```
### Additional notes
* Remember that Site Settings is not available in the bakery demo by default, you will need to add this locally to validate the behaviour https://docs.wagtail.org/en/stable/reference/contrib/settings.html
* `AutoFieldController` was added in this PR https://github.com/wagtail/wagtail/pull/9337 and then renamed to `SubmitController` in https://github.com/wagtail/wagtail/pull/10098
* The actual `form` HTML is located in [`wagtail/contrib/settings/templates/wagtailsettings/edit.html`](https://github.com/wagtail/wagtail/blob/main/wagtail/contrib/settings/templates/wagtailsettings/edit.html) - this HTML should not need changes but good to note
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/contrib/settings/forms.py`
Content:
```
1 from django import forms
2 from django.urls import reverse
3 from django.utils.translation import gettext_lazy as _
4
5 from wagtail.admin.staticfiles import versioned_static
6 from wagtail.models import Site
7
8
9 class SiteSwitchForm(forms.Form):
10 site = forms.ChoiceField(choices=[])
11
12 @property
13 def media(self):
14 return forms.Media(
15 js=[
16 versioned_static("wagtailsettings/js/site-switcher.js"),
17 ]
18 )
19
20 def __init__(self, current_site, model, **kwargs):
21 initial_data = {"site": self.get_change_url(current_site, model)}
22 super().__init__(initial=initial_data, **kwargs)
23 self.fields["site"].choices = [
24 (
25 self.get_change_url(site, model),
26 (
27 site.hostname + " [{}]".format(_("default"))
28 if site.is_default_site
29 else site.hostname
30 ),
31 )
32 for site in Site.objects.all()
33 ]
34
35 @classmethod
36 def get_change_url(cls, site, model):
37 return reverse(
38 "wagtailsettings:edit",
39 args=[model._meta.app_label, model._meta.model_name, site.pk],
40 )
41
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wagtail/contrib/settings/forms.py b/wagtail/contrib/settings/forms.py
--- a/wagtail/contrib/settings/forms.py
+++ b/wagtail/contrib/settings/forms.py
@@ -2,20 +2,19 @@
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
-from wagtail.admin.staticfiles import versioned_static
from wagtail.models import Site
class SiteSwitchForm(forms.Form):
- site = forms.ChoiceField(choices=[])
-
- @property
- def media(self):
- return forms.Media(
- js=[
- versioned_static("wagtailsettings/js/site-switcher.js"),
- ]
- )
+ site = forms.ChoiceField(
+ choices=[],
+ widget=forms.Select(
+ attrs={
+ "data-controller": "w-action",
+ "data-action": "change->w-action#redirect",
+ }
+ ),
+ )
def __init__(self, current_site, model, **kwargs):
initial_data = {"site": self.get_change_url(current_site, model)}
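
As an editorial aside, a minimal pytest-style sketch of the kind of check that could accompany this change. It assumes a project with `wagtail.contrib.settings` installed and the pytest-django `db` fixture; any model class is passed here just to exercise the form's rendering, where a real project would pass its own setting model.

```python
def test_site_switcher_uses_stimulus_controller(db):
    # Hedged sketch, not taken from the repository's test suite.
    from wagtail.contrib.settings.forms import SiteSwitchForm
    from wagtail.models import Site

    site = Site.objects.get(is_default_site=True)
    form = SiteSwitchForm(site, Site)  # Site stands in for a real setting model
    html = str(form["site"])

    assert 'data-controller="w-action"' in html
    assert 'data-action="change->w-action#redirect"' in html
```
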
| {"golden_diff": "diff --git a/wagtail/contrib/settings/forms.py b/wagtail/contrib/settings/forms.py\n--- a/wagtail/contrib/settings/forms.py\n+++ b/wagtail/contrib/settings/forms.py\n@@ -2,20 +2,19 @@\n from django.urls import reverse\n from django.utils.translation import gettext_lazy as _\n \n-from wagtail.admin.staticfiles import versioned_static\n from wagtail.models import Site\n \n \n class SiteSwitchForm(forms.Form):\n- site = forms.ChoiceField(choices=[])\n-\n- @property\n- def media(self):\n- return forms.Media(\n- js=[\n- versioned_static(\"wagtailsettings/js/site-switcher.js\"),\n- ]\n- )\n+ site = forms.ChoiceField(\n+ choices=[],\n+ widget=forms.Select(\n+ attrs={\n+ \"data-controller\": \"w-action\",\n+ \"data-action\": \"change->w-action#redirect\",\n+ }\n+ ),\n+ )\n \n def __init__(self, current_site, model, **kwargs):\n initial_data = {\"site\": self.get_change_url(current_site, model)}\n", "issue": "\ud83c\udf9b\ufe0f Migrate site switcher to use Stimulus approach `ActionController`\n> \u2139\ufe0f **Part of the [Stimulus \ud83c\udf9b\ufe0f RFC 78](https://github.com/wagtail/rfcs/pull/78)**\r\n\r\n### Is your proposal related to a problem?\r\n\r\nThere is a custom JavaScript implementation to add behaviour to select drop-down that will update the location (URL) when changed.\r\n\r\nThis approach should be very close to what we are already doing with the `SubmitController` so let's do a a bit of clean up to avoid too much ad-hoc JS.\r\n\r\n### Describe the solution you'd like\r\n\r\n* Update the implementation of `client/src/controllers/SubmitController.ts` to allow for a new [Stimulus Value](https://stimulus.hotwired.dev/reference/values) called `updateAction`.\r\n * When in use, the existing method `submit` will update the form's action value before submitting from the source element's value. `form.setAttribute('action', this.element.value); // example`\r\n * Essentially we want to use the form `get` submit to do the location change, instead of updating the `window.location.url`.\r\n * However, we need to ensure the right page is loaded, hence we need to revise `action` dynamically when the user selects the option.\r\n* Remove the jQuery implementation completely [`wagtail/contrib/settings/static_src/wagtailsettings/js/site-switcher.js`](https://github.com/wagtail/wagtail/blob/main/wagtail/contrib/settings/static_src/wagtailsettings/js/site-switcher.js)\r\n* Update the select field to have the suitable data attributes [`wagtail/contrib/settings/forms.py`](https://github.com/wagtail/wagtail/blob/main/wagtail/contrib/settings/forms.py#L23).\r\n* Unit tests in JavaScript **must** be included with a PR.\r\n* Validate that the 'current' option in the select drop-down for the site switcher is still function, so that selecting it will not do anything. 
See wagtail/contrib/settings/forms.py (Update: This is not a huge problem, the browser will not trigger a `change` event if the value has not changed).\r\n\r\n#### Example HTML\r\n\r\n```html\r\n<form method=\"get\" id=\"settings-site-switch\" novalidate>\r\n <select\r\n name=\"site-switcher\"\r\n data-controller=\"w-submit\"\r\n data-action=\"change->w-submit#submit\"\r\n data-w-submit-update-action-value=\"true\"\r\n >\r\n <option value=\"/path/to/current-site\" selected>current.com</option>\r\n <option value=\"/path/to/other-site\">other.com</option>\r\n </select>\r\n</form>\r\n```\r\n\r\n\r\n### Additional notes\r\n\r\n* Remember that Site Settings is not available in the bakery demo by default, you will need to add this locally to validate the behaviour https://docs.wagtail.org/en/stable/reference/contrib/settings.html\r\n* `AutoFieldController` was added in this PR https://github.com/wagtail/wagtail/pull/9337 and then renamed to `SubmitController` in https://github.com/wagtail/wagtail/pull/10098\r\n* The actual `form` HTML is located in [`wagtail/contrib/settings/templates/wagtailsettings/edit.html`](https://github.com/wagtail/wagtail/blob/main/wagtail/contrib/settings/templates/wagtailsettings/edit.html) - this HTML should not need changes but good to note\r\n\n", "before_files": [{"content": "from django import forms\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\n\nfrom wagtail.admin.staticfiles import versioned_static\nfrom wagtail.models import Site\n\n\nclass SiteSwitchForm(forms.Form):\n site = forms.ChoiceField(choices=[])\n\n @property\n def media(self):\n return forms.Media(\n js=[\n versioned_static(\"wagtailsettings/js/site-switcher.js\"),\n ]\n )\n\n def __init__(self, current_site, model, **kwargs):\n initial_data = {\"site\": self.get_change_url(current_site, model)}\n super().__init__(initial=initial_data, **kwargs)\n self.fields[\"site\"].choices = [\n (\n self.get_change_url(site, model),\n (\n site.hostname + \" [{}]\".format(_(\"default\"))\n if site.is_default_site\n else site.hostname\n ),\n )\n for site in Site.objects.all()\n ]\n\n @classmethod\n def get_change_url(cls, site, model):\n return reverse(\n \"wagtailsettings:edit\",\n args=[model._meta.app_label, model._meta.model_name, site.pk],\n )\n", "path": "wagtail/contrib/settings/forms.py"}], "after_files": [{"content": "from django import forms\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\n\nfrom wagtail.models import Site\n\n\nclass SiteSwitchForm(forms.Form):\n site = forms.ChoiceField(\n choices=[],\n widget=forms.Select(\n attrs={\n \"data-controller\": \"w-action\",\n \"data-action\": \"change->w-action#redirect\",\n }\n ),\n )\n\n def __init__(self, current_site, model, **kwargs):\n initial_data = {\"site\": self.get_change_url(current_site, model)}\n super().__init__(initial=initial_data, **kwargs)\n self.fields[\"site\"].choices = [\n (\n self.get_change_url(site, model),\n (\n site.hostname + \" [{}]\".format(_(\"default\"))\n if site.is_default_site\n else site.hostname\n ),\n )\n for site in Site.objects.all()\n ]\n\n @classmethod\n def get_change_url(cls, site, model):\n return reverse(\n \"wagtailsettings:edit\",\n args=[model._meta.app_label, model._meta.model_name, site.pk],\n )\n", "path": "wagtail/contrib/settings/forms.py"}]} | 1,312 | 242 |
gh_patches_debug_8734 | rasdani/github-patches | git_diff | spack__spack-23478 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Installation issue: zfp install fails due to use of system python
When trying to build zfp on my system, I encountered a bunch of odd python errors. Upon deeper investigation, it appears that the CMake configuration tooling zfp uses found/decided to use the system-installed python instead of the spack-installed python. Looking at the zfp recipe, it had dependencies on py-numpy and py-cython, but no explicit dependency on python.
I confess that my understanding of the different spack dependency types is still a bit vague. I thought zfp's dependency on py-numpy would pick up py-numpy's dependency on python, and indeed I see paths for the spack-installed python in the PATH, PYTHONHOME, and PYTHONPATH env variables in spack-build-env.txt, but for some reason CMake is using the system python, perhaps because the spack-installed python is not in CMAKE_PREFIX_PATH???
(I also see the system python in SPACK_PYTHON env var --- I assume this means the python under which spack itself is running?)
Adding an explicit dependency on python in the zfp recipe resolves the issue for me (it seems that this adds the spack-installed python to CMAKE_PREFIX_PATH). I can provide a patch for such, but I am unsure if that is the best solution or not.
### Steps to reproduce the issue
```console
$ spack install zfp
...
-- Found PythonLibs: /usr/lib64/libpython3.6m.so (found version "3.6.8")
-- Found PythonInterp: /usr/bin/python3.6
Fatal Python error: Py_Initialize: Unable to get the locale encoding
ModuleNotFoundError: No module named 'encodings'
Current thread 0x00007f8e898a2b80 (most recent call first):
CMake Error at python/scikit-build-cmake/FindPythonExtensions.cmake:299 (list):
list GET given empty list
Call Stack (most recent call first):
python/CMakeLists.txt:4 (include)
...
```
The spack-installed python is version 3.7.7, and has a much longer path.
### Information on your system
```console
$ spack debug report
* **Spack:** 0.16.1-2432-16111354aa
* **Python:** 3.6.8
* **Platform:** linux-rhel8-x86_64
* **Concretizer:** original
```
### Additional information
* [spack-build-env.txt](https://github.com/spack/spack/files/6424165/spack-build-env.txt)
* [spack-build-out.txt](https://github.com/spack/spack/files/6424166/spack-build-out.txt)
Maintainers of the zfp package: @GarrettDMorrison @lindstro
### General information
- [x ] I have run `spack debug report` and reported the version of Spack/Python/Platform
- [x ] I have run `spack maintainers <name-of-the-package>` and @mentioned any maintainers
- [x ] I have uploaded the build log and environment files
- [x ] I have searched the issues of this repo and believe this is not a duplicate
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `var/spack/repos/builtin/packages/zfp/package.py`
Content:
```
1 # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
2 # Spack Project Developers. See the top-level COPYRIGHT file for details.
3 #
4 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
5
6 from spack import *
7
8
9 class Zfp(CMakePackage, CudaPackage):
10 """zfp is a compressed number format for multidimensional floating-point
11 and integer arrays.
12
13 zfp provides compressed-array classes that support high throughput
14 read and write random access to individual array elements. zfp also
15 supports serial and parallel (OpenMP and CUDA) compression of whole
16 arrays.
17 """
18
19 # Package info
20 homepage = 'https://zfp.llnl.gov'
21 url = 'https://github.com/LLNL/zfp/releases/download/0.5.5/zfp-0.5.5.tar.gz'
22 git = 'https://github.com/LLNL/zfp.git'
23 maintainers = ['lindstro', 'GarrettDMorrison']
24
25 # Versions
26 version('develop', branch='develop')
27 version('0.5.5', sha256='fdf7b948bab1f4e5dccfe2c2048fd98c24e417ad8fb8a51ed3463d04147393c5')
28 version('0.5.4', sha256='746e17aaa401c67dcffd273d6e6f95c76adfbbd5cf523dcad56d09e9d3b71196')
29 version('0.5.3', sha256='a5d2f8e5b47a7c92e2a5775b82cbfb3a76c87d0ac83d25abb4ac10ea75a2856e')
30 version('0.5.2', sha256='9c738ec525cc76b4bb80b2b3f7c9f07507eeda3a341470e5942cda97efbe9a4f', url='https://github.com/LLNL/zfp/archive/0.5.2/zfp-0.5.2.tar.gz')
31 version('0.5.1', sha256='f255dd1708c9ae4dc6a56dd2614e8b47a10d833c87fd349cbd47545a19c2b779', url='https://github.com/LLNL/zfp/archive/0.5.1/zfp-0.5.1.tar.gz')
32
33 # Build targets
34 # TODO: variant('utilities', default=True, description='Build utilities')
35 variant('shared', default=True, description='Build shared libraries')
36
37 # Language bindings
38 variant('c', default=False, description='Enable C bindings')
39 variant('python', default=False, description='Enable Python bindings')
40 variant('fortran', default=False, description='Enable Fortran bindings')
41
42 # Execution policies
43 variant('openmp', default=False, description='Enable OpenMP execution')
44 variant('cuda', default=False, description='Enable CUDA execution')
45
46 # Advanced options
47 variant('bsws', default='64', values=('8', '16', '32', '64'), multi=False,
48 description='Bit stream word size: '
49 'use smaller for finer rate granularity. '
50 'Use 8 for H5Z-ZFP filter.')
51 variant('strided', default=False,
52 description='Enable strided access for progressive zfp streams')
53 variant('aligned', default=False,
54 description='Enable aligned memory allocation')
55 variant('twoway', default=False,
56 description='Use two-way skew-associative cache')
57 variant('fasthash', default=False,
58 description='Use a faster but more collision prone hash function')
59 variant('profile', default=False,
60 description='Count cache misses')
61
62 # Conflicts
63 conflicts('+c', when='@:0.5.3',
64 msg='+c requires zfp 0.5.4 or later')
65 conflicts('+python', when='@:0.5.4',
66 msg='+python requires zfp 0.5.5 or later')
67 conflicts('+fortran', when='@:0.5.4',
68 msg='+fortran requires zfp 0.5.5 or later')
69 conflicts('+openmp', when='@:0.5.2',
70 msg='+openmp requires zfp 0.5.3 or later')
71 conflicts('+cuda', when='@:0.5.3',
72 msg='+cuda requires zfp 0.5.4 or later')
73 conflicts('+fasthash', when='@:0.5.1',
74 msg='+fasthash requires zfp 0.5.2 or later')
75 conflicts('+profile', when='@:0.5.1',
76 msg='+profile requires zfp 0.5.2 or later')
77
78 # Dependencies
79 depends_on('[email protected]:', type='build')
80 depends_on('cuda@7:', type=('build', 'test', 'run'), when='+cuda')
81 depends_on('py-numpy', type=('build', 'test', 'run'), when='+python')
82 depends_on('py-cython', type='build', when='+python')
83
84 def cmake_args(self):
85 spec = self.spec
86
87 # CMake options
88 args = [
89 # TODO: self.define_from_variant('BUILD_UTILITIES', 'utilities'),
90 self.define('BUILD_TESTING', self.run_tests),
91 self.define_from_variant('BUILD_SHARED_LIBS', 'shared'),
92 self.define_from_variant('BUILD_CFP', 'c'),
93 self.define_from_variant('BUILD_ZFPY', 'python'),
94 self.define_from_variant('BUILD_ZFORP', 'fortran'),
95 self.define('ZFP_BIT_STREAM_WORD_SIZE',
96 spec.variants['bsws'].value),
97 self.define_from_variant('ZFP_WITH_BIT_STREAM_STRIDED', 'strided'),
98 self.define_from_variant('ZFP_WITH_ALIGNED_ALLOC', 'aligned'),
99 self.define_from_variant('ZFP_WITH_CACHE_TWOWAY', 'twoway'),
100 self.define_from_variant('ZFP_WITH_CACHE_FAST_HASH', 'fasthash'),
101 self.define_from_variant('ZFP_WITH_CACHE_PROFILE', 'profile')
102 ]
103
104 return args
105
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/var/spack/repos/builtin/packages/zfp/package.py b/var/spack/repos/builtin/packages/zfp/package.py
--- a/var/spack/repos/builtin/packages/zfp/package.py
+++ b/var/spack/repos/builtin/packages/zfp/package.py
@@ -78,6 +78,7 @@
# Dependencies
depends_on('[email protected]:', type='build')
depends_on('cuda@7:', type=('build', 'test', 'run'), when='+cuda')
+ depends_on('python', type=('build', 'test', 'run'), when='+python')
depends_on('py-numpy', type=('build', 'test', 'run'), when='+python')
depends_on('py-cython', type='build', when='+python')
| {"golden_diff": "diff --git a/var/spack/repos/builtin/packages/zfp/package.py b/var/spack/repos/builtin/packages/zfp/package.py\n--- a/var/spack/repos/builtin/packages/zfp/package.py\n+++ b/var/spack/repos/builtin/packages/zfp/package.py\n@@ -78,6 +78,7 @@\n # Dependencies\n depends_on('[email protected]:', type='build')\n depends_on('cuda@7:', type=('build', 'test', 'run'), when='+cuda')\n+ depends_on('python', type=('build', 'test', 'run'), when='+python')\n depends_on('py-numpy', type=('build', 'test', 'run'), when='+python')\n depends_on('py-cython', type='build', when='+python')\n", "issue": "Installation issue: zfp install fails due to use of system python\nWhen trying to build zfp on my system, I encountered a bunch of odd python errors. Upon deeper investigation, it appears that the CMake configuration tool zfp was using found/decided to use the system installed python instead of the spack installed python. Looking at the zfp recipe, it had dependencies on py-numpy and py-cython, but no explicit dependency on python.\r\n\r\nI confess that might understanding of the different spack dependency types is still a bit vague, and I thought zfp's dependency on py-numpy should pick up py-numpy's dependency on python, and indeed I am seeing paths for the spack-installed python in the PATH, PYTHONHOME, and PYTHONPATH env variables in spack-build-env.txt, but for some reason CMake is using the system python, perhaps because spack-installed python is not in CMAKE_PREFIX_PATH???\r\n\r\n(I also see the system python in SPACK_PYTHON env var --- I assume this means the python under which spack itself is running?) \r\n\r\nAdding an explicit dependency on python in the zfp recipe resolves the issue for me (it seems that this adds the spack-installed python to CMAKE_PREFIX_PATH). 
I can provide a patch for such, but I am unsure if that is the best solution or not.\r\n\r\n### Steps to reproduce the issue\r\n\r\n\r\n```console\r\n$ spack install zfp\r\n...\r\n-- Found PythonLibs: /usr/lib64/libpython3.6m.so (found version \"3.6.8\")\r\n-- Found PythonInterp: /usr/bin/python3.6\r\nFatal Python error: Py_Initialize: Unable to get the locale encoding\r\nModuleNotFoundError: No module named 'encodings'\r\n\r\nCurrent thread 0x00007f8e898a2b80 (most recent call first):\r\nCMake Error at python/scikit-build-cmake/FindPythonExtensions.cmake:299 (list):\r\n list GET given empty list\r\nCall Stack (most recent call first):\r\n python/CMakeLists.txt:4 (include)\r\n...\r\n```\r\n\r\nThe spack-installed python is version 3.7.7, and has a much longer path.\r\n\r\n### Information on your system\r\n\r\n```console\r\n$ spack debug report\r\n* **Spack:** 0.16.1-2432-16111354aa\r\n* **Python:** 3.6.8\r\n* **Platform:** linux-rhel8-x86_64\r\n* **Concretizer:** original\r\n```\r\n\r\n\r\n\r\n### Additional information\r\n\r\n\r\n* [spack-build-env.txt](https://github.com/spack/spack/files/6424165/spack-build-env.txt)\r\n* [spack-build-out.txt](https://github.com/spack/spack/files/6424166/spack-build-out.txt)\r\n\r\nMaintainers of the zfp package: @GarrettDMorrison @lindstro\r\n\r\n\r\n### General information\r\n\r\n- [x ] I have run `spack debug report` and reported the version of Spack/Python/Platform\r\n- [x ] I have run `spack maintainers <name-of-the-package>` and @mentioned any maintainers\r\n- [x ] I have uploaded the build log and environment files\r\n- [x ] I have searched the issues of this repo and believe this is not a duplicate\r\n\n", "before_files": [{"content": "# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass Zfp(CMakePackage, CudaPackage):\n \"\"\"zfp is a compressed number format for multidimensional floating-point\n and integer arrays.\n\n zfp provides compressed-array classes that support high throughput\n read and write random access to individual array elements. 
zfp also\n supports serial and parallel (OpenMP and CUDA) compression of whole\n arrays.\n \"\"\"\n\n # Package info\n homepage = 'https://zfp.llnl.gov'\n url = 'https://github.com/LLNL/zfp/releases/download/0.5.5/zfp-0.5.5.tar.gz'\n git = 'https://github.com/LLNL/zfp.git'\n maintainers = ['lindstro', 'GarrettDMorrison']\n\n # Versions\n version('develop', branch='develop')\n version('0.5.5', sha256='fdf7b948bab1f4e5dccfe2c2048fd98c24e417ad8fb8a51ed3463d04147393c5')\n version('0.5.4', sha256='746e17aaa401c67dcffd273d6e6f95c76adfbbd5cf523dcad56d09e9d3b71196')\n version('0.5.3', sha256='a5d2f8e5b47a7c92e2a5775b82cbfb3a76c87d0ac83d25abb4ac10ea75a2856e')\n version('0.5.2', sha256='9c738ec525cc76b4bb80b2b3f7c9f07507eeda3a341470e5942cda97efbe9a4f', url='https://github.com/LLNL/zfp/archive/0.5.2/zfp-0.5.2.tar.gz')\n version('0.5.1', sha256='f255dd1708c9ae4dc6a56dd2614e8b47a10d833c87fd349cbd47545a19c2b779', url='https://github.com/LLNL/zfp/archive/0.5.1/zfp-0.5.1.tar.gz')\n\n # Build targets\n # TODO: variant('utilities', default=True, description='Build utilities')\n variant('shared', default=True, description='Build shared libraries')\n\n # Language bindings\n variant('c', default=False, description='Enable C bindings')\n variant('python', default=False, description='Enable Python bindings')\n variant('fortran', default=False, description='Enable Fortran bindings')\n\n # Execution policies\n variant('openmp', default=False, description='Enable OpenMP execution')\n variant('cuda', default=False, description='Enable CUDA execution')\n\n # Advanced options\n variant('bsws', default='64', values=('8', '16', '32', '64'), multi=False,\n description='Bit stream word size: '\n 'use smaller for finer rate granularity. '\n 'Use 8 for H5Z-ZFP filter.')\n variant('strided', default=False,\n description='Enable strided access for progressive zfp streams')\n variant('aligned', default=False,\n description='Enable aligned memory allocation')\n variant('twoway', default=False,\n description='Use two-way skew-associative cache')\n variant('fasthash', default=False,\n description='Use a faster but more collision prone hash function')\n variant('profile', default=False,\n description='Count cache misses')\n\n # Conflicts\n conflicts('+c', when='@:0.5.3',\n msg='+c requires zfp 0.5.4 or later')\n conflicts('+python', when='@:0.5.4',\n msg='+python requires zfp 0.5.5 or later')\n conflicts('+fortran', when='@:0.5.4',\n msg='+fortran requires zfp 0.5.5 or later')\n conflicts('+openmp', when='@:0.5.2',\n msg='+openmp requires zfp 0.5.3 or later')\n conflicts('+cuda', when='@:0.5.3',\n msg='+cuda requires zfp 0.5.4 or later')\n conflicts('+fasthash', when='@:0.5.1',\n msg='+fasthash requires zfp 0.5.2 or later')\n conflicts('+profile', when='@:0.5.1',\n msg='+profile requires zfp 0.5.2 or later')\n\n # Dependencies\n depends_on('[email protected]:', type='build')\n depends_on('cuda@7:', type=('build', 'test', 'run'), when='+cuda')\n depends_on('py-numpy', type=('build', 'test', 'run'), when='+python')\n depends_on('py-cython', type='build', when='+python')\n\n def cmake_args(self):\n spec = self.spec\n\n # CMake options\n args = [\n # TODO: self.define_from_variant('BUILD_UTILITIES', 'utilities'),\n self.define('BUILD_TESTING', self.run_tests),\n self.define_from_variant('BUILD_SHARED_LIBS', 'shared'),\n self.define_from_variant('BUILD_CFP', 'c'),\n self.define_from_variant('BUILD_ZFPY', 'python'),\n self.define_from_variant('BUILD_ZFORP', 'fortran'),\n self.define('ZFP_BIT_STREAM_WORD_SIZE',\n 
spec.variants['bsws'].value),\n self.define_from_variant('ZFP_WITH_BIT_STREAM_STRIDED', 'strided'),\n self.define_from_variant('ZFP_WITH_ALIGNED_ALLOC', 'aligned'),\n self.define_from_variant('ZFP_WITH_CACHE_TWOWAY', 'twoway'),\n self.define_from_variant('ZFP_WITH_CACHE_FAST_HASH', 'fasthash'),\n self.define_from_variant('ZFP_WITH_CACHE_PROFILE', 'profile')\n ]\n\n return args\n", "path": "var/spack/repos/builtin/packages/zfp/package.py"}], "after_files": [{"content": "# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass Zfp(CMakePackage, CudaPackage):\n \"\"\"zfp is a compressed number format for multidimensional floating-point\n and integer arrays.\n\n zfp provides compressed-array classes that support high throughput\n read and write random access to individual array elements. zfp also\n supports serial and parallel (OpenMP and CUDA) compression of whole\n arrays.\n \"\"\"\n\n # Package info\n homepage = 'https://zfp.llnl.gov'\n url = 'https://github.com/LLNL/zfp/releases/download/0.5.5/zfp-0.5.5.tar.gz'\n git = 'https://github.com/LLNL/zfp.git'\n maintainers = ['lindstro', 'GarrettDMorrison']\n\n # Versions\n version('develop', branch='develop')\n version('0.5.5', sha256='fdf7b948bab1f4e5dccfe2c2048fd98c24e417ad8fb8a51ed3463d04147393c5')\n version('0.5.4', sha256='746e17aaa401c67dcffd273d6e6f95c76adfbbd5cf523dcad56d09e9d3b71196')\n version('0.5.3', sha256='a5d2f8e5b47a7c92e2a5775b82cbfb3a76c87d0ac83d25abb4ac10ea75a2856e')\n version('0.5.2', sha256='9c738ec525cc76b4bb80b2b3f7c9f07507eeda3a341470e5942cda97efbe9a4f', url='https://github.com/LLNL/zfp/archive/0.5.2/zfp-0.5.2.tar.gz')\n version('0.5.1', sha256='f255dd1708c9ae4dc6a56dd2614e8b47a10d833c87fd349cbd47545a19c2b779', url='https://github.com/LLNL/zfp/archive/0.5.1/zfp-0.5.1.tar.gz')\n\n # Build targets\n # TODO: variant('utilities', default=True, description='Build utilities')\n variant('shared', default=True, description='Build shared libraries')\n\n # Language bindings\n variant('c', default=False, description='Enable C bindings')\n variant('python', default=False, description='Enable Python bindings')\n variant('fortran', default=False, description='Enable Fortran bindings')\n\n # Execution policies\n variant('openmp', default=False, description='Enable OpenMP execution')\n variant('cuda', default=False, description='Enable CUDA execution')\n\n # Advanced options\n variant('bsws', default='64', values=('8', '16', '32', '64'), multi=False,\n description='Bit stream word size: '\n 'use smaller for finer rate granularity. 
'\n 'Use 8 for H5Z-ZFP filter.')\n variant('strided', default=False,\n description='Enable strided access for progressive zfp streams')\n variant('aligned', default=False,\n description='Enable aligned memory allocation')\n variant('twoway', default=False,\n description='Use two-way skew-associative cache')\n variant('fasthash', default=False,\n description='Use a faster but more collision prone hash function')\n variant('profile', default=False,\n description='Count cache misses')\n\n # Conflicts\n conflicts('+c', when='@:0.5.3',\n msg='+c requires zfp 0.5.4 or later')\n conflicts('+python', when='@:0.5.4',\n msg='+python requires zfp 0.5.5 or later')\n conflicts('+fortran', when='@:0.5.4',\n msg='+fortran requires zfp 0.5.5 or later')\n conflicts('+openmp', when='@:0.5.2',\n msg='+openmp requires zfp 0.5.3 or later')\n conflicts('+cuda', when='@:0.5.3',\n msg='+cuda requires zfp 0.5.4 or later')\n conflicts('+fasthash', when='@:0.5.1',\n msg='+fasthash requires zfp 0.5.2 or later')\n conflicts('+profile', when='@:0.5.1',\n msg='+profile requires zfp 0.5.2 or later')\n\n # Dependencies\n depends_on('[email protected]:', type='build')\n depends_on('cuda@7:', type=('build', 'test', 'run'), when='+cuda')\n depends_on('python', type=('build', 'test', 'run'), when='+python')\n depends_on('py-numpy', type=('build', 'test', 'run'), when='+python')\n depends_on('py-cython', type='build', when='+python')\n\n def cmake_args(self):\n spec = self.spec\n\n # CMake options\n args = [\n # TODO: self.define_from_variant('BUILD_UTILITIES', 'utilities'),\n self.define('BUILD_TESTING', self.run_tests),\n self.define_from_variant('BUILD_SHARED_LIBS', 'shared'),\n self.define_from_variant('BUILD_CFP', 'c'),\n self.define_from_variant('BUILD_ZFPY', 'python'),\n self.define_from_variant('BUILD_ZFORP', 'fortran'),\n self.define('ZFP_BIT_STREAM_WORD_SIZE',\n spec.variants['bsws'].value),\n self.define_from_variant('ZFP_WITH_BIT_STREAM_STRIDED', 'strided'),\n self.define_from_variant('ZFP_WITH_ALIGNED_ALLOC', 'aligned'),\n self.define_from_variant('ZFP_WITH_CACHE_TWOWAY', 'twoway'),\n self.define_from_variant('ZFP_WITH_CACHE_FAST_HASH', 'fasthash'),\n self.define_from_variant('ZFP_WITH_CACHE_PROFILE', 'profile')\n ]\n\n return args\n", "path": "var/spack/repos/builtin/packages/zfp/package.py"}]} | 2,674 | 175 |
gh_patches_debug_15647 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-1812 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Discuss type for renewable category for Ukraine UA
Currently, there is no breakdown for the Ukrainian renewable category provided on https://ua.energy/diyalnist/dyspetcherska-informatsiya/dobovyj-grafik-vyrobnytstva-spozhyvannya-e-e/
The renewable category (ВДЕ) is mapped as "wind" in the parser, because wind used to be the dominant source.
Going through the last few days on the website, you will notice a very clear solar peak of roughly 1,200 MW at noon each day. At night, wind reaches a maximum of about 400 MW; mostly it is around 200 MW.
Here is an example for yesterday:

The installed capacity of solar has grown very fast and will continue to grow, because it is cheap and the potential in UA is huge:

Some suggestions to deal with this situation:
1. Apply artificial boundaries (based on time of day, output in megawatts, or both) that push production to wind or solar,
like "from 06:00 to 18:00 -> solar if P > 200 MW, else wind" (a rough sketch of such a heuristic follows this list).
2. Put renewables into the unknown category with a mixed carbon intensity (looking at the installed capacity, 50% wind : 50% solar seems reasonable).
3. Actively search for a breakdown of wind and solar.
4. Ask the data provider for a breakdown.
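For illustration only, a minimal sketch of what suggestion 1 could look like. The 06:00 to 18:00 window and the 200 MW threshold are assumptions taken from the observations above, not values the parser currently uses:
```python
def split_vde(hour, vde_mw, threshold_mw=200):
    """Heuristically attribute the combined 'vde' output to solar or wind.

    Assumption: daytime output above the threshold is dominated by solar,
    everything else is treated as wind.
    """
    if 6 <= hour < 18 and vde_mw > threshold_mw:
        return {'solar': vde_mw, 'wind': 0.0}
    return {'solar': 0.0, 'wind': vde_mw}
```
Such a rule is obviously crude, which is why option 2 (reporting the whole category as unknown) may be the safer interim choice.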
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/UA.py`
Content:
```
1 #!/usr/bin/env python3
2
3 import arrow
4 import dateutil
5 import requests
6
7 """
8 tec - same as `tes` but also working as central heater,
9 main fuel is gas, in critical situations - black oil
10 gesgaes - hydro run of river and poundage
11 consumptiongaespump - hydro pumped storage
12 vde - wind + solar, mostly wind
13
14 no data for biomass, solar and oil
15 """
16 MAP_GENERATION = {
17 'aes': 'nuclear',
18 'tec': 'gas',
19 'tes': 'coal',
20 'vde': 'wind',
21 'biomass': 'biomass',
22 'gesgaes': 'hydro',
23 'solar': 'solar',
24 'oil': 'oil',
25 'geothermal': 'geothermal',
26 }
27
28 MAP_STORAGE = {
29 'consumptiongaespump': 'hydro',
30 }
31
32 tz = 'Europe/Kiev'
33
34
35 def fetch_production(zone_key='UA', session=None, target_datetime=None, logger=None):
36 if target_datetime:
37 raise NotImplementedError('This parser is not yet able to parse past dates')
38 r = session or requests.session()
39
40 data = []
41 today = arrow.now(tz=tz).format('DD.MM.YYYY')
42 url = 'https://ua.energy/wp-admin/admin-ajax.php'
43 postdata = {
44 'action': 'get_data_oes',
45 'report_date': today,
46 'type': 'day'
47 }
48
49 response = r.post(url, postdata)
50
51 for serie in response.json():
52 row = {
53 'zoneKey': zone_key,
54 'production': {},
55 'storage': {},
56 'source': 'ua.energy'
57 }
58
59 # Storage
60 if 'consumptiongaespump' in serie:
61 row['storage']['hydro'] = serie['consumptiongaespump'] * -1
62
63 # Production
64 for k, v in MAP_GENERATION.items():
65 if k in serie:
66 row['production'][v] = serie[k]
67 else:
68 row['production'][v] = 0.0
69
70 # Date
71 date = arrow.get('%s %s' % (today, serie['hour']), 'DD.MM.YYYY HH:mm')
72 row['datetime'] = date.replace(tzinfo=dateutil.tz.gettz(tz)).datetime
73
74 data.append(row)
75 return data
76
77
78 if __name__ == '__main__':
79 print(fetch_production())
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsers/UA.py b/parsers/UA.py
--- a/parsers/UA.py
+++ b/parsers/UA.py
@@ -9,18 +9,18 @@
main fuel is gas, in critical situations - black oil
gesgaes - hydro run of river and poundage
consumptiongaespump - hydro pumped storage
-vde - wind + solar, mostly wind
+vde - renewable sources - mostly wind at nighttimes and solar peaks during the day
-no data for biomass, solar and oil
"""
MAP_GENERATION = {
'aes': 'nuclear',
'tec': 'gas',
'tes': 'coal',
- 'vde': 'wind',
+ 'vde': 'unknown',
'biomass': 'biomass',
'gesgaes': 'hydro',
'solar': 'solar',
+ 'wind': 'wind',
'oil': 'oil',
'geothermal': 'geothermal',
}
| {"golden_diff": "diff --git a/parsers/UA.py b/parsers/UA.py\n--- a/parsers/UA.py\n+++ b/parsers/UA.py\n@@ -9,18 +9,18 @@\n main fuel is gas, in critical situations - black oil\n gesgaes - hydro run of river and poundage\n consumptiongaespump - hydro pumped storage\n-vde - wind + solar, mostly wind\n+vde - renewable sources - mostly wind at nighttimes and solar peaks during the day\n \n-no data for biomass, solar and oil\n \"\"\"\n MAP_GENERATION = {\n 'aes': 'nuclear',\n 'tec': 'gas',\n 'tes': 'coal',\n- 'vde': 'wind',\n+ 'vde': 'unknown',\n 'biomass': 'biomass',\n 'gesgaes': 'hydro',\n 'solar': 'solar',\n+ 'wind': 'wind',\n 'oil': 'oil',\n 'geothermal': 'geothermal',\n }\n", "issue": "Discuss type for renewable category for Ukraine UA\nCurrently, there is no breakdown for the Ukrainian renewable category provided on https://ua.energy/diyalnist/dyspetcherska-informatsiya/dobovyj-grafik-vyrobnytstva-spozhyvannya-e-e/\r\n\r\nThe renewable category (\u0412\u0414\u0415) is mapped as \"wind\" in the parser, because wind used to be the dominant source in the past.\r\nGoing through the last few days on the website, you will notice a very clear solar peak at noon (~1.200. MW) each day. Wind at nighttimes reaches a maximum value of ~400 MW, mostly it is around 200 MW.\r\n\r\nHere is an example for yesterday:\r\n\r\n\r\nThe installed capacity of solar grew very fast, and will continue because it's cheap and the potential in UA is huge:\r\n\r\n\r\nSome suggestions to deal with this situation:\r\n1. Any artificial boundaries (depending on x-axis-time or y-axis-megawatts or both) pushing production to wind or solar?\r\nLike \"from 06:00 to 18:00\" -> solar if P > 200 MW, else wind\". \r\n2. Put renewables to unknown category with a mixed carbon intensity (looking at the intalled capacity, 50% wind : 50% solar seems reasonable).\r\n3. actively search for a breakdown of wind and solar\r\n4. 
ask the data provider for a breakdown\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport arrow\nimport dateutil\nimport requests\n\n\"\"\"\ntec - same as `tes` but also working as central heater,\n main fuel is gas, in critical situations - black oil\ngesgaes - hydro run of river and poundage\nconsumptiongaespump - hydro pumped storage\nvde - wind + solar, mostly wind\n\nno data for biomass, solar and oil\n\"\"\"\nMAP_GENERATION = {\n 'aes': 'nuclear',\n 'tec': 'gas',\n 'tes': 'coal',\n 'vde': 'wind',\n 'biomass': 'biomass',\n 'gesgaes': 'hydro',\n 'solar': 'solar',\n 'oil': 'oil',\n 'geothermal': 'geothermal',\n}\n\nMAP_STORAGE = {\n 'consumptiongaespump': 'hydro',\n}\n\ntz = 'Europe/Kiev'\n\n\ndef fetch_production(zone_key='UA', session=None, target_datetime=None, logger=None):\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n r = session or requests.session()\n\n data = []\n today = arrow.now(tz=tz).format('DD.MM.YYYY')\n url = 'https://ua.energy/wp-admin/admin-ajax.php'\n postdata = {\n 'action': 'get_data_oes',\n 'report_date': today,\n 'type': 'day'\n }\n\n response = r.post(url, postdata)\n\n for serie in response.json():\n row = {\n 'zoneKey': zone_key,\n 'production': {},\n 'storage': {},\n 'source': 'ua.energy'\n }\n\n # Storage\n if 'consumptiongaespump' in serie:\n row['storage']['hydro'] = serie['consumptiongaespump'] * -1\n\n # Production\n for k, v in MAP_GENERATION.items():\n if k in serie:\n row['production'][v] = serie[k]\n else:\n row['production'][v] = 0.0\n\n # Date\n date = arrow.get('%s %s' % (today, serie['hour']), 'DD.MM.YYYY HH:mm')\n row['datetime'] = date.replace(tzinfo=dateutil.tz.gettz(tz)).datetime\n\n data.append(row)\n return data\n\n\nif __name__ == '__main__':\n print(fetch_production())\n", "path": "parsers/UA.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nimport arrow\nimport dateutil\nimport requests\n\n\"\"\"\ntec - same as `tes` but also working as central heater,\n main fuel is gas, in critical situations - black oil\ngesgaes - hydro run of river and poundage\nconsumptiongaespump - hydro pumped storage\nvde - renewable sources - mostly wind at nighttimes and solar peaks during the day\n\n\"\"\"\nMAP_GENERATION = {\n 'aes': 'nuclear',\n 'tec': 'gas',\n 'tes': 'coal',\n 'vde': 'unknown',\n 'biomass': 'biomass',\n 'gesgaes': 'hydro',\n 'solar': 'solar',\n 'wind': 'wind',\n 'oil': 'oil',\n 'geothermal': 'geothermal',\n}\n\nMAP_STORAGE = {\n 'consumptiongaespump': 'hydro',\n}\n\ntz = 'Europe/Kiev'\n\n\ndef fetch_production(zone_key='UA', session=None, target_datetime=None, logger=None):\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n r = session or requests.session()\n\n data = []\n today = arrow.now(tz=tz).format('DD.MM.YYYY')\n url = 'https://ua.energy/wp-admin/admin-ajax.php'\n postdata = {\n 'action': 'get_data_oes',\n 'report_date': today,\n 'type': 'day'\n }\n\n response = r.post(url, postdata)\n\n for serie in response.json():\n row = {\n 'zoneKey': zone_key,\n 'production': {},\n 'storage': {},\n 'source': 'ua.energy'\n }\n\n # Storage\n if 'consumptiongaespump' in serie:\n row['storage']['hydro'] = serie['consumptiongaespump'] * -1\n\n # Production\n for k, v in MAP_GENERATION.items():\n if k in serie:\n row['production'][v] = serie[k]\n else:\n row['production'][v] = 0.0\n\n # Date\n date = arrow.get('%s %s' % (today, serie['hour']), 'DD.MM.YYYY HH:mm')\n row['datetime'] = 
date.replace(tzinfo=dateutil.tz.gettz(tz)).datetime\n\n data.append(row)\n return data\n\n\nif __name__ == '__main__':\n print(fetch_production())\n", "path": "parsers/UA.py"}]} | 1,364 | 215 |
gh_patches_debug_19934 | rasdani/github-patches | git_diff | Mailu__Mailu-1599 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Hardcoded http://admin/ in fetchmail.py
I've tweaked ``docker-compose.yml`` so that all my containers related to ``mailu`` are prefixed with ``mailu-``, in order to proactively avoid conflicts with any other containers I may define in the future.
However, the hardcoded ``http://admin/...`` URLs below cause failures in ``fetchmail``, since my container is now named ``mailu-admin`` in my ``docker-compose.yml``, not ``admin`` as the code assumes.
```
./services/fetchmail/fetchmail.py:47: fetches = requests.get("http://admin/internal/fetch").json()
./services/fetchmail/fetchmail.py:85: requests.post("http://admin/internal/fetch/{}".format(fetch["id"]),
```
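For reference, a minimal sketch of the kind of change this implies: reading the admin hostname from an environment variable (``HOST_ADMIN`` here, mirroring the existing ``HOST_SMTP`` convention in the same file) instead of hardcoding ``admin``:
```python
import os
import requests

# Resolve the admin service name from the environment, falling back to "admin".
admin_host = os.environ.get("HOST_ADMIN", "admin")

fetches = requests.get("http://{}/internal/fetch".format(admin_host)).json()
```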
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `optional/fetchmail/fetchmail.py`
Content:
```
1 #!/usr/bin/python3
2
3 import time
4 import os
5 import tempfile
6 import shlex
7 import subprocess
8 import re
9 import requests
10 import sys
11 import traceback
12
13
14 FETCHMAIL = """
15 fetchmail -N \
16 --sslcertck --sslcertpath /etc/ssl/certs \
17 -f {}
18 """
19
20
21 RC_LINE = """
22 poll "{host}" proto {protocol} port {port}
23 user "{username}" password "{password}"
24 is "{user_email}"
25 smtphost "{smtphost}"
26 {options}
27 """
28
29
30 def extract_host_port(host_and_port, default_port):
31 host, _, port = re.match('^(.*)(:([0-9]*))?$', host_and_port).groups()
32 return host, int(port) if port else default_port
33
34
35 def escape_rc_string(arg):
36 return "".join("\\x%2x" % ord(char) for char in arg)
37
38
39 def fetchmail(fetchmailrc):
40 with tempfile.NamedTemporaryFile() as handler:
41 handler.write(fetchmailrc.encode("utf8"))
42 handler.flush()
43 command = FETCHMAIL.format(shlex.quote(handler.name))
44 output = subprocess.check_output(command, shell=True)
45 return output
46
47
48 def run(debug):
49 try:
50 fetches = requests.get("http://admin/internal/fetch").json()
51 smtphost, smtpport = extract_host_port(os.environ.get("HOST_SMTP", "smtp"), None)
52 if smtpport is None:
53 smtphostport = smtphost
54 else:
55 smtphostport = "%s/%d" % (smtphost, smtpport)
56 for fetch in fetches:
57 fetchmailrc = ""
58 options = "options antispam 501, 504, 550, 553, 554"
59 options += " ssl" if fetch["tls"] else ""
60 options += " keep" if fetch["keep"] else " fetchall"
61 fetchmailrc += RC_LINE.format(
62 user_email=escape_rc_string(fetch["user_email"]),
63 protocol=fetch["protocol"],
64 host=escape_rc_string(fetch["host"]),
65 port=fetch["port"],
66 smtphost=smtphostport,
67 username=escape_rc_string(fetch["username"]),
68 password=escape_rc_string(fetch["password"]),
69 options=options
70 )
71 if debug:
72 print(fetchmailrc)
73 try:
74 print(fetchmail(fetchmailrc))
75 error_message = ""
76 except subprocess.CalledProcessError as error:
77 error_message = error.output.decode("utf8")
78 # No mail is not an error
79 if not error_message.startswith("fetchmail: No mail"):
80 print(error_message)
81 user_info = "for %s at %s" % (fetch["user_email"], fetch["host"])
82 # Number of messages seen is not a error as well
83 if ("messages" in error_message and
84 "(seen " in error_message and
85 user_info in error_message):
86 print(error_message)
87 finally:
88 requests.post("http://admin/internal/fetch/{}".format(fetch["id"]),
89 json=error_message.split("\n")[0]
90 )
91 except Exception:
92 traceback.print_exc()
93
94
95 if __name__ == "__main__":
96 while True:
97 time.sleep(int(os.environ.get("FETCHMAIL_DELAY", 60)))
98 run(os.environ.get("DEBUG", None) == "True")
99 sys.stdout.flush()
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/optional/fetchmail/fetchmail.py b/optional/fetchmail/fetchmail.py
--- a/optional/fetchmail/fetchmail.py
+++ b/optional/fetchmail/fetchmail.py
@@ -47,7 +47,7 @@
def run(debug):
try:
- fetches = requests.get("http://admin/internal/fetch").json()
+ fetches = requests.get("http://" + os.environ.get("HOST_ADMIN", "admin") + "/internal/fetch").json()
smtphost, smtpport = extract_host_port(os.environ.get("HOST_SMTP", "smtp"), None)
if smtpport is None:
smtphostport = smtphost
@@ -85,7 +85,7 @@
user_info in error_message):
print(error_message)
finally:
- requests.post("http://admin/internal/fetch/{}".format(fetch["id"]),
+ requests.post("http://" + os.environ.get("HOST_ADMIN", "admin") + "/internal/fetch/{}".format(fetch["id"]),
json=error_message.split("\n")[0]
)
except Exception:
| {"golden_diff": "diff --git a/optional/fetchmail/fetchmail.py b/optional/fetchmail/fetchmail.py\n--- a/optional/fetchmail/fetchmail.py\n+++ b/optional/fetchmail/fetchmail.py\n@@ -47,7 +47,7 @@\n \n def run(debug):\n try:\n- fetches = requests.get(\"http://admin/internal/fetch\").json()\n+ fetches = requests.get(\"http://\" + os.environ.get(\"HOST_ADMIN\", \"admin\") + \"/internal/fetch\").json()\n smtphost, smtpport = extract_host_port(os.environ.get(\"HOST_SMTP\", \"smtp\"), None)\n if smtpport is None:\n smtphostport = smtphost\n@@ -85,7 +85,7 @@\n user_info in error_message):\n print(error_message)\n finally:\n- requests.post(\"http://admin/internal/fetch/{}\".format(fetch[\"id\"]),\n+ requests.post(\"http://\" + os.environ.get(\"HOST_ADMIN\", \"admin\") + \"/internal/fetch/{}\".format(fetch[\"id\"]),\n json=error_message.split(\"\\n\")[0]\n )\n except Exception:\n", "issue": "Hardcoded http://admin/ in fetchmail.py\nI've tweaked ``docker-compose.yml`` so that all my containers related to ``mailu`` are prefixed by ``mailu-``, in order to pro-actively avoid conflict with any other containers I may eventually define in future.\r\n\r\nHowever, the hardcode ``http://admin/...`` below causes failure in ``fetchmail``, since my container is now named ``mailu-admin`` in my ``docker-compose.yml``, not ``admin`` as the code supposes it should be.\r\n\r\n```\r\n./services/fetchmail/fetchmail.py:47: fetches = requests.get(\"http://admin/internal/fetch\").json()\r\n./services/fetchmail/fetchmail.py:85: requests.post(\"http://admin/internal/fetch/{}\".format(fetch[\"id\"]),\r\n```\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport time\nimport os\nimport tempfile\nimport shlex\nimport subprocess\nimport re\nimport requests\nimport sys\nimport traceback\n\n\nFETCHMAIL = \"\"\"\nfetchmail -N \\\n --sslcertck --sslcertpath /etc/ssl/certs \\\n -f {}\n\"\"\"\n\n\nRC_LINE = \"\"\"\npoll \"{host}\" proto {protocol} port {port}\n user \"{username}\" password \"{password}\"\n is \"{user_email}\"\n smtphost \"{smtphost}\"\n {options}\n\"\"\"\n\n\ndef extract_host_port(host_and_port, default_port):\n host, _, port = re.match('^(.*)(:([0-9]*))?$', host_and_port).groups()\n return host, int(port) if port else default_port\n\n\ndef escape_rc_string(arg):\n return \"\".join(\"\\\\x%2x\" % ord(char) for char in arg)\n\n\ndef fetchmail(fetchmailrc):\n with tempfile.NamedTemporaryFile() as handler:\n handler.write(fetchmailrc.encode(\"utf8\"))\n handler.flush()\n command = FETCHMAIL.format(shlex.quote(handler.name))\n output = subprocess.check_output(command, shell=True)\n return output\n\n\ndef run(debug):\n try:\n fetches = requests.get(\"http://admin/internal/fetch\").json()\n smtphost, smtpport = extract_host_port(os.environ.get(\"HOST_SMTP\", \"smtp\"), None)\n if smtpport is None:\n smtphostport = smtphost\n else:\n smtphostport = \"%s/%d\" % (smtphost, smtpport)\n for fetch in fetches:\n fetchmailrc = \"\"\n options = \"options antispam 501, 504, 550, 553, 554\"\n options += \" ssl\" if fetch[\"tls\"] else \"\"\n options += \" keep\" if fetch[\"keep\"] else \" fetchall\"\n fetchmailrc += RC_LINE.format(\n user_email=escape_rc_string(fetch[\"user_email\"]),\n protocol=fetch[\"protocol\"],\n host=escape_rc_string(fetch[\"host\"]),\n port=fetch[\"port\"],\n smtphost=smtphostport,\n username=escape_rc_string(fetch[\"username\"]),\n password=escape_rc_string(fetch[\"password\"]),\n options=options\n )\n if debug:\n print(fetchmailrc)\n try:\n print(fetchmail(fetchmailrc))\n error_message = \"\"\n 
except subprocess.CalledProcessError as error:\n error_message = error.output.decode(\"utf8\")\n # No mail is not an error\n if not error_message.startswith(\"fetchmail: No mail\"):\n print(error_message)\n user_info = \"for %s at %s\" % (fetch[\"user_email\"], fetch[\"host\"])\n # Number of messages seen is not a error as well\n if (\"messages\" in error_message and\n \"(seen \" in error_message and\n user_info in error_message):\n print(error_message)\n finally:\n requests.post(\"http://admin/internal/fetch/{}\".format(fetch[\"id\"]),\n json=error_message.split(\"\\n\")[0]\n )\n except Exception:\n traceback.print_exc()\n\n\nif __name__ == \"__main__\":\n while True:\n time.sleep(int(os.environ.get(\"FETCHMAIL_DELAY\", 60)))\n run(os.environ.get(\"DEBUG\", None) == \"True\")\n sys.stdout.flush()\n", "path": "optional/fetchmail/fetchmail.py"}], "after_files": [{"content": "#!/usr/bin/python3\n\nimport time\nimport os\nimport tempfile\nimport shlex\nimport subprocess\nimport re\nimport requests\nimport sys\nimport traceback\n\n\nFETCHMAIL = \"\"\"\nfetchmail -N \\\n --sslcertck --sslcertpath /etc/ssl/certs \\\n -f {}\n\"\"\"\n\n\nRC_LINE = \"\"\"\npoll \"{host}\" proto {protocol} port {port}\n user \"{username}\" password \"{password}\"\n is \"{user_email}\"\n smtphost \"{smtphost}\"\n {options}\n\"\"\"\n\n\ndef extract_host_port(host_and_port, default_port):\n host, _, port = re.match('^(.*)(:([0-9]*))?$', host_and_port).groups()\n return host, int(port) if port else default_port\n\n\ndef escape_rc_string(arg):\n return \"\".join(\"\\\\x%2x\" % ord(char) for char in arg)\n\n\ndef fetchmail(fetchmailrc):\n with tempfile.NamedTemporaryFile() as handler:\n handler.write(fetchmailrc.encode(\"utf8\"))\n handler.flush()\n command = FETCHMAIL.format(shlex.quote(handler.name))\n output = subprocess.check_output(command, shell=True)\n return output\n\n\ndef run(debug):\n try:\n fetches = requests.get(\"http://\" + os.environ.get(\"HOST_ADMIN\", \"admin\") + \"/internal/fetch\").json()\n smtphost, smtpport = extract_host_port(os.environ.get(\"HOST_SMTP\", \"smtp\"), None)\n if smtpport is None:\n smtphostport = smtphost\n else:\n smtphostport = \"%s/%d\" % (smtphost, smtpport)\n for fetch in fetches:\n fetchmailrc = \"\"\n options = \"options antispam 501, 504, 550, 553, 554\"\n options += \" ssl\" if fetch[\"tls\"] else \"\"\n options += \" keep\" if fetch[\"keep\"] else \" fetchall\"\n fetchmailrc += RC_LINE.format(\n user_email=escape_rc_string(fetch[\"user_email\"]),\n protocol=fetch[\"protocol\"],\n host=escape_rc_string(fetch[\"host\"]),\n port=fetch[\"port\"],\n smtphost=smtphostport,\n username=escape_rc_string(fetch[\"username\"]),\n password=escape_rc_string(fetch[\"password\"]),\n options=options\n )\n if debug:\n print(fetchmailrc)\n try:\n print(fetchmail(fetchmailrc))\n error_message = \"\"\n except subprocess.CalledProcessError as error:\n error_message = error.output.decode(\"utf8\")\n # No mail is not an error\n if not error_message.startswith(\"fetchmail: No mail\"):\n print(error_message)\n user_info = \"for %s at %s\" % (fetch[\"user_email\"], fetch[\"host\"])\n # Number of messages seen is not a error as well\n if (\"messages\" in error_message and\n \"(seen \" in error_message and\n user_info in error_message):\n print(error_message)\n finally:\n requests.post(\"http://\" + os.environ.get(\"HOST_ADMIN\", \"admin\") + \"/internal/fetch/{}\".format(fetch[\"id\"]),\n json=error_message.split(\"\\n\")[0]\n )\n except Exception:\n traceback.print_exc()\n\n\nif __name__ == 
\"__main__\":\n while True:\n time.sleep(int(os.environ.get(\"FETCHMAIL_DELAY\", 60)))\n run(os.environ.get(\"DEBUG\", None) == \"True\")\n sys.stdout.flush()\n", "path": "optional/fetchmail/fetchmail.py"}]} | 1,367 | 250 |
gh_patches_debug_9389 | rasdani/github-patches | git_diff | opsdroid__opsdroid-1774 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Mattermost connector is broken
# Description
The Mattermost connector is broken. It can connect to a Mattermost instance, but when sending a message to OpsDroid (using the Hello skill) you get:
```
ERROR opsdroid.core: Exception when running skill 'hello'.
Traceback (most recent call last):
File "/usr/local/lib/python3.8/site-packages/opsdroid/core.py", line 465, in run_skill
return await skill(event)
File "/root/.local/share/opsdroid/opsdroid-modules/skill/hello/__init__.py", line 13, in hello
await message.respond(text)
File "/usr/local/lib/python3.8/site-packages/opsdroid/events.py", line 278, in respond
"thinking-delay" in self.connector.configuration
AttributeError: 'NoneType' object has no attribute 'configuration'
WARNING mattermostdriver.websocket: Failed to establish websocket connection: 'NoneType' object has no attribute 'configuration'
```
## Steps to Reproduce
Configure the Mattermost connector and the Hello skill, start Opsdroid and send a message to the bot in Mattermost.
## Expected Functionality
A reply form the Hello skill.
## Experienced Functionality
No reply, and the above error in the Opsdroid logs.
## Versions
- **Opsdroid version:** 0.22.0
- **Python version:** 3.8
- **OS/Docker version:** N/A
## Configuration File
Please include your version of the configuration file below.
```yaml
welcome-message: false
connectors:
## Mattermost (core)
mattermost:
# Required
token: "<redacted>"
url: "<redacted>"
team-name: "<redacted>"
# Optional
scheme: "https" # default: https
port: 443 # default: 8065
ssl-verify: true # default: true
connect-timeout: 30 # default: 30
skills:
## Hello (https://github.com/opsdroid/skill-hello)
hello: {}
## Seen (https://github.com/opsdroid/skill-seen)
seen: {}
```
## Additional Details
Looks like the Mattermost connector was missed in #1116 -- I'll submit a PR shortly to correct this.
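Judging from the traceback, the ``Message`` event ends up without its connector attached. A sketch of the corrected construction in ``process_message`` (passing the arguments by keyword so they cannot be mis-mapped to the wrong parameters) would look roughly like:
```python
await self.opsdroid.parse(
    Message(
        text=post["message"],
        user=data["sender_name"],
        target=data["channel_name"],
        connector=self,
        raw_event=message,
    )
)
```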
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opsdroid/connector/mattermost/__init__.py`
Content:
```
1 """A connector for Mattermost."""
2 import logging
3 import json
4
5 from mattermostdriver import Driver, Websocket
6 from voluptuous import Required
7
8 from opsdroid.connector import Connector, register_event
9 from opsdroid.events import Message
10
11 _LOGGER = logging.getLogger(__name__)
12 CONFIG_SCHEMA = {
13 Required("token"): str,
14 Required("url"): str,
15 Required("team-name"): str,
16 "scheme": str,
17 "port": int,
18 "ssl-verify": bool,
19 "connect-timeout": int,
20 }
21
22
23 class ConnectorMattermost(Connector):
24 """A connector for Mattermost."""
25
26 def __init__(self, config, opsdroid=None):
27 """Create the connector."""
28 super().__init__(config, opsdroid=opsdroid)
29 _LOGGER.debug(_("Starting Mattermost connector"))
30 self.name = "mattermost"
31 self.token = config["token"]
32 self.url = config["url"]
33 self.team_name = config["team-name"]
34 self.scheme = config.get("scheme", "https")
35 self.port = config.get("port", 8065)
36 self.verify = config.get("ssl-verify", True)
37 self.timeout = config.get("connect-timeout", 30)
38 self.request_timeout = None
39 self.mfa_token = None
40 self.debug = False
41 self.listening = True
42
43 self.mm_driver = Driver(
44 {
45 "url": self.url,
46 "token": self.token,
47 "scheme": self.scheme,
48 "port": self.port,
49 "verify": self.verify,
50 "timeout": self.timeout,
51 "request_timeout": self.request_timeout,
52 "mfa_token": self.mfa_token,
53 "debug": self.debug,
54 }
55 )
56
57 async def connect(self):
58 """Connect to the chat service."""
59 _LOGGER.info(_("Connecting to Mattermost"))
60
61 login_response = self.mm_driver.login()
62
63 _LOGGER.info(login_response)
64
65 if "id" in login_response:
66 self.bot_id = login_response["id"]
67 if "username" in login_response:
68 self.bot_name = login_response["username"]
69
70 _LOGGER.info(_("Connected as %s"), self.bot_name)
71
72 self.mm_driver.websocket = Websocket(
73 self.mm_driver.options, self.mm_driver.client.token
74 )
75
76 _LOGGER.info(_("Connected successfully"))
77
78 async def disconnect(self):
79 """Disconnect from Mattermost."""
80 self.listening = False
81 self.mm_driver.logout()
82
83 async def listen(self):
84 """Listen for and parse new messages."""
85 await self.mm_driver.websocket.connect(self.process_message)
86
87 async def process_message(self, raw_message):
88 """Process a raw message and pass it to the parser."""
89 _LOGGER.info(raw_message)
90
91 message = json.loads(raw_message)
92
93 if "event" in message and message["event"] == "posted":
94 data = message["data"]
95 post = json.loads(data["post"])
96 await self.opsdroid.parse(
97 Message(
98 post["message"],
99 data["sender_name"],
100 data["channel_name"],
101 self,
102 raw_event=message,
103 )
104 )
105
106 @register_event(Message)
107 async def send_message(self, message):
108 """Respond with a message."""
109 _LOGGER.debug(
110 _("Responding with: '%s' in room %s"), message.text, message.target
111 )
112 channel_id = self.mm_driver.channels.get_channel_by_name_and_team_name(
113 self.team_name, message.target
114 )["id"]
115 self.mm_driver.posts.create_post(
116 options={"channel_id": channel_id, "message": message.text}
117 )
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opsdroid/connector/mattermost/__init__.py b/opsdroid/connector/mattermost/__init__.py
--- a/opsdroid/connector/mattermost/__init__.py
+++ b/opsdroid/connector/mattermost/__init__.py
@@ -95,10 +95,10 @@
post = json.loads(data["post"])
await self.opsdroid.parse(
Message(
- post["message"],
- data["sender_name"],
- data["channel_name"],
- self,
+ text=post["message"],
+ user=data["sender_name"],
+ target=data["channel_name"],
+ connector=self,
raw_event=message,
)
)
| {"golden_diff": "diff --git a/opsdroid/connector/mattermost/__init__.py b/opsdroid/connector/mattermost/__init__.py\n--- a/opsdroid/connector/mattermost/__init__.py\n+++ b/opsdroid/connector/mattermost/__init__.py\n@@ -95,10 +95,10 @@\n post = json.loads(data[\"post\"])\n await self.opsdroid.parse(\n Message(\n- post[\"message\"],\n- data[\"sender_name\"],\n- data[\"channel_name\"],\n- self,\n+ text=post[\"message\"],\n+ user=data[\"sender_name\"],\n+ target=data[\"channel_name\"],\n+ connector=self,\n raw_event=message,\n )\n )\n", "issue": "Mattermost connector is broken\n# Description\r\nThe Mattermost connector is broken. It can connect to a Mattermost instance, but when sending a message to OpsDroid (using the Hello skill) you get:\r\n\r\n```\r\nERROR opsdroid.core: Exception when running skill 'hello'.\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.8/site-packages/opsdroid/core.py\", line 465, in run_skill\r\n return await skill(event)\r\n File \"/root/.local/share/opsdroid/opsdroid-modules/skill/hello/__init__.py\", line 13, in hello\r\n await message.respond(text)\r\n File \"/usr/local/lib/python3.8/site-packages/opsdroid/events.py\", line 278, in respond\r\n \"thinking-delay\" in self.connector.configuration\r\nAttributeError: 'NoneType' object has no attribute 'configuration'\r\nWARNING mattermostdriver.websocket: Failed to establish websocket connection: 'NoneType' object has no attribute 'configuration'\r\n```\r\n\r\n## Steps to Reproduce\r\nConfigure the Mattermost connector and the Hello skill, start Opsdroid and send a message to the bot in Mattermost.\r\n\r\n\r\n## Expected Functionality\r\nA reply form the Hello skill.\r\n\r\n\r\n## Experienced Functionality\r\nNo reply, and the above error in the Opsdroid logs.\r\n\r\n## Versions\r\n- **Opsdroid version:** 0.22.0\r\n- **Python version:** 3.8\r\n- **OS/Docker version:** N/A\r\n\r\n## Configuration File\r\nPlease include your version of the configuration file below.\r\n\r\n```yaml\r\nwelcome-message: false\r\n\r\nconnectors:\r\n ## Mattermost (core)\r\n mattermost:\r\n # Required\r\n token: \"<redacted>\"\r\n url: \"<redacted>\"\r\n team-name: \"<redacted>\"\r\n # Optional\r\n scheme: \"https\" # default: https\r\n port: 443 # default: 8065\r\n ssl-verify: true # default: true\r\n connect-timeout: 30 # default: 30\r\n\r\nskills:\r\n ## Hello (https://github.com/opsdroid/skill-hello)\r\n hello: {}\r\n ## Seen (https://github.com/opsdroid/skill-seen)\r\n seen: {}\r\n```\r\n\r\n## Additional Details\r\nLooks like this the Mattermost connector was missed in #1116 -- I'll submit a PR shortly to correct this.\n", "before_files": [{"content": "\"\"\"A connector for Mattermost.\"\"\"\nimport logging\nimport json\n\nfrom mattermostdriver import Driver, Websocket\nfrom voluptuous import Required\n\nfrom opsdroid.connector import Connector, register_event\nfrom opsdroid.events import Message\n\n_LOGGER = logging.getLogger(__name__)\nCONFIG_SCHEMA = {\n Required(\"token\"): str,\n Required(\"url\"): str,\n Required(\"team-name\"): str,\n \"scheme\": str,\n \"port\": int,\n \"ssl-verify\": bool,\n \"connect-timeout\": int,\n}\n\n\nclass ConnectorMattermost(Connector):\n \"\"\"A connector for Mattermost.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create the connector.\"\"\"\n super().__init__(config, opsdroid=opsdroid)\n _LOGGER.debug(_(\"Starting Mattermost connector\"))\n self.name = \"mattermost\"\n self.token = config[\"token\"]\n self.url = config[\"url\"]\n self.team_name = 
config[\"team-name\"]\n self.scheme = config.get(\"scheme\", \"https\")\n self.port = config.get(\"port\", 8065)\n self.verify = config.get(\"ssl-verify\", True)\n self.timeout = config.get(\"connect-timeout\", 30)\n self.request_timeout = None\n self.mfa_token = None\n self.debug = False\n self.listening = True\n\n self.mm_driver = Driver(\n {\n \"url\": self.url,\n \"token\": self.token,\n \"scheme\": self.scheme,\n \"port\": self.port,\n \"verify\": self.verify,\n \"timeout\": self.timeout,\n \"request_timeout\": self.request_timeout,\n \"mfa_token\": self.mfa_token,\n \"debug\": self.debug,\n }\n )\n\n async def connect(self):\n \"\"\"Connect to the chat service.\"\"\"\n _LOGGER.info(_(\"Connecting to Mattermost\"))\n\n login_response = self.mm_driver.login()\n\n _LOGGER.info(login_response)\n\n if \"id\" in login_response:\n self.bot_id = login_response[\"id\"]\n if \"username\" in login_response:\n self.bot_name = login_response[\"username\"]\n\n _LOGGER.info(_(\"Connected as %s\"), self.bot_name)\n\n self.mm_driver.websocket = Websocket(\n self.mm_driver.options, self.mm_driver.client.token\n )\n\n _LOGGER.info(_(\"Connected successfully\"))\n\n async def disconnect(self):\n \"\"\"Disconnect from Mattermost.\"\"\"\n self.listening = False\n self.mm_driver.logout()\n\n async def listen(self):\n \"\"\"Listen for and parse new messages.\"\"\"\n await self.mm_driver.websocket.connect(self.process_message)\n\n async def process_message(self, raw_message):\n \"\"\"Process a raw message and pass it to the parser.\"\"\"\n _LOGGER.info(raw_message)\n\n message = json.loads(raw_message)\n\n if \"event\" in message and message[\"event\"] == \"posted\":\n data = message[\"data\"]\n post = json.loads(data[\"post\"])\n await self.opsdroid.parse(\n Message(\n post[\"message\"],\n data[\"sender_name\"],\n data[\"channel_name\"],\n self,\n raw_event=message,\n )\n )\n\n @register_event(Message)\n async def send_message(self, message):\n \"\"\"Respond with a message.\"\"\"\n _LOGGER.debug(\n _(\"Responding with: '%s' in room %s\"), message.text, message.target\n )\n channel_id = self.mm_driver.channels.get_channel_by_name_and_team_name(\n self.team_name, message.target\n )[\"id\"]\n self.mm_driver.posts.create_post(\n options={\"channel_id\": channel_id, \"message\": message.text}\n )\n", "path": "opsdroid/connector/mattermost/__init__.py"}], "after_files": [{"content": "\"\"\"A connector for Mattermost.\"\"\"\nimport logging\nimport json\n\nfrom mattermostdriver import Driver, Websocket\nfrom voluptuous import Required\n\nfrom opsdroid.connector import Connector, register_event\nfrom opsdroid.events import Message\n\n_LOGGER = logging.getLogger(__name__)\nCONFIG_SCHEMA = {\n Required(\"token\"): str,\n Required(\"url\"): str,\n Required(\"team-name\"): str,\n \"scheme\": str,\n \"port\": int,\n \"ssl-verify\": bool,\n \"connect-timeout\": int,\n}\n\n\nclass ConnectorMattermost(Connector):\n \"\"\"A connector for Mattermost.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create the connector.\"\"\"\n super().__init__(config, opsdroid=opsdroid)\n _LOGGER.debug(_(\"Starting Mattermost connector\"))\n self.name = \"mattermost\"\n self.token = config[\"token\"]\n self.url = config[\"url\"]\n self.team_name = config[\"team-name\"]\n self.scheme = config.get(\"scheme\", \"https\")\n self.port = config.get(\"port\", 8065)\n self.verify = config.get(\"ssl-verify\", True)\n self.timeout = config.get(\"connect-timeout\", 30)\n self.request_timeout = None\n self.mfa_token = None\n self.debug = 
False\n self.listening = True\n\n self.mm_driver = Driver(\n {\n \"url\": self.url,\n \"token\": self.token,\n \"scheme\": self.scheme,\n \"port\": self.port,\n \"verify\": self.verify,\n \"timeout\": self.timeout,\n \"request_timeout\": self.request_timeout,\n \"mfa_token\": self.mfa_token,\n \"debug\": self.debug,\n }\n )\n\n async def connect(self):\n \"\"\"Connect to the chat service.\"\"\"\n _LOGGER.info(_(\"Connecting to Mattermost\"))\n\n login_response = self.mm_driver.login()\n\n _LOGGER.info(login_response)\n\n if \"id\" in login_response:\n self.bot_id = login_response[\"id\"]\n if \"username\" in login_response:\n self.bot_name = login_response[\"username\"]\n\n _LOGGER.info(_(\"Connected as %s\"), self.bot_name)\n\n self.mm_driver.websocket = Websocket(\n self.mm_driver.options, self.mm_driver.client.token\n )\n\n _LOGGER.info(_(\"Connected successfully\"))\n\n async def disconnect(self):\n \"\"\"Disconnect from Mattermost.\"\"\"\n self.listening = False\n self.mm_driver.logout()\n\n async def listen(self):\n \"\"\"Listen for and parse new messages.\"\"\"\n await self.mm_driver.websocket.connect(self.process_message)\n\n async def process_message(self, raw_message):\n \"\"\"Process a raw message and pass it to the parser.\"\"\"\n _LOGGER.info(raw_message)\n\n message = json.loads(raw_message)\n\n if \"event\" in message and message[\"event\"] == \"posted\":\n data = message[\"data\"]\n post = json.loads(data[\"post\"])\n await self.opsdroid.parse(\n Message(\n text=post[\"message\"],\n user=data[\"sender_name\"],\n target=data[\"channel_name\"],\n connector=self,\n raw_event=message,\n )\n )\n\n @register_event(Message)\n async def send_message(self, message):\n \"\"\"Respond with a message.\"\"\"\n _LOGGER.debug(\n _(\"Responding with: '%s' in room %s\"), message.text, message.target\n )\n channel_id = self.mm_driver.channels.get_channel_by_name_and_team_name(\n self.team_name, message.target\n )[\"id\"]\n self.mm_driver.posts.create_post(\n options={\"channel_id\": channel_id, \"message\": message.text}\n )\n", "path": "opsdroid/connector/mattermost/__init__.py"}]} | 1,808 | 159 |
gh_patches_debug_1963 | rasdani/github-patches | git_diff | graspologic-org__graspologic-583 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix bug in `is_unweighted` for sparse
- [ ] Does this PR add any new dependencies?
- [ ] Does this PR modify any existing APIs?
- [ ] Is the change to the API backwards compatible?
- [ ] Have you built the documentation (reference and/or tutorial) and verified the generated documentation is appropriate?
#### Reference Issues/PRs
#### What does this implement/fix? Briefly explain your changes.
`is_unweighted` doesn't work properly for a sparse array input
#### Any other comments?
I think we could instead just do `graph[graph != 0].max() == 1 and graph[graph != 0].min() == 1`
for that entire section of the code.
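A rough, self-contained sketch of that idea, covering both a dense ``numpy`` array and a ``scipy.sparse`` matrix (illustrative only, not the final implementation):
```python
from scipy.sparse import issparse

def looks_unweighted(graph):
    # Nonzero values of either a dense numpy array or a scipy.sparse matrix.
    nonzero = graph.data[graph.data != 0] if issparse(graph) else graph[graph != 0]
    if nonzero.size == 0:
        # An empty graph is trivially unweighted.
        return True
    return nonzero.max() == 1 and nonzero.min() == 1
```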
[BUG] Bug in joblib transitive dependency causes exception when multi-threading
## Expected Behavior
Multi-threading LatentDistributionTest using a "workers" value != 1 should return without error on all platforms.
## Actual Behavior
When using any "workers" value > 1 or equal to -1 on a Windows computer, the code throws an exception.
## Example Code
```python
test = LatentDistributionTest(input_graph=False, workers=10)
result = test.fit_predict(graph1, graph2)
```
## Full Traceback
```pytb
C:\ProgramData\Anaconda3\lib\site-packages\joblib\disk.py:122: UserWarning: Unable to delete folder C:\Users\msrwinadm4\AppData\Local\Temp\5\joblib_memmapping_folder_11132_7308949288 after 5 tentatives.
.format(folder_path, RM_SUBDIRS_N_RETRY))
Traceback (most recent call last):
File "GraphsByOrg.py", line 79, in <module>
logger.info(f'Calculating nonpar for {org1} and {org2}')
File "C:\ProgramData\Anaconda3\lib\site-packages\graspologic\inference\latent_distribution_test.py", line 487, in fit_predict
self.fit(A1, A2)
File "C:\ProgramData\Anaconda3\lib\site-packages\graspologic\inference\latent_distribution_test.py", line 449, in fit
X1_hat, X2_hat, reps=self.n_bootstraps, workers=self.workers, auto=False
File "C:\ProgramData\Anaconda3\lib\site-packages\hyppo\ksample\ksamp.py", line 166, in test
return self.indep_test.test(u, v, reps, workers, auto=auto)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyppo\independence\dcorr.py", line 215, in test
stat, pvalue = super(Dcorr, self).test(x, y, reps, workers)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyppo\independence\base.py", line 67, in test
self._statistic, x, y, reps=reps, workers=workers, is_distsim=is_distsim
File "C:\ProgramData\Anaconda3\lib\site-packages\hyppo\_utils.py", line 140, in perm_test
[delayed(_perm_stat)(calc_stat, x, y, is_distsim) for rep in range(reps)]
File "C:\ProgramData\Anaconda3\lib\site-packages\joblib\parallel.py", line 1027, in __call__
self._terminate_backend()
File "C:\ProgramData\Anaconda3\lib\site-packages\joblib\parallel.py", line 734, in _terminate_backend
self._backend.terminate()
File "C:\ProgramData\Anaconda3\lib\site-packages\joblib\_parallel_backends.py", line 571, in terminate
delete_folder(self._workers._temp_folder)
File "C:\ProgramData\Anaconda3\lib\site-packages\joblib\disk.py", line 115, in delete_folder
shutil.rmtree(folder_path, False, None)
File "C:\ProgramData\Anaconda3\lib\shutil.py", line 516, in rmtree
return _rmtree_unsafe(path, onerror)
File "C:\ProgramData\Anaconda3\lib\shutil.py", line 400, in _rmtree_unsafe
onerror(os.unlink, fullname, sys.exc_info())
File "C:\ProgramData\Anaconda3\lib\shutil.py", line 398, in _rmtree_unsafe
os.unlink(fullname)
PermissionError: [WinError 32] The process cannot access the file because it is being used by another process: 'C:\\Users\\msrwinadm4\\AppData\\Local\\Temp\\5\\joblib_memmapping_folder_11132_7308949288\\11132-1819792920136-683b9c4b033b449dbac251acbe3decfb.pkl'
C:\ProgramData\Anaconda3\lib\site-packages\joblib\disk.py:122: UserWarning: Unable to delete folder C:\Users\msrwinadm4\AppData\Local\Temp\5\joblib_memmapping_folder_11132_7308949288 after 5 tentatives.
.format(folder_path, RM_SUBDIRS_N_RETRY))
C:\ProgramData\Anaconda3\lib\site-packages\joblib\_memmapping_reducer.py:409: UserWarning: Failed to clean temporary folder: C:\Users\msrwinadm4\AppData\Local\Temp\5\joblib_memmapping_folder_11132_7308949288
.format(pool_folder))
```
## Your Environment
* Python version: 3.7.6 (Anaconda)
* graspologic version: 0.1.0.dev331219603
* Windows 2016 Datacenter (448 GB RAM) x64
## Additional Details
graspologic==0.1.0.dev331219603
joblib==0.14.1
hyppo==0.1.3
scikit-image==0.16.2
scikit-learn==0.22.1
scipy==1.4.1
numpy==1.18.1
## Underlying problem:
Older versions of joblib have a known issue when running on Windows. See https://github.com/joblib/joblib/issues/806. This appears to have been fixed on May 3rd, 2020 by https://github.com/joblib/joblib/pull/966.
Hyppo uses joblib as a transitive dependency of scikit-learn but does not declare it as a dependency itself. Scikit-learn only requires joblib 0.11, which does not include this fix. See https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/_min_dependencies.py
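Until joblib is declared (and pinned) as a direct dependency, a defensive workaround on the calling side could look like the sketch below; the ``0.17.0`` bound is an assumption based on the version that ends up being pinned, not a verified minimum:
```python
import sys

import joblib
from packaging import version

workers = 10
if sys.platform == "win32" and version.parse(joblib.__version__) < version.parse("0.17.0"):
    # Older joblib releases fail to clean up memmapped temp folders on Windows
    # (joblib issue #806), which surfaces as the PermissionError shown above.
    workers = 1
```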
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright (c) Microsoft Corporation and contributors.
2 # Licensed under the MIT License.
3
4 import os
5 import sys
6 from setuptools import setup, find_packages
7
8
9 MINIMUM_PYTHON_VERSION = 3, 6 # Minimum of Python 3.6
10
11 if sys.version_info < MINIMUM_PYTHON_VERSION:
12 sys.exit("Python {}.{}+ is required.".format(*MINIMUM_PYTHON_VERSION))
13
14 sys.path.insert(0, os.path.join("graspologic", "version"))
15 from version import version
16
17 sys.path.pop(0)
18
19 version_path = os.path.join("graspologic", "version", "version.txt")
20 with open(version_path, "w") as version_file:
21 version_file.write(f"{version}")
22
23 with open("README.md", "r") as f:
24 LONG_DESCRIPTION = f.read()
25
26 setup(
27 name="graspologic",
28 version=version,
29 description="A set of python modules for graph statistics",
30 long_description=LONG_DESCRIPTION,
31 long_description_content_type="text/markdown",
32 author="Eric Bridgeford, Jaewon Chung, Benjamin Pedigo, Bijan Varjavand",
33 author_email="[email protected]",
34 maintainer="Dwayne Pryce",
35 maintainer_email="[email protected]",
36 url="https://github.com/microsoft/graspologic",
37 license="MIT",
38 classifiers=[
39 "Development Status :: 3 - Alpha",
40 "Intended Audience :: Science/Research",
41 "Topic :: Scientific/Engineering :: Mathematics",
42 "License :: OSI Approved :: MIT License",
43 "Programming Language :: Python :: 3",
44 "Programming Language :: Python :: 3.6",
45 "Programming Language :: Python :: 3.7",
46 ],
47 packages=find_packages(exclude=["tests", "tests.*", "tests/*"]),
48 include_package_data=True,
49 package_data={"version": [os.path.join("graspologic", "version", "version.txt")]},
50 install_requires=[
51 "anytree>=2.8.0",
52 "gensim",
53 "hyppo>=0.1.3",
54 "matplotlib>=3.0.0,<=3.3.0",
55 "networkx>=2.1",
56 "numpy>=1.8.1",
57 "POT>=0.7.0",
58 "seaborn>=0.9.0",
59 "scikit-learn>=0.19.1",
60 "scipy>=1.4.0",
61 ],
62 extras_require={
63 "dev": [
64 "black",
65 "ipykernel>=5.1.0",
66 "ipython>=7.4.0",
67 "mypy",
68 "nbsphinx",
69 "numpydoc",
70 "pandoc",
71 "pytest",
72 "pytest-cov",
73 "sphinx",
74 "sphinxcontrib-rawfiles",
75 "sphinx-rtd-theme",
76 "testfixtures",
77 ]
78 },
79 )
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -51,6 +51,7 @@
"anytree>=2.8.0",
"gensim",
"hyppo>=0.1.3",
+ "joblib>=0.17.0", # Older versions of joblib cause issue #806. Transitive dependency of hyppo.
"matplotlib>=3.0.0,<=3.3.0",
"networkx>=2.1",
"numpy>=1.8.1",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -51,6 +51,7 @@\n \"anytree>=2.8.0\",\n \"gensim\",\n \"hyppo>=0.1.3\",\n+ \"joblib>=0.17.0\", # Older versions of joblib cause issue #806. Transitive dependency of hyppo.\n \"matplotlib>=3.0.0,<=3.3.0\",\n \"networkx>=2.1\",\n \"numpy>=1.8.1\",\n", "issue": "Fix bug in `is_unweighted` for sparse\n- [ ] Does this PR add any new dependencies?\r\n- [ ] Does this PR modify any existing APIs?\r\n - [ ] Is the change to the API backwards compatible?\r\n- [ ] Have you built the documentation (reference and/or tutorial) and verified the generated documentation is appropriate?\r\n\r\n#### Reference Issues/PRs\r\n\r\n#### What does this implement/fix? Briefly explain your changes.\r\n`is_unweighted` doesn't work properly for a sparse array input\r\n\r\n#### Any other comments?\r\nI think we could instead just do `graph[graph != 0].max() == 1 and graph[graph != 0].min() == 1`\r\nfor that entire section of the code.\n[BUG] Bug in joblib transitive dependency causes exception when multi-threading\n## Expected Behavior\r\nMulti-threading LatentDistributionTest using a \"workers\" value != 1 should return without error on all platforms.\r\n\r\n## Actual Behavior\r\nWhen using any \"workers\" value > 1 or equal to -1 on a Windows computer, the code throws an exception.\r\n\r\n## Example Code\r\n\r\n```python\r\ntest = LatentDistributionTest(input_graph=False, workers=10)\r\nresult = test.fit_predict(graph1, graph2)\r\n```\r\n\r\n## Full Traceback\r\n```pytb\r\nC:\\ProgramData\\Anaconda3\\lib\\site-packages\\joblib\\disk.py:122: UserWarning: Unable to delete folder C:\\Users\\msrwinadm4\\AppData\\Local\\Temp\\5\\joblib_memmapping_folder_11132_7308949288 after 5 tentatives.\r\n .format(folder_path, RM_SUBDIRS_N_RETRY))\r\nTraceback (most recent call last):\r\n File \"GraphsByOrg.py\", line 79, in <module>\r\n logger.info(f'Calculating nonpar for {org1} and {org2}')\r\n File \"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\graspologic\\inference\\latent_distribution_test.py\", line 487, in fit_predict\r\n self.fit(A1, A2)\r\n File \"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\graspologic\\inference\\latent_distribution_test.py\", line 449, in fit\r\n X1_hat, X2_hat, reps=self.n_bootstraps, workers=self.workers, auto=False\r\n File \"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\hyppo\\ksample\\ksamp.py\", line 166, in test\r\n return self.indep_test.test(u, v, reps, workers, auto=auto)\r\n File \"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\hyppo\\independence\\dcorr.py\", line 215, in test\r\n stat, pvalue = super(Dcorr, self).test(x, y, reps, workers)\r\n File \"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\hyppo\\independence\\base.py\", line 67, in test\r\n self._statistic, x, y, reps=reps, workers=workers, is_distsim=is_distsim\r\n File \"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\hyppo\\_utils.py\", line 140, in perm_test\r\n [delayed(_perm_stat)(calc_stat, x, y, is_distsim) for rep in range(reps)]\r\n File \"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\joblib\\parallel.py\", line 1027, in __call__\r\n self._terminate_backend()\r\n File \"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\joblib\\parallel.py\", line 734, in _terminate_backend\r\n self._backend.terminate()\r\n File \"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\joblib\\_parallel_backends.py\", line 571, in terminate\r\n delete_folder(self._workers._temp_folder)\r\n File 
\"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\joblib\\disk.py\", line 115, in delete_folder\r\n shutil.rmtree(folder_path, False, None)\r\n File \"C:\\ProgramData\\Anaconda3\\lib\\shutil.py\", line 516, in rmtree\r\n return _rmtree_unsafe(path, onerror)\r\n File \"C:\\ProgramData\\Anaconda3\\lib\\shutil.py\", line 400, in _rmtree_unsafe\r\n onerror(os.unlink, fullname, sys.exc_info())\r\n File \"C:\\ProgramData\\Anaconda3\\lib\\shutil.py\", line 398, in _rmtree_unsafe\r\n os.unlink(fullname)\r\nPermissionError: [WinError 32] The process cannot access the file because it is being used by another process: 'C:\\\\Users\\\\msrwinadm4\\\\AppData\\\\Local\\\\Temp\\\\5\\\\joblib_memmapping_folder_11132_7308949288\\\\11132-1819792920136-683b9c4b033b449dbac251acbe3decfb.pkl'\r\nC:\\ProgramData\\Anaconda3\\lib\\site-packages\\joblib\\disk.py:122: UserWarning: Unable to delete folder C:\\Users\\msrwinadm4\\AppData\\Local\\Temp\\5\\joblib_memmapping_folder_11132_7308949288 after 5 tentatives.\r\n .format(folder_path, RM_SUBDIRS_N_RETRY))\r\nC:\\ProgramData\\Anaconda3\\lib\\site-packages\\joblib\\_memmapping_reducer.py:409: UserWarning: Failed to clean temporary folder: C:\\Users\\msrwinadm4\\AppData\\Local\\Temp\\5\\joblib_memmapping_folder_11132_7308949288\r\n .format(pool_folder))\r\n\r\n```\r\n\r\n## Your Environment\r\n* Python version: 3.7.6 (Anaconda)\r\n* graspologic version: 0.1.0.dev331219603\r\n* Windows 2016 Datacenter (448 GB RAM) x64\r\n\r\n## Additional Details\r\ngraspologic==0.1.0.dev331219603\r\njoblib==0.14.1\r\nhyppo==0.1.3\r\nscikit-image==0.16.2\r\nscikit-learn==0.22.1\r\nscipy==1.4.1\r\nnumpy==1.18.1\r\n\r\n## Underlying problem:\r\nOlder versions of joblib have a known issue running on Windows. See https://github.com/joblib/joblib/issues/806. This appears to be fixed on May 3rd, 2020 by https://github.com/joblib/joblib/pull/966.\r\n\r\nHyppo uses joblib as a transitive dependency of scikit-learn but does not declare it as a dependency. Scikit-learn only requires joblib 0.11 which does not include this fix. 
See https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/_min_dependencies.py\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation and contributors.\n# Licensed under the MIT License.\n\nimport os\nimport sys\nfrom setuptools import setup, find_packages\n\n\nMINIMUM_PYTHON_VERSION = 3, 6 # Minimum of Python 3.6\n\nif sys.version_info < MINIMUM_PYTHON_VERSION:\n sys.exit(\"Python {}.{}+ is required.\".format(*MINIMUM_PYTHON_VERSION))\n\nsys.path.insert(0, os.path.join(\"graspologic\", \"version\"))\nfrom version import version\n\nsys.path.pop(0)\n\nversion_path = os.path.join(\"graspologic\", \"version\", \"version.txt\")\nwith open(version_path, \"w\") as version_file:\n version_file.write(f\"{version}\")\n\nwith open(\"README.md\", \"r\") as f:\n LONG_DESCRIPTION = f.read()\n\nsetup(\n name=\"graspologic\",\n version=version,\n description=\"A set of python modules for graph statistics\",\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n author=\"Eric Bridgeford, Jaewon Chung, Benjamin Pedigo, Bijan Varjavand\",\n author_email=\"[email protected]\",\n maintainer=\"Dwayne Pryce\",\n maintainer_email=\"[email protected]\",\n url=\"https://github.com/microsoft/graspologic\",\n license=\"MIT\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n packages=find_packages(exclude=[\"tests\", \"tests.*\", \"tests/*\"]),\n include_package_data=True,\n package_data={\"version\": [os.path.join(\"graspologic\", \"version\", \"version.txt\")]},\n install_requires=[\n \"anytree>=2.8.0\",\n \"gensim\",\n \"hyppo>=0.1.3\",\n \"matplotlib>=3.0.0,<=3.3.0\",\n \"networkx>=2.1\",\n \"numpy>=1.8.1\",\n \"POT>=0.7.0\",\n \"seaborn>=0.9.0\",\n \"scikit-learn>=0.19.1\",\n \"scipy>=1.4.0\",\n ],\n extras_require={\n \"dev\": [\n \"black\",\n \"ipykernel>=5.1.0\",\n \"ipython>=7.4.0\",\n \"mypy\",\n \"nbsphinx\",\n \"numpydoc\",\n \"pandoc\",\n \"pytest\",\n \"pytest-cov\",\n \"sphinx\",\n \"sphinxcontrib-rawfiles\",\n \"sphinx-rtd-theme\",\n \"testfixtures\",\n ]\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation and contributors.\n# Licensed under the MIT License.\n\nimport os\nimport sys\nfrom setuptools import setup, find_packages\n\n\nMINIMUM_PYTHON_VERSION = 3, 6 # Minimum of Python 3.6\n\nif sys.version_info < MINIMUM_PYTHON_VERSION:\n sys.exit(\"Python {}.{}+ is required.\".format(*MINIMUM_PYTHON_VERSION))\n\nsys.path.insert(0, os.path.join(\"graspologic\", \"version\"))\nfrom version import version\n\nsys.path.pop(0)\n\nversion_path = os.path.join(\"graspologic\", \"version\", \"version.txt\")\nwith open(version_path, \"w\") as version_file:\n version_file.write(f\"{version}\")\n\nwith open(\"README.md\", \"r\") as f:\n LONG_DESCRIPTION = f.read()\n\nsetup(\n name=\"graspologic\",\n version=version,\n description=\"A set of python modules for graph statistics\",\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n author=\"Eric Bridgeford, Jaewon Chung, Benjamin Pedigo, Bijan Varjavand\",\n author_email=\"[email protected]\",\n maintainer=\"Dwayne Pryce\",\n maintainer_email=\"[email protected]\",\n url=\"https://github.com/microsoft/graspologic\",\n 
license=\"MIT\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n packages=find_packages(exclude=[\"tests\", \"tests.*\", \"tests/*\"]),\n include_package_data=True,\n package_data={\"version\": [os.path.join(\"graspologic\", \"version\", \"version.txt\")]},\n install_requires=[\n \"anytree>=2.8.0\",\n \"gensim\",\n \"hyppo>=0.1.3\",\n \"joblib>=0.17.0\", # Older versions of joblib cause issue #806. Transitive dependency of hyppo.\n \"matplotlib>=3.0.0,<=3.3.0\",\n \"networkx>=2.1\",\n \"numpy>=1.8.1\",\n \"POT>=0.7.0\",\n \"seaborn>=0.9.0\",\n \"scikit-learn>=0.19.1\",\n \"scipy>=1.4.0\",\n ],\n extras_require={\n \"dev\": [\n \"black\",\n \"ipykernel>=5.1.0\",\n \"ipython>=7.4.0\",\n \"mypy\",\n \"nbsphinx\",\n \"numpydoc\",\n \"pandoc\",\n \"pytest\",\n \"pytest-cov\",\n \"sphinx\",\n \"sphinxcontrib-rawfiles\",\n \"sphinx-rtd-theme\",\n \"testfixtures\",\n ]\n },\n)\n", "path": "setup.py"}]} | 2,645 | 129 |
gh_patches_debug_40779 | rasdani/github-patches | git_diff | getsentry__sentry-python-766 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AWS Lambda under Python 3.8 is currently broken
It seems AWS has made some updates to the runtime that broke our integration.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sentry_sdk/integrations/aws_lambda.py`
Content:
```
1 from datetime import datetime, timedelta
2 from os import environ
3 import sys
4 import json
5
6 from sentry_sdk.hub import Hub, _should_send_default_pii
7 from sentry_sdk._compat import reraise
8 from sentry_sdk.utils import (
9 AnnotatedValue,
10 capture_internal_exceptions,
11 event_from_exception,
12 logger,
13 TimeoutThread,
14 )
15 from sentry_sdk.integrations import Integration
16 from sentry_sdk.integrations._wsgi_common import _filter_headers
17
18 from sentry_sdk._types import MYPY
19
20 if MYPY:
21 from typing import Any
22 from typing import TypeVar
23 from typing import Callable
24 from typing import Optional
25
26 from sentry_sdk._types import EventProcessor, Event, Hint
27
28 F = TypeVar("F", bound=Callable[..., Any])
29
30 # Constants
31 TIMEOUT_WARNING_BUFFER = 1500 # Buffer time required to send timeout warning to Sentry
32 MILLIS_TO_SECONDS = 1000.0
33
34
35 def _wrap_init_error(init_error):
36 # type: (F) -> F
37 def sentry_init_error(*args, **kwargs):
38 # type: (*Any, **Any) -> Any
39
40 hub = Hub.current
41 integration = hub.get_integration(AwsLambdaIntegration)
42 if integration is None:
43 return init_error(*args, **kwargs)
44
45 # Fetch Initialization error details from arguments
46 error = json.loads(args[1])
47
48 # If an integration is there, a client has to be there.
49 client = hub.client # type: Any
50
51 with hub.push_scope() as scope:
52 with capture_internal_exceptions():
53 scope.clear_breadcrumbs()
54 # Checking if there is any error/exception which is raised in the runtime
55 # environment from arguments and, re-raising it to capture it as an event.
56 if error.get("errorType"):
57 exc_info = sys.exc_info()
58 event, hint = event_from_exception(
59 exc_info,
60 client_options=client.options,
61 mechanism={"type": "aws_lambda", "handled": False},
62 )
63 hub.capture_event(event, hint=hint)
64
65 return init_error(*args, **kwargs)
66
67 return sentry_init_error # type: ignore
68
69
70 def _wrap_handler(handler):
71 # type: (F) -> F
72 def sentry_handler(event, context, *args, **kwargs):
73 # type: (Any, Any, *Any, **Any) -> Any
74 hub = Hub.current
75 integration = hub.get_integration(AwsLambdaIntegration)
76 if integration is None:
77 return handler(event, context, *args, **kwargs)
78
79 # If an integration is there, a client has to be there.
80 client = hub.client # type: Any
81 configured_time = context.get_remaining_time_in_millis()
82
83 with hub.push_scope() as scope:
84 with capture_internal_exceptions():
85 scope.clear_breadcrumbs()
86 scope.transaction = context.function_name
87 scope.add_event_processor(
88 _make_request_event_processor(event, context, configured_time)
89 )
90 # Starting the Timeout thread only if the configured time is greater than Timeout warning
91 # buffer and timeout_warning parameter is set True.
92 if (
93 integration.timeout_warning
94 and configured_time > TIMEOUT_WARNING_BUFFER
95 ):
96 waiting_time = (
97 configured_time - TIMEOUT_WARNING_BUFFER
98 ) / MILLIS_TO_SECONDS
99
100 timeout_thread = TimeoutThread(
101 waiting_time, configured_time / MILLIS_TO_SECONDS
102 )
103
104 # Starting the thread to raise timeout warning exception
105 timeout_thread.start()
106
107 try:
108 return handler(event, context, *args, **kwargs)
109 except Exception:
110 exc_info = sys.exc_info()
111 event, hint = event_from_exception(
112 exc_info,
113 client_options=client.options,
114 mechanism={"type": "aws_lambda", "handled": False},
115 )
116 hub.capture_event(event, hint=hint)
117 reraise(*exc_info)
118
119 return sentry_handler # type: ignore
120
121
122 def _drain_queue():
123 # type: () -> None
124 with capture_internal_exceptions():
125 hub = Hub.current
126 integration = hub.get_integration(AwsLambdaIntegration)
127 if integration is not None:
128 # Flush out the event queue before AWS kills the
129 # process.
130 hub.flush()
131
132
133 class AwsLambdaIntegration(Integration):
134 identifier = "aws_lambda"
135
136 def __init__(self, timeout_warning=False):
137 # type: (bool) -> None
138 self.timeout_warning = timeout_warning
139
140 @staticmethod
141 def setup_once():
142 # type: () -> None
143 import __main__ as lambda_bootstrap # type: ignore
144
145 pre_37 = True # Python 3.6 or 2.7
146
147 if not hasattr(lambda_bootstrap, "handle_http_request"):
148 try:
149 import bootstrap as lambda_bootstrap # type: ignore
150
151 pre_37 = False # Python 3.7
152 except ImportError:
153 pass
154
155 if not hasattr(lambda_bootstrap, "handle_event_request"):
156 logger.warning(
157 "Not running in AWS Lambda environment, "
158 "AwsLambdaIntegration disabled"
159 )
160 return
161
162 if pre_37:
163 old_handle_event_request = lambda_bootstrap.handle_event_request
164
165 def sentry_handle_event_request(request_handler, *args, **kwargs):
166 # type: (Any, *Any, **Any) -> Any
167 request_handler = _wrap_handler(request_handler)
168 return old_handle_event_request(request_handler, *args, **kwargs)
169
170 lambda_bootstrap.handle_event_request = sentry_handle_event_request
171
172 old_handle_http_request = lambda_bootstrap.handle_http_request
173
174 def sentry_handle_http_request(request_handler, *args, **kwargs):
175 # type: (Any, *Any, **Any) -> Any
176 request_handler = _wrap_handler(request_handler)
177 return old_handle_http_request(request_handler, *args, **kwargs)
178
179 lambda_bootstrap.handle_http_request = sentry_handle_http_request
180
181 # Patch to_json to drain the queue. This should work even when the
182 # SDK is initialized inside of the handler
183
184 old_to_json = lambda_bootstrap.to_json
185
186 def sentry_to_json(*args, **kwargs):
187 # type: (*Any, **Any) -> Any
188 _drain_queue()
189 return old_to_json(*args, **kwargs)
190
191 lambda_bootstrap.to_json = sentry_to_json
192 else:
193 lambda_bootstrap.LambdaRuntimeClient.post_init_error = _wrap_init_error(
194 lambda_bootstrap.LambdaRuntimeClient.post_init_error
195 )
196
197 old_handle_event_request = lambda_bootstrap.handle_event_request
198
199 def sentry_handle_event_request( # type: ignore
200 lambda_runtime_client, request_handler, *args, **kwargs
201 ):
202 request_handler = _wrap_handler(request_handler)
203 return old_handle_event_request(
204 lambda_runtime_client, request_handler, *args, **kwargs
205 )
206
207 lambda_bootstrap.handle_event_request = sentry_handle_event_request
208
209 # Patch the runtime client to drain the queue. This should work
210 # even when the SDK is initialized inside of the handler
211
212 def _wrap_post_function(f):
213 # type: (F) -> F
214 def inner(*args, **kwargs):
215 # type: (*Any, **Any) -> Any
216 _drain_queue()
217 return f(*args, **kwargs)
218
219 return inner # type: ignore
220
221 lambda_bootstrap.LambdaRuntimeClient.post_invocation_result = _wrap_post_function(
222 lambda_bootstrap.LambdaRuntimeClient.post_invocation_result
223 )
224 lambda_bootstrap.LambdaRuntimeClient.post_invocation_error = _wrap_post_function(
225 lambda_bootstrap.LambdaRuntimeClient.post_invocation_error
226 )
227
228
229 def _make_request_event_processor(aws_event, aws_context, configured_timeout):
230 # type: (Any, Any, Any) -> EventProcessor
231 start_time = datetime.now()
232
233 def event_processor(event, hint, start_time=start_time):
234 # type: (Event, Hint, datetime) -> Optional[Event]
235 remaining_time_in_milis = aws_context.get_remaining_time_in_millis()
236 exec_duration = configured_timeout - remaining_time_in_milis
237
238 extra = event.setdefault("extra", {})
239 extra["lambda"] = {
240 "function_name": aws_context.function_name,
241 "function_version": aws_context.function_version,
242 "invoked_function_arn": aws_context.invoked_function_arn,
243 "aws_request_id": aws_context.aws_request_id,
244 "execution_duration_in_millis": exec_duration,
245 "remaining_time_in_millis": remaining_time_in_milis,
246 }
247
248 extra["cloudwatch logs"] = {
249 "url": _get_cloudwatch_logs_url(aws_context, start_time),
250 "log_group": aws_context.log_group_name,
251 "log_stream": aws_context.log_stream_name,
252 }
253
254 request = event.get("request", {})
255
256 if "httpMethod" in aws_event:
257 request["method"] = aws_event["httpMethod"]
258
259 request["url"] = _get_url(aws_event, aws_context)
260
261 if "queryStringParameters" in aws_event:
262 request["query_string"] = aws_event["queryStringParameters"]
263
264 if "headers" in aws_event:
265 request["headers"] = _filter_headers(aws_event["headers"])
266
267 if aws_event.get("body", None):
268 # Unfortunately couldn't find a way to get structured body from AWS
269 # event. Meaning every body is unstructured to us.
270 request["data"] = AnnotatedValue("", {"rem": [["!raw", "x", 0, 0]]})
271
272 if _should_send_default_pii():
273 user_info = event.setdefault("user", {})
274
275 id = aws_event.get("identity", {}).get("userArn")
276 if id is not None:
277 user_info.setdefault("id", id)
278
279 ip = aws_event.get("identity", {}).get("sourceIp")
280 if ip is not None:
281 user_info.setdefault("ip_address", ip)
282
283 event["request"] = request
284
285 return event
286
287 return event_processor
288
289
290 def _get_url(event, context):
291 # type: (Any, Any) -> str
292 path = event.get("path", None)
293 headers = event.get("headers", {})
294 host = headers.get("Host", None)
295 proto = headers.get("X-Forwarded-Proto", None)
296 if proto and host and path:
297 return "{}://{}{}".format(proto, host, path)
298 return "awslambda:///{}".format(context.function_name)
299
300
301 def _get_cloudwatch_logs_url(context, start_time):
302 # type: (Any, datetime) -> str
303 """
304 Generates a CloudWatchLogs console URL based on the context object
305
306 Arguments:
307 context {Any} -- context from lambda handler
308
309 Returns:
310 str -- AWS Console URL to logs.
311 """
312 formatstring = "%Y-%m-%dT%H:%M:%S"
313
314 url = (
315 "https://console.aws.amazon.com/cloudwatch/home?region={region}"
316 "#logEventViewer:group={log_group};stream={log_stream}"
317 ";start={start_time};end={end_time}"
318 ).format(
319 region=environ.get("AWS_REGION"),
320 log_group=context.log_group_name,
321 log_stream=context.log_stream_name,
322 start_time=(start_time - timedelta(seconds=1)).strftime(formatstring),
323 end_time=(datetime.now() + timedelta(seconds=2)).strftime(formatstring),
324 )
325
326 return url
327
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sentry_sdk/integrations/aws_lambda.py b/sentry_sdk/integrations/aws_lambda.py
--- a/sentry_sdk/integrations/aws_lambda.py
+++ b/sentry_sdk/integrations/aws_lambda.py
@@ -1,7 +1,6 @@
from datetime import datetime, timedelta
from os import environ
import sys
-import json
from sentry_sdk.hub import Hub, _should_send_default_pii
from sentry_sdk._compat import reraise
@@ -42,19 +41,15 @@
if integration is None:
return init_error(*args, **kwargs)
- # Fetch Initialization error details from arguments
- error = json.loads(args[1])
-
# If an integration is there, a client has to be there.
client = hub.client # type: Any
- with hub.push_scope() as scope:
- with capture_internal_exceptions():
+ with capture_internal_exceptions():
+ with hub.configure_scope() as scope:
scope.clear_breadcrumbs()
- # Checking if there is any error/exception which is raised in the runtime
- # environment from arguments and, re-raising it to capture it as an event.
- if error.get("errorType"):
- exc_info = sys.exc_info()
+
+ exc_info = sys.exc_info()
+ if exc_info and all(exc_info):
event, hint = event_from_exception(
exc_info,
client_options=client.options,
@@ -140,25 +135,39 @@
@staticmethod
def setup_once():
# type: () -> None
- import __main__ as lambda_bootstrap # type: ignore
-
- pre_37 = True # Python 3.6 or 2.7
-
- if not hasattr(lambda_bootstrap, "handle_http_request"):
- try:
- import bootstrap as lambda_bootstrap # type: ignore
- pre_37 = False # Python 3.7
- except ImportError:
- pass
+ # Python 2.7: Everything is in `__main__`.
+ #
+ # Python 3.7: If the bootstrap module is *already imported*, it is the
+ # one we actually want to use (no idea what's in __main__)
+ #
+ # On Python 3.8 bootstrap is also importable, but will be the same file
+ # as __main__ imported under a different name:
+ #
+ # sys.modules['__main__'].__file__ == sys.modules['bootstrap'].__file__
+ # sys.modules['__main__'] is not sys.modules['bootstrap']
+ #
+ # Such a setup would then make all monkeypatches useless.
+ if "bootstrap" in sys.modules:
+ lambda_bootstrap = sys.modules["bootstrap"] # type: Any
+ elif "__main__" in sys.modules:
+ lambda_bootstrap = sys.modules["__main__"]
+ else:
+ logger.warning(
+ "Not running in AWS Lambda environment, "
+ "AwsLambdaIntegration disabled (could not find bootstrap module)"
+ )
+ return
if not hasattr(lambda_bootstrap, "handle_event_request"):
logger.warning(
"Not running in AWS Lambda environment, "
- "AwsLambdaIntegration disabled"
+ "AwsLambdaIntegration disabled (could not find handle_event_request)"
)
return
+ pre_37 = hasattr(lambda_bootstrap, "handle_http_request") # Python 3.6 or 2.7
+
if pre_37:
old_handle_event_request = lambda_bootstrap.handle_event_request
| {"golden_diff": "diff --git a/sentry_sdk/integrations/aws_lambda.py b/sentry_sdk/integrations/aws_lambda.py\n--- a/sentry_sdk/integrations/aws_lambda.py\n+++ b/sentry_sdk/integrations/aws_lambda.py\n@@ -1,7 +1,6 @@\n from datetime import datetime, timedelta\n from os import environ\n import sys\n-import json\n \n from sentry_sdk.hub import Hub, _should_send_default_pii\n from sentry_sdk._compat import reraise\n@@ -42,19 +41,15 @@\n if integration is None:\n return init_error(*args, **kwargs)\n \n- # Fetch Initialization error details from arguments\n- error = json.loads(args[1])\n-\n # If an integration is there, a client has to be there.\n client = hub.client # type: Any\n \n- with hub.push_scope() as scope:\n- with capture_internal_exceptions():\n+ with capture_internal_exceptions():\n+ with hub.configure_scope() as scope:\n scope.clear_breadcrumbs()\n- # Checking if there is any error/exception which is raised in the runtime\n- # environment from arguments and, re-raising it to capture it as an event.\n- if error.get(\"errorType\"):\n- exc_info = sys.exc_info()\n+\n+ exc_info = sys.exc_info()\n+ if exc_info and all(exc_info):\n event, hint = event_from_exception(\n exc_info,\n client_options=client.options,\n@@ -140,25 +135,39 @@\n @staticmethod\n def setup_once():\n # type: () -> None\n- import __main__ as lambda_bootstrap # type: ignore\n-\n- pre_37 = True # Python 3.6 or 2.7\n-\n- if not hasattr(lambda_bootstrap, \"handle_http_request\"):\n- try:\n- import bootstrap as lambda_bootstrap # type: ignore\n \n- pre_37 = False # Python 3.7\n- except ImportError:\n- pass\n+ # Python 2.7: Everything is in `__main__`.\n+ #\n+ # Python 3.7: If the bootstrap module is *already imported*, it is the\n+ # one we actually want to use (no idea what's in __main__)\n+ #\n+ # On Python 3.8 bootstrap is also importable, but will be the same file\n+ # as __main__ imported under a different name:\n+ #\n+ # sys.modules['__main__'].__file__ == sys.modules['bootstrap'].__file__\n+ # sys.modules['__main__'] is not sys.modules['bootstrap']\n+ #\n+ # Such a setup would then make all monkeypatches useless.\n+ if \"bootstrap\" in sys.modules:\n+ lambda_bootstrap = sys.modules[\"bootstrap\"] # type: Any\n+ elif \"__main__\" in sys.modules:\n+ lambda_bootstrap = sys.modules[\"__main__\"]\n+ else:\n+ logger.warning(\n+ \"Not running in AWS Lambda environment, \"\n+ \"AwsLambdaIntegration disabled (could not find bootstrap module)\"\n+ )\n+ return\n \n if not hasattr(lambda_bootstrap, \"handle_event_request\"):\n logger.warning(\n \"Not running in AWS Lambda environment, \"\n- \"AwsLambdaIntegration disabled\"\n+ \"AwsLambdaIntegration disabled (could not find handle_event_request)\"\n )\n return\n \n+ pre_37 = hasattr(lambda_bootstrap, \"handle_http_request\") # Python 3.6 or 2.7\n+\n if pre_37:\n old_handle_event_request = lambda_bootstrap.handle_event_request\n", "issue": "AWS Lambda under Python 3.8 is currently broken\nIt seems they have done some updates on the runtime that broke our integration.\n", "before_files": [{"content": "from datetime import datetime, timedelta\nfrom os import environ\nimport sys\nimport json\n\nfrom sentry_sdk.hub import Hub, _should_send_default_pii\nfrom sentry_sdk._compat import reraise\nfrom sentry_sdk.utils import (\n AnnotatedValue,\n capture_internal_exceptions,\n event_from_exception,\n logger,\n TimeoutThread,\n)\nfrom sentry_sdk.integrations import Integration\nfrom sentry_sdk.integrations._wsgi_common import _filter_headers\n\nfrom sentry_sdk._types import MYPY\n\nif 
MYPY:\n from typing import Any\n from typing import TypeVar\n from typing import Callable\n from typing import Optional\n\n from sentry_sdk._types import EventProcessor, Event, Hint\n\n F = TypeVar(\"F\", bound=Callable[..., Any])\n\n# Constants\nTIMEOUT_WARNING_BUFFER = 1500 # Buffer time required to send timeout warning to Sentry\nMILLIS_TO_SECONDS = 1000.0\n\n\ndef _wrap_init_error(init_error):\n # type: (F) -> F\n def sentry_init_error(*args, **kwargs):\n # type: (*Any, **Any) -> Any\n\n hub = Hub.current\n integration = hub.get_integration(AwsLambdaIntegration)\n if integration is None:\n return init_error(*args, **kwargs)\n\n # Fetch Initialization error details from arguments\n error = json.loads(args[1])\n\n # If an integration is there, a client has to be there.\n client = hub.client # type: Any\n\n with hub.push_scope() as scope:\n with capture_internal_exceptions():\n scope.clear_breadcrumbs()\n # Checking if there is any error/exception which is raised in the runtime\n # environment from arguments and, re-raising it to capture it as an event.\n if error.get(\"errorType\"):\n exc_info = sys.exc_info()\n event, hint = event_from_exception(\n exc_info,\n client_options=client.options,\n mechanism={\"type\": \"aws_lambda\", \"handled\": False},\n )\n hub.capture_event(event, hint=hint)\n\n return init_error(*args, **kwargs)\n\n return sentry_init_error # type: ignore\n\n\ndef _wrap_handler(handler):\n # type: (F) -> F\n def sentry_handler(event, context, *args, **kwargs):\n # type: (Any, Any, *Any, **Any) -> Any\n hub = Hub.current\n integration = hub.get_integration(AwsLambdaIntegration)\n if integration is None:\n return handler(event, context, *args, **kwargs)\n\n # If an integration is there, a client has to be there.\n client = hub.client # type: Any\n configured_time = context.get_remaining_time_in_millis()\n\n with hub.push_scope() as scope:\n with capture_internal_exceptions():\n scope.clear_breadcrumbs()\n scope.transaction = context.function_name\n scope.add_event_processor(\n _make_request_event_processor(event, context, configured_time)\n )\n # Starting the Timeout thread only if the configured time is greater than Timeout warning\n # buffer and timeout_warning parameter is set True.\n if (\n integration.timeout_warning\n and configured_time > TIMEOUT_WARNING_BUFFER\n ):\n waiting_time = (\n configured_time - TIMEOUT_WARNING_BUFFER\n ) / MILLIS_TO_SECONDS\n\n timeout_thread = TimeoutThread(\n waiting_time, configured_time / MILLIS_TO_SECONDS\n )\n\n # Starting the thread to raise timeout warning exception\n timeout_thread.start()\n\n try:\n return handler(event, context, *args, **kwargs)\n except Exception:\n exc_info = sys.exc_info()\n event, hint = event_from_exception(\n exc_info,\n client_options=client.options,\n mechanism={\"type\": \"aws_lambda\", \"handled\": False},\n )\n hub.capture_event(event, hint=hint)\n reraise(*exc_info)\n\n return sentry_handler # type: ignore\n\n\ndef _drain_queue():\n # type: () -> None\n with capture_internal_exceptions():\n hub = Hub.current\n integration = hub.get_integration(AwsLambdaIntegration)\n if integration is not None:\n # Flush out the event queue before AWS kills the\n # process.\n hub.flush()\n\n\nclass AwsLambdaIntegration(Integration):\n identifier = \"aws_lambda\"\n\n def __init__(self, timeout_warning=False):\n # type: (bool) -> None\n self.timeout_warning = timeout_warning\n\n @staticmethod\n def setup_once():\n # type: () -> None\n import __main__ as lambda_bootstrap # type: ignore\n\n pre_37 = True # Python 3.6 or 
2.7\n\n if not hasattr(lambda_bootstrap, \"handle_http_request\"):\n try:\n import bootstrap as lambda_bootstrap # type: ignore\n\n pre_37 = False # Python 3.7\n except ImportError:\n pass\n\n if not hasattr(lambda_bootstrap, \"handle_event_request\"):\n logger.warning(\n \"Not running in AWS Lambda environment, \"\n \"AwsLambdaIntegration disabled\"\n )\n return\n\n if pre_37:\n old_handle_event_request = lambda_bootstrap.handle_event_request\n\n def sentry_handle_event_request(request_handler, *args, **kwargs):\n # type: (Any, *Any, **Any) -> Any\n request_handler = _wrap_handler(request_handler)\n return old_handle_event_request(request_handler, *args, **kwargs)\n\n lambda_bootstrap.handle_event_request = sentry_handle_event_request\n\n old_handle_http_request = lambda_bootstrap.handle_http_request\n\n def sentry_handle_http_request(request_handler, *args, **kwargs):\n # type: (Any, *Any, **Any) -> Any\n request_handler = _wrap_handler(request_handler)\n return old_handle_http_request(request_handler, *args, **kwargs)\n\n lambda_bootstrap.handle_http_request = sentry_handle_http_request\n\n # Patch to_json to drain the queue. This should work even when the\n # SDK is initialized inside of the handler\n\n old_to_json = lambda_bootstrap.to_json\n\n def sentry_to_json(*args, **kwargs):\n # type: (*Any, **Any) -> Any\n _drain_queue()\n return old_to_json(*args, **kwargs)\n\n lambda_bootstrap.to_json = sentry_to_json\n else:\n lambda_bootstrap.LambdaRuntimeClient.post_init_error = _wrap_init_error(\n lambda_bootstrap.LambdaRuntimeClient.post_init_error\n )\n\n old_handle_event_request = lambda_bootstrap.handle_event_request\n\n def sentry_handle_event_request( # type: ignore\n lambda_runtime_client, request_handler, *args, **kwargs\n ):\n request_handler = _wrap_handler(request_handler)\n return old_handle_event_request(\n lambda_runtime_client, request_handler, *args, **kwargs\n )\n\n lambda_bootstrap.handle_event_request = sentry_handle_event_request\n\n # Patch the runtime client to drain the queue. 
This should work\n # even when the SDK is initialized inside of the handler\n\n def _wrap_post_function(f):\n # type: (F) -> F\n def inner(*args, **kwargs):\n # type: (*Any, **Any) -> Any\n _drain_queue()\n return f(*args, **kwargs)\n\n return inner # type: ignore\n\n lambda_bootstrap.LambdaRuntimeClient.post_invocation_result = _wrap_post_function(\n lambda_bootstrap.LambdaRuntimeClient.post_invocation_result\n )\n lambda_bootstrap.LambdaRuntimeClient.post_invocation_error = _wrap_post_function(\n lambda_bootstrap.LambdaRuntimeClient.post_invocation_error\n )\n\n\ndef _make_request_event_processor(aws_event, aws_context, configured_timeout):\n # type: (Any, Any, Any) -> EventProcessor\n start_time = datetime.now()\n\n def event_processor(event, hint, start_time=start_time):\n # type: (Event, Hint, datetime) -> Optional[Event]\n remaining_time_in_milis = aws_context.get_remaining_time_in_millis()\n exec_duration = configured_timeout - remaining_time_in_milis\n\n extra = event.setdefault(\"extra\", {})\n extra[\"lambda\"] = {\n \"function_name\": aws_context.function_name,\n \"function_version\": aws_context.function_version,\n \"invoked_function_arn\": aws_context.invoked_function_arn,\n \"aws_request_id\": aws_context.aws_request_id,\n \"execution_duration_in_millis\": exec_duration,\n \"remaining_time_in_millis\": remaining_time_in_milis,\n }\n\n extra[\"cloudwatch logs\"] = {\n \"url\": _get_cloudwatch_logs_url(aws_context, start_time),\n \"log_group\": aws_context.log_group_name,\n \"log_stream\": aws_context.log_stream_name,\n }\n\n request = event.get(\"request\", {})\n\n if \"httpMethod\" in aws_event:\n request[\"method\"] = aws_event[\"httpMethod\"]\n\n request[\"url\"] = _get_url(aws_event, aws_context)\n\n if \"queryStringParameters\" in aws_event:\n request[\"query_string\"] = aws_event[\"queryStringParameters\"]\n\n if \"headers\" in aws_event:\n request[\"headers\"] = _filter_headers(aws_event[\"headers\"])\n\n if aws_event.get(\"body\", None):\n # Unfortunately couldn't find a way to get structured body from AWS\n # event. 
Meaning every body is unstructured to us.\n request[\"data\"] = AnnotatedValue(\"\", {\"rem\": [[\"!raw\", \"x\", 0, 0]]})\n\n if _should_send_default_pii():\n user_info = event.setdefault(\"user\", {})\n\n id = aws_event.get(\"identity\", {}).get(\"userArn\")\n if id is not None:\n user_info.setdefault(\"id\", id)\n\n ip = aws_event.get(\"identity\", {}).get(\"sourceIp\")\n if ip is not None:\n user_info.setdefault(\"ip_address\", ip)\n\n event[\"request\"] = request\n\n return event\n\n return event_processor\n\n\ndef _get_url(event, context):\n # type: (Any, Any) -> str\n path = event.get(\"path\", None)\n headers = event.get(\"headers\", {})\n host = headers.get(\"Host\", None)\n proto = headers.get(\"X-Forwarded-Proto\", None)\n if proto and host and path:\n return \"{}://{}{}\".format(proto, host, path)\n return \"awslambda:///{}\".format(context.function_name)\n\n\ndef _get_cloudwatch_logs_url(context, start_time):\n # type: (Any, datetime) -> str\n \"\"\"\n Generates a CloudWatchLogs console URL based on the context object\n\n Arguments:\n context {Any} -- context from lambda handler\n\n Returns:\n str -- AWS Console URL to logs.\n \"\"\"\n formatstring = \"%Y-%m-%dT%H:%M:%S\"\n\n url = (\n \"https://console.aws.amazon.com/cloudwatch/home?region={region}\"\n \"#logEventViewer:group={log_group};stream={log_stream}\"\n \";start={start_time};end={end_time}\"\n ).format(\n region=environ.get(\"AWS_REGION\"),\n log_group=context.log_group_name,\n log_stream=context.log_stream_name,\n start_time=(start_time - timedelta(seconds=1)).strftime(formatstring),\n end_time=(datetime.now() + timedelta(seconds=2)).strftime(formatstring),\n )\n\n return url\n", "path": "sentry_sdk/integrations/aws_lambda.py"}], "after_files": [{"content": "from datetime import datetime, timedelta\nfrom os import environ\nimport sys\n\nfrom sentry_sdk.hub import Hub, _should_send_default_pii\nfrom sentry_sdk._compat import reraise\nfrom sentry_sdk.utils import (\n AnnotatedValue,\n capture_internal_exceptions,\n event_from_exception,\n logger,\n TimeoutThread,\n)\nfrom sentry_sdk.integrations import Integration\nfrom sentry_sdk.integrations._wsgi_common import _filter_headers\n\nfrom sentry_sdk._types import MYPY\n\nif MYPY:\n from typing import Any\n from typing import TypeVar\n from typing import Callable\n from typing import Optional\n\n from sentry_sdk._types import EventProcessor, Event, Hint\n\n F = TypeVar(\"F\", bound=Callable[..., Any])\n\n# Constants\nTIMEOUT_WARNING_BUFFER = 1500 # Buffer time required to send timeout warning to Sentry\nMILLIS_TO_SECONDS = 1000.0\n\n\ndef _wrap_init_error(init_error):\n # type: (F) -> F\n def sentry_init_error(*args, **kwargs):\n # type: (*Any, **Any) -> Any\n\n hub = Hub.current\n integration = hub.get_integration(AwsLambdaIntegration)\n if integration is None:\n return init_error(*args, **kwargs)\n\n # If an integration is there, a client has to be there.\n client = hub.client # type: Any\n\n with capture_internal_exceptions():\n with hub.configure_scope() as scope:\n scope.clear_breadcrumbs()\n\n exc_info = sys.exc_info()\n if exc_info and all(exc_info):\n event, hint = event_from_exception(\n exc_info,\n client_options=client.options,\n mechanism={\"type\": \"aws_lambda\", \"handled\": False},\n )\n hub.capture_event(event, hint=hint)\n\n return init_error(*args, **kwargs)\n\n return sentry_init_error # type: ignore\n\n\ndef _wrap_handler(handler):\n # type: (F) -> F\n def sentry_handler(event, context, *args, **kwargs):\n # type: (Any, Any, *Any, **Any) -> Any\n hub 
= Hub.current\n integration = hub.get_integration(AwsLambdaIntegration)\n if integration is None:\n return handler(event, context, *args, **kwargs)\n\n # If an integration is there, a client has to be there.\n client = hub.client # type: Any\n configured_time = context.get_remaining_time_in_millis()\n\n with hub.push_scope() as scope:\n with capture_internal_exceptions():\n scope.clear_breadcrumbs()\n scope.transaction = context.function_name\n scope.add_event_processor(\n _make_request_event_processor(event, context, configured_time)\n )\n # Starting the Timeout thread only if the configured time is greater than Timeout warning\n # buffer and timeout_warning parameter is set True.\n if (\n integration.timeout_warning\n and configured_time > TIMEOUT_WARNING_BUFFER\n ):\n waiting_time = (\n configured_time - TIMEOUT_WARNING_BUFFER\n ) / MILLIS_TO_SECONDS\n\n timeout_thread = TimeoutThread(\n waiting_time, configured_time / MILLIS_TO_SECONDS\n )\n\n # Starting the thread to raise timeout warning exception\n timeout_thread.start()\n\n try:\n return handler(event, context, *args, **kwargs)\n except Exception:\n exc_info = sys.exc_info()\n event, hint = event_from_exception(\n exc_info,\n client_options=client.options,\n mechanism={\"type\": \"aws_lambda\", \"handled\": False},\n )\n hub.capture_event(event, hint=hint)\n reraise(*exc_info)\n\n return sentry_handler # type: ignore\n\n\ndef _drain_queue():\n # type: () -> None\n with capture_internal_exceptions():\n hub = Hub.current\n integration = hub.get_integration(AwsLambdaIntegration)\n if integration is not None:\n # Flush out the event queue before AWS kills the\n # process.\n hub.flush()\n\n\nclass AwsLambdaIntegration(Integration):\n identifier = \"aws_lambda\"\n\n def __init__(self, timeout_warning=False):\n # type: (bool) -> None\n self.timeout_warning = timeout_warning\n\n @staticmethod\n def setup_once():\n # type: () -> None\n\n # Python 2.7: Everything is in `__main__`.\n #\n # Python 3.7: If the bootstrap module is *already imported*, it is the\n # one we actually want to use (no idea what's in __main__)\n #\n # On Python 3.8 bootstrap is also importable, but will be the same file\n # as __main__ imported under a different name:\n #\n # sys.modules['__main__'].__file__ == sys.modules['bootstrap'].__file__\n # sys.modules['__main__'] is not sys.modules['bootstrap']\n #\n # Such a setup would then make all monkeypatches useless.\n if \"bootstrap\" in sys.modules:\n lambda_bootstrap = sys.modules[\"bootstrap\"] # type: Any\n elif \"__main__\" in sys.modules:\n lambda_bootstrap = sys.modules[\"__main__\"]\n else:\n logger.warning(\n \"Not running in AWS Lambda environment, \"\n \"AwsLambdaIntegration disabled (could not find bootstrap module)\"\n )\n return\n\n if not hasattr(lambda_bootstrap, \"handle_event_request\"):\n logger.warning(\n \"Not running in AWS Lambda environment, \"\n \"AwsLambdaIntegration disabled (could not find handle_event_request)\"\n )\n return\n\n pre_37 = hasattr(lambda_bootstrap, \"handle_http_request\") # Python 3.6 or 2.7\n\n if pre_37:\n old_handle_event_request = lambda_bootstrap.handle_event_request\n\n def sentry_handle_event_request(request_handler, *args, **kwargs):\n # type: (Any, *Any, **Any) -> Any\n request_handler = _wrap_handler(request_handler)\n return old_handle_event_request(request_handler, *args, **kwargs)\n\n lambda_bootstrap.handle_event_request = sentry_handle_event_request\n\n old_handle_http_request = lambda_bootstrap.handle_http_request\n\n def 
sentry_handle_http_request(request_handler, *args, **kwargs):\n # type: (Any, *Any, **Any) -> Any\n request_handler = _wrap_handler(request_handler)\n return old_handle_http_request(request_handler, *args, **kwargs)\n\n lambda_bootstrap.handle_http_request = sentry_handle_http_request\n\n # Patch to_json to drain the queue. This should work even when the\n # SDK is initialized inside of the handler\n\n old_to_json = lambda_bootstrap.to_json\n\n def sentry_to_json(*args, **kwargs):\n # type: (*Any, **Any) -> Any\n _drain_queue()\n return old_to_json(*args, **kwargs)\n\n lambda_bootstrap.to_json = sentry_to_json\n else:\n lambda_bootstrap.LambdaRuntimeClient.post_init_error = _wrap_init_error(\n lambda_bootstrap.LambdaRuntimeClient.post_init_error\n )\n\n old_handle_event_request = lambda_bootstrap.handle_event_request\n\n def sentry_handle_event_request( # type: ignore\n lambda_runtime_client, request_handler, *args, **kwargs\n ):\n request_handler = _wrap_handler(request_handler)\n return old_handle_event_request(\n lambda_runtime_client, request_handler, *args, **kwargs\n )\n\n lambda_bootstrap.handle_event_request = sentry_handle_event_request\n\n # Patch the runtime client to drain the queue. This should work\n # even when the SDK is initialized inside of the handler\n\n def _wrap_post_function(f):\n # type: (F) -> F\n def inner(*args, **kwargs):\n # type: (*Any, **Any) -> Any\n _drain_queue()\n return f(*args, **kwargs)\n\n return inner # type: ignore\n\n lambda_bootstrap.LambdaRuntimeClient.post_invocation_result = _wrap_post_function(\n lambda_bootstrap.LambdaRuntimeClient.post_invocation_result\n )\n lambda_bootstrap.LambdaRuntimeClient.post_invocation_error = _wrap_post_function(\n lambda_bootstrap.LambdaRuntimeClient.post_invocation_error\n )\n\n\ndef _make_request_event_processor(aws_event, aws_context, configured_timeout):\n # type: (Any, Any, Any) -> EventProcessor\n start_time = datetime.now()\n\n def event_processor(event, hint, start_time=start_time):\n # type: (Event, Hint, datetime) -> Optional[Event]\n remaining_time_in_milis = aws_context.get_remaining_time_in_millis()\n exec_duration = configured_timeout - remaining_time_in_milis\n\n extra = event.setdefault(\"extra\", {})\n extra[\"lambda\"] = {\n \"function_name\": aws_context.function_name,\n \"function_version\": aws_context.function_version,\n \"invoked_function_arn\": aws_context.invoked_function_arn,\n \"aws_request_id\": aws_context.aws_request_id,\n \"execution_duration_in_millis\": exec_duration,\n \"remaining_time_in_millis\": remaining_time_in_milis,\n }\n\n extra[\"cloudwatch logs\"] = {\n \"url\": _get_cloudwatch_logs_url(aws_context, start_time),\n \"log_group\": aws_context.log_group_name,\n \"log_stream\": aws_context.log_stream_name,\n }\n\n request = event.get(\"request\", {})\n\n if \"httpMethod\" in aws_event:\n request[\"method\"] = aws_event[\"httpMethod\"]\n\n request[\"url\"] = _get_url(aws_event, aws_context)\n\n if \"queryStringParameters\" in aws_event:\n request[\"query_string\"] = aws_event[\"queryStringParameters\"]\n\n if \"headers\" in aws_event:\n request[\"headers\"] = _filter_headers(aws_event[\"headers\"])\n\n if aws_event.get(\"body\", None):\n # Unfortunately couldn't find a way to get structured body from AWS\n # event. 
Meaning every body is unstructured to us.\n request[\"data\"] = AnnotatedValue(\"\", {\"rem\": [[\"!raw\", \"x\", 0, 0]]})\n\n if _should_send_default_pii():\n user_info = event.setdefault(\"user\", {})\n\n id = aws_event.get(\"identity\", {}).get(\"userArn\")\n if id is not None:\n user_info.setdefault(\"id\", id)\n\n ip = aws_event.get(\"identity\", {}).get(\"sourceIp\")\n if ip is not None:\n user_info.setdefault(\"ip_address\", ip)\n\n event[\"request\"] = request\n\n return event\n\n return event_processor\n\n\ndef _get_url(event, context):\n # type: (Any, Any) -> str\n path = event.get(\"path\", None)\n headers = event.get(\"headers\", {})\n host = headers.get(\"Host\", None)\n proto = headers.get(\"X-Forwarded-Proto\", None)\n if proto and host and path:\n return \"{}://{}{}\".format(proto, host, path)\n return \"awslambda:///{}\".format(context.function_name)\n\n\ndef _get_cloudwatch_logs_url(context, start_time):\n # type: (Any, datetime) -> str\n \"\"\"\n Generates a CloudWatchLogs console URL based on the context object\n\n Arguments:\n context {Any} -- context from lambda handler\n\n Returns:\n str -- AWS Console URL to logs.\n \"\"\"\n formatstring = \"%Y-%m-%dT%H:%M:%S\"\n\n url = (\n \"https://console.aws.amazon.com/cloudwatch/home?region={region}\"\n \"#logEventViewer:group={log_group};stream={log_stream}\"\n \";start={start_time};end={end_time}\"\n ).format(\n region=environ.get(\"AWS_REGION\"),\n log_group=context.log_group_name,\n log_stream=context.log_stream_name,\n start_time=(start_time - timedelta(seconds=1)).strftime(formatstring),\n end_time=(datetime.now() + timedelta(seconds=2)).strftime(formatstring),\n )\n\n return url\n", "path": "sentry_sdk/integrations/aws_lambda.py"}]} | 3,652 | 792 |
gh_patches_debug_21751 | rasdani/github-patches | git_diff | mozilla__bugbug-3891 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Train the component model on bugs from Fenix
It is now appropriate to include Fenix in the training dataset since Fenix bug tracking has been moved to Bugzilla.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bugbug/models/component.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import logging
7 from collections import Counter
8 from datetime import datetime, timezone
9
10 import dateutil.parser
11 import xgboost
12 from dateutil.relativedelta import relativedelta
13 from sklearn.compose import ColumnTransformer
14 from sklearn.feature_extraction import DictVectorizer
15 from sklearn.pipeline import Pipeline
16
17 from bugbug import bug_features, bugzilla, feature_cleanup, utils
18 from bugbug.bugzilla import get_product_component_count
19 from bugbug.model import BugModel
20
21 logging.basicConfig(level=logging.INFO)
22 logger = logging.getLogger(__name__)
23
24
25 class ComponentModel(BugModel):
26 PRODUCTS = {
27 "Core",
28 "External Software Affecting Firefox",
29 "DevTools",
30 "Firefox",
31 "Toolkit",
32 "WebExtensions",
33 "Firefox Build System",
34 }
35
36 PRODUCT_COMPONENTS = {
37 "Cloud Services": {"Server: Firefox Accounts"},
38 }
39
40 CONFLATED_COMPONENTS = [
41 "Core::Audio/Video",
42 "Core::DOM",
43 "Core::Graphics",
44 "Core::IPC",
45 "Core::JavaScript",
46 "Core::Layout",
47 "Core::Networking",
48 "Core::Print",
49 "Core::WebRTC",
50 "Toolkit::Password Manager",
51 "DevTools",
52 "External Software Affecting Firefox",
53 "WebExtensions",
54 "Firefox Build System",
55 ]
56
57 CONFLATED_COMPONENTS_MAPPING = {
58 "Core::DOM": "Core::DOM: Core & HTML",
59 "Core::JavaScript": "Core::JavaScript Engine",
60 "Core::Print": "Core::Printing: Output",
61 "DevTools": "DevTools::General",
62 "External Software Affecting Firefox": "External Software Affecting Firefox::Other",
63 "WebExtensions": "WebExtensions::Untriaged",
64 "Firefox Build System": "Firefox Build System::General",
65 }
66
67 def __init__(self, lemmatization=False):
68 BugModel.__init__(self, lemmatization)
69
70 self.cross_validation_enabled = False
71 self.calculate_importance = False
72
73 feature_extractors = [
74 bug_features.HasSTR(),
75 bug_features.Severity(),
76 bug_features.Keywords(),
77 bug_features.HasCrashSignature(),
78 bug_features.HasURL(),
79 bug_features.HasW3CURL(),
80 bug_features.HasGithubURL(),
81 bug_features.Whiteboard(),
82 bug_features.Patches(),
83 bug_features.Landings(),
84 ]
85
86 cleanup_functions = [
87 feature_cleanup.fileref(),
88 feature_cleanup.url(),
89 feature_cleanup.synonyms(),
90 ]
91
92 self.extraction_pipeline = Pipeline(
93 [
94 (
95 "bug_extractor",
96 bug_features.BugExtractor(
97 feature_extractors, cleanup_functions, rollback=True
98 ),
99 ),
100 ]
101 )
102
103 self.clf = Pipeline(
104 [
105 (
106 "union",
107 ColumnTransformer(
108 [
109 ("data", DictVectorizer(), "data"),
110 ("title", self.text_vectorizer(min_df=0.0001), "title"),
111 (
112 "comments",
113 self.text_vectorizer(min_df=0.0001),
114 "comments",
115 ),
116 ]
117 ),
118 ),
119 (
120 "estimator",
121 xgboost.XGBClassifier(n_jobs=utils.get_physical_cpu_count()),
122 ),
123 ]
124 )
125
126 self.CONFLATED_COMPONENTS_INVERSE_MAPPING = {
127 v: k for k, v in self.CONFLATED_COMPONENTS_MAPPING.items()
128 }
129
130 def filter_component(self, product, component):
131 full_comp = f"{product}::{component}"
132
133 if full_comp in self.CONFLATED_COMPONENTS_INVERSE_MAPPING:
134 return self.CONFLATED_COMPONENTS_INVERSE_MAPPING[full_comp]
135
136 if (product, component) in self.meaningful_product_components:
137 return full_comp
138
139 for conflated_component in self.CONFLATED_COMPONENTS:
140 if full_comp.startswith(conflated_component):
141 return conflated_component
142
143 return None
144
145 def get_labels(self):
146 product_components = {}
147 for bug_data in bugzilla.get_bugs():
148 if dateutil.parser.parse(bug_data["creation_time"]) < datetime.now(
149 timezone.utc
150 ) - relativedelta(years=2):
151 continue
152
153 product_components[bug_data["id"]] = (
154 bug_data["product"],
155 bug_data["component"],
156 )
157
158 self.meaningful_product_components = self.get_meaningful_product_components(
159 (
160 (product, component)
161 for product, component in product_components.values()
162 if self.is_meaningful(product, component)
163 )
164 )
165
166 classes = {}
167 for bug_id, (product, component) in product_components.items():
168 component = self.filter_component(product, component)
169
170 if component:
171 classes[bug_id] = component
172
173 component_counts = Counter(classes.values()).most_common()
174 top_components = set(component for component, count in component_counts)
175
176 logger.info("%d components", len(top_components))
177 for component, count in component_counts:
178 logger.info("%s: %d", component, count)
179
180 # Assert there is at least one bug for each conflated component.
181 for conflated_component in self.CONFLATED_COMPONENTS:
182 assert any(
183 conflated_component == component
184 for component, count in component_counts
185 ), f"There should be at least one bug matching {conflated_component}*"
186
187 # Assert there is at least one bug for each component the conflated components are mapped to.
188 for conflated_component_mapping in self.CONFLATED_COMPONENTS_MAPPING.values():
189 assert any(
190 conflated_component_mapping == f"{product}::{component}"
191 for product, component in product_components.values()
192 ), f"There should be at least one bug in {conflated_component_mapping}"
193
194 # Assert all conflated components are either in conflated_components_mapping or exist as components.
195 for conflated_component in self.CONFLATED_COMPONENTS:
196 assert conflated_component in self.CONFLATED_COMPONENTS_MAPPING or any(
197 conflated_component == f"{product}::{component}"
198 for product, component in product_components.values()
199 ), f"It should be possible to map {conflated_component}"
200
201 classes = {
202 bug_id: component
203 for bug_id, component in classes.items()
204 if component in top_components
205 }
206
207 return classes, set(classes.values())
208
209 def is_meaningful(self, product, component):
210 return (
211 product in self.PRODUCTS
212 and component not in ["General", "Untriaged", "Foxfooding"]
213 ) or (
214 product in self.PRODUCT_COMPONENTS
215 and component in self.PRODUCT_COMPONENTS[product]
216 )
217
218 def get_meaningful_product_components(self, full_comp_tuples, threshold_ratio=100):
219 """Filter out components which does not have more than 1% of the most common component.
220
221 Returns:
222 a set of tuples which have at least 1% of the most common tuple
223 """
224 product_component_counts = Counter(full_comp_tuples).most_common()
225
226 max_count = product_component_counts[0][1]
227 threshold = max_count / threshold_ratio
228
229 active_product_components = bugzilla.get_active_product_components(
230 list(self.PRODUCTS) + list(self.PRODUCT_COMPONENTS)
231 )
232
233 return set(
234 product_component
235 for product_component, count in product_component_counts
236 if count > threshold and product_component in active_product_components
237 )
238
239 def get_feature_names(self):
240 return self.clf.named_steps["union"].get_feature_names_out()
241
242 def check(self):
243 success = super().check()
244
245 # Get the number of bugs per full component to fasten up the check
246 bugs_number = get_product_component_count()
247
248 # Check number 1, check that the most meaningful product components
249 # still have at least a bug in this component. If the check is failing
250 # that could mean that:
251 # - A component has been renamed / removed
252 # - A component is not used anymore by developers
253
254 for product, component in self.meaningful_product_components:
255 full_comp = f"{product}::{component}"
256
257 if full_comp not in bugs_number.keys():
258 logger.warning(
259 "Component %r of product %r doesn't exists, failure",
260 component,
261 product,
262 )
263 success = False
264
265 elif bugs_number[full_comp] <= 0:
266 logger.warning(
267 "Component %r of product %r have 0 bugs or less in it, failure",
268 component,
269 product,
270 )
271 success = False
272
273 # Check number 2, check that conflated components in
274 # CONFLATED_COMPONENTS match at least one component which has more
275 # than 0 bugs
276
277 for conflated_component in self.CONFLATED_COMPONENTS:
278 matching_components = [
279 full_comp
280 for full_comp in bugs_number
281 if full_comp.startswith(conflated_component)
282 ]
283
284 if not matching_components:
285 logger.warning("%s doesn't match any component", conflated_component)
286 success = False
287 continue
288
289 matching_components_values = [
290 bugs_number[full_comp]
291 for full_comp in matching_components
292 if bugs_number[full_comp] > 0
293 ]
294
295 if not matching_components_values:
296 logger.warning(
297 "%s should match at least one component with more than 0 bugs",
298 conflated_component,
299 )
300 success = False
301
302 # Check number 3, check that values of CONFLATED_COMPONENTS_MAPPING
303 # still exist as components and have more than 0 bugs
304
305 for full_comp in self.CONFLATED_COMPONENTS_MAPPING.values():
306 if full_comp not in bugs_number:
307 logger.warning(
308 "%s from conflated component mapping doesn't exists, failure",
309 full_comp,
310 )
311 success = False
312 elif bugs_number[full_comp] <= 0:
313 logger.warning(
314 "%s from conflated component mapping have less than 1 bug, failure",
315 full_comp,
316 )
317 success = False
318
319 # Check number 4, conflated components in CONFLATED_COMPONENTS either
320 # exist as components or are in CONFLATED_COMPONENTS_MAPPING
321
322 for conflated_component in self.CONFLATED_COMPONENTS:
323 in_mapping = conflated_component in self.CONFLATED_COMPONENTS_MAPPING
324
325 matching_components = [
326 full_comp
327 for full_comp in bugs_number
328 if full_comp.startswith(conflated_component)
329 ]
330
331 if not (matching_components or in_mapping):
332 logger.warning("It should be possible to map %s", conflated_component)
333 success = False
334 continue
335
336 # Check number 5, there is no component with many bugs that is not in
337 # meaningful_product_components
338
339 # Recompute the meaningful components
340
341 def generate_meaningful_tuples():
342 for full_comp, count in bugs_number.items():
343 product, component = full_comp.split("::", 1)
344
345 if not self.is_meaningful(product, component):
346 continue
347
348 if count > 0:
349 for i in range(count):
350 yield (product, component)
351
352 meaningful_product_components = self.get_meaningful_product_components(
353 generate_meaningful_tuples(), threshold_ratio=10
354 )
355
356 if not meaningful_product_components.issubset(
357 self.meaningful_product_components
358 ):
359 logger.warning("Meaningful product components mismatch")
360
361 new_meaningful_product_components = (
362 meaningful_product_components.difference(
363 self.meaningful_product_components
364 )
365 )
366 logger.info(
367 "New meaningful product components %r",
368 new_meaningful_product_components,
369 )
370
371 success = False
372
373 return success
374
375 def get_extra_data(self):
376 return {"conflated_components_mapping": self.CONFLATED_COMPONENTS_MAPPING}
377
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bugbug/models/component.py b/bugbug/models/component.py
--- a/bugbug/models/component.py
+++ b/bugbug/models/component.py
@@ -27,6 +27,7 @@
"Core",
"External Software Affecting Firefox",
"DevTools",
+ "Fenix",
"Firefox",
"Toolkit",
"WebExtensions",
@@ -52,6 +53,7 @@
"External Software Affecting Firefox",
"WebExtensions",
"Firefox Build System",
+ "Fenix",
]
CONFLATED_COMPONENTS_MAPPING = {
@@ -62,6 +64,7 @@
"External Software Affecting Firefox": "External Software Affecting Firefox::Other",
"WebExtensions": "WebExtensions::Untriaged",
"Firefox Build System": "Firefox Build System::General",
+ "Fenix": "Fenix::General",
}
def __init__(self, lemmatization=False):
| {"golden_diff": "diff --git a/bugbug/models/component.py b/bugbug/models/component.py\n--- a/bugbug/models/component.py\n+++ b/bugbug/models/component.py\n@@ -27,6 +27,7 @@\n \"Core\",\n \"External Software Affecting Firefox\",\n \"DevTools\",\n+ \"Fenix\",\n \"Firefox\",\n \"Toolkit\",\n \"WebExtensions\",\n@@ -52,6 +53,7 @@\n \"External Software Affecting Firefox\",\n \"WebExtensions\",\n \"Firefox Build System\",\n+ \"Fenix\",\n ]\n \n CONFLATED_COMPONENTS_MAPPING = {\n@@ -62,6 +64,7 @@\n \"External Software Affecting Firefox\": \"External Software Affecting Firefox::Other\",\n \"WebExtensions\": \"WebExtensions::Untriaged\",\n \"Firefox Build System\": \"Firefox Build System::General\",\n+ \"Fenix\": \"Fenix::General\",\n }\n \n def __init__(self, lemmatization=False):\n", "issue": "Train the component model on bugs from Fenix\nIt is now appropriate to include Fenix in the training dataset since it has been moved to Bugzilla.\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport logging\nfrom collections import Counter\nfrom datetime import datetime, timezone\n\nimport dateutil.parser\nimport xgboost\nfrom dateutil.relativedelta import relativedelta\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features, bugzilla, feature_cleanup, utils\nfrom bugbug.bugzilla import get_product_component_count\nfrom bugbug.model import BugModel\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nclass ComponentModel(BugModel):\n PRODUCTS = {\n \"Core\",\n \"External Software Affecting Firefox\",\n \"DevTools\",\n \"Firefox\",\n \"Toolkit\",\n \"WebExtensions\",\n \"Firefox Build System\",\n }\n\n PRODUCT_COMPONENTS = {\n \"Cloud Services\": {\"Server: Firefox Accounts\"},\n }\n\n CONFLATED_COMPONENTS = [\n \"Core::Audio/Video\",\n \"Core::DOM\",\n \"Core::Graphics\",\n \"Core::IPC\",\n \"Core::JavaScript\",\n \"Core::Layout\",\n \"Core::Networking\",\n \"Core::Print\",\n \"Core::WebRTC\",\n \"Toolkit::Password Manager\",\n \"DevTools\",\n \"External Software Affecting Firefox\",\n \"WebExtensions\",\n \"Firefox Build System\",\n ]\n\n CONFLATED_COMPONENTS_MAPPING = {\n \"Core::DOM\": \"Core::DOM: Core & HTML\",\n \"Core::JavaScript\": \"Core::JavaScript Engine\",\n \"Core::Print\": \"Core::Printing: Output\",\n \"DevTools\": \"DevTools::General\",\n \"External Software Affecting Firefox\": \"External Software Affecting Firefox::Other\",\n \"WebExtensions\": \"WebExtensions::Untriaged\",\n \"Firefox Build System\": \"Firefox Build System::General\",\n }\n\n def __init__(self, lemmatization=False):\n BugModel.__init__(self, lemmatization)\n\n self.cross_validation_enabled = False\n self.calculate_importance = False\n\n feature_extractors = [\n bug_features.HasSTR(),\n bug_features.Severity(),\n bug_features.Keywords(),\n bug_features.HasCrashSignature(),\n bug_features.HasURL(),\n bug_features.HasW3CURL(),\n bug_features.HasGithubURL(),\n bug_features.Whiteboard(),\n bug_features.Patches(),\n bug_features.Landings(),\n ]\n\n cleanup_functions = [\n feature_cleanup.fileref(),\n feature_cleanup.url(),\n feature_cleanup.synonyms(),\n ]\n\n self.extraction_pipeline = Pipeline(\n [\n (\n \"bug_extractor\",\n 
bug_features.BugExtractor(\n feature_extractors, cleanup_functions, rollback=True\n ),\n ),\n ]\n )\n\n self.clf = Pipeline(\n [\n (\n \"union\",\n ColumnTransformer(\n [\n (\"data\", DictVectorizer(), \"data\"),\n (\"title\", self.text_vectorizer(min_df=0.0001), \"title\"),\n (\n \"comments\",\n self.text_vectorizer(min_df=0.0001),\n \"comments\",\n ),\n ]\n ),\n ),\n (\n \"estimator\",\n xgboost.XGBClassifier(n_jobs=utils.get_physical_cpu_count()),\n ),\n ]\n )\n\n self.CONFLATED_COMPONENTS_INVERSE_MAPPING = {\n v: k for k, v in self.CONFLATED_COMPONENTS_MAPPING.items()\n }\n\n def filter_component(self, product, component):\n full_comp = f\"{product}::{component}\"\n\n if full_comp in self.CONFLATED_COMPONENTS_INVERSE_MAPPING:\n return self.CONFLATED_COMPONENTS_INVERSE_MAPPING[full_comp]\n\n if (product, component) in self.meaningful_product_components:\n return full_comp\n\n for conflated_component in self.CONFLATED_COMPONENTS:\n if full_comp.startswith(conflated_component):\n return conflated_component\n\n return None\n\n def get_labels(self):\n product_components = {}\n for bug_data in bugzilla.get_bugs():\n if dateutil.parser.parse(bug_data[\"creation_time\"]) < datetime.now(\n timezone.utc\n ) - relativedelta(years=2):\n continue\n\n product_components[bug_data[\"id\"]] = (\n bug_data[\"product\"],\n bug_data[\"component\"],\n )\n\n self.meaningful_product_components = self.get_meaningful_product_components(\n (\n (product, component)\n for product, component in product_components.values()\n if self.is_meaningful(product, component)\n )\n )\n\n classes = {}\n for bug_id, (product, component) in product_components.items():\n component = self.filter_component(product, component)\n\n if component:\n classes[bug_id] = component\n\n component_counts = Counter(classes.values()).most_common()\n top_components = set(component for component, count in component_counts)\n\n logger.info(\"%d components\", len(top_components))\n for component, count in component_counts:\n logger.info(\"%s: %d\", component, count)\n\n # Assert there is at least one bug for each conflated component.\n for conflated_component in self.CONFLATED_COMPONENTS:\n assert any(\n conflated_component == component\n for component, count in component_counts\n ), f\"There should be at least one bug matching {conflated_component}*\"\n\n # Assert there is at least one bug for each component the conflated components are mapped to.\n for conflated_component_mapping in self.CONFLATED_COMPONENTS_MAPPING.values():\n assert any(\n conflated_component_mapping == f\"{product}::{component}\"\n for product, component in product_components.values()\n ), f\"There should be at least one bug in {conflated_component_mapping}\"\n\n # Assert all conflated components are either in conflated_components_mapping or exist as components.\n for conflated_component in self.CONFLATED_COMPONENTS:\n assert conflated_component in self.CONFLATED_COMPONENTS_MAPPING or any(\n conflated_component == f\"{product}::{component}\"\n for product, component in product_components.values()\n ), f\"It should be possible to map {conflated_component}\"\n\n classes = {\n bug_id: component\n for bug_id, component in classes.items()\n if component in top_components\n }\n\n return classes, set(classes.values())\n\n def is_meaningful(self, product, component):\n return (\n product in self.PRODUCTS\n and component not in [\"General\", \"Untriaged\", \"Foxfooding\"]\n ) or (\n product in self.PRODUCT_COMPONENTS\n and component in self.PRODUCT_COMPONENTS[product]\n )\n\n def 
get_meaningful_product_components(self, full_comp_tuples, threshold_ratio=100):\n \"\"\"Filter out components which does not have more than 1% of the most common component.\n\n Returns:\n a set of tuples which have at least 1% of the most common tuple\n \"\"\"\n product_component_counts = Counter(full_comp_tuples).most_common()\n\n max_count = product_component_counts[0][1]\n threshold = max_count / threshold_ratio\n\n active_product_components = bugzilla.get_active_product_components(\n list(self.PRODUCTS) + list(self.PRODUCT_COMPONENTS)\n )\n\n return set(\n product_component\n for product_component, count in product_component_counts\n if count > threshold and product_component in active_product_components\n )\n\n def get_feature_names(self):\n return self.clf.named_steps[\"union\"].get_feature_names_out()\n\n def check(self):\n success = super().check()\n\n # Get the number of bugs per full component to fasten up the check\n bugs_number = get_product_component_count()\n\n # Check number 1, check that the most meaningful product components\n # still have at least a bug in this component. If the check is failing\n # that could mean that:\n # - A component has been renamed / removed\n # - A component is not used anymore by developers\n\n for product, component in self.meaningful_product_components:\n full_comp = f\"{product}::{component}\"\n\n if full_comp not in bugs_number.keys():\n logger.warning(\n \"Component %r of product %r doesn't exists, failure\",\n component,\n product,\n )\n success = False\n\n elif bugs_number[full_comp] <= 0:\n logger.warning(\n \"Component %r of product %r have 0 bugs or less in it, failure\",\n component,\n product,\n )\n success = False\n\n # Check number 2, check that conflated components in\n # CONFLATED_COMPONENTS match at least one component which has more\n # than 0 bugs\n\n for conflated_component in self.CONFLATED_COMPONENTS:\n matching_components = [\n full_comp\n for full_comp in bugs_number\n if full_comp.startswith(conflated_component)\n ]\n\n if not matching_components:\n logger.warning(\"%s doesn't match any component\", conflated_component)\n success = False\n continue\n\n matching_components_values = [\n bugs_number[full_comp]\n for full_comp in matching_components\n if bugs_number[full_comp] > 0\n ]\n\n if not matching_components_values:\n logger.warning(\n \"%s should match at least one component with more than 0 bugs\",\n conflated_component,\n )\n success = False\n\n # Check number 3, check that values of CONFLATED_COMPONENTS_MAPPING\n # still exist as components and have more than 0 bugs\n\n for full_comp in self.CONFLATED_COMPONENTS_MAPPING.values():\n if full_comp not in bugs_number:\n logger.warning(\n \"%s from conflated component mapping doesn't exists, failure\",\n full_comp,\n )\n success = False\n elif bugs_number[full_comp] <= 0:\n logger.warning(\n \"%s from conflated component mapping have less than 1 bug, failure\",\n full_comp,\n )\n success = False\n\n # Check number 4, conflated components in CONFLATED_COMPONENTS either\n # exist as components or are in CONFLATED_COMPONENTS_MAPPING\n\n for conflated_component in self.CONFLATED_COMPONENTS:\n in_mapping = conflated_component in self.CONFLATED_COMPONENTS_MAPPING\n\n matching_components = [\n full_comp\n for full_comp in bugs_number\n if full_comp.startswith(conflated_component)\n ]\n\n if not (matching_components or in_mapping):\n logger.warning(\"It should be possible to map %s\", conflated_component)\n success = False\n continue\n\n # Check number 5, there is no component 
with many bugs that is not in\n # meaningful_product_components\n\n # Recompute the meaningful components\n\n def generate_meaningful_tuples():\n for full_comp, count in bugs_number.items():\n product, component = full_comp.split(\"::\", 1)\n\n if not self.is_meaningful(product, component):\n continue\n\n if count > 0:\n for i in range(count):\n yield (product, component)\n\n meaningful_product_components = self.get_meaningful_product_components(\n generate_meaningful_tuples(), threshold_ratio=10\n )\n\n if not meaningful_product_components.issubset(\n self.meaningful_product_components\n ):\n logger.warning(\"Meaningful product components mismatch\")\n\n new_meaningful_product_components = (\n meaningful_product_components.difference(\n self.meaningful_product_components\n )\n )\n logger.info(\n \"New meaningful product components %r\",\n new_meaningful_product_components,\n )\n\n success = False\n\n return success\n\n def get_extra_data(self):\n return {\"conflated_components_mapping\": self.CONFLATED_COMPONENTS_MAPPING}\n", "path": "bugbug/models/component.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport logging\nfrom collections import Counter\nfrom datetime import datetime, timezone\n\nimport dateutil.parser\nimport xgboost\nfrom dateutil.relativedelta import relativedelta\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features, bugzilla, feature_cleanup, utils\nfrom bugbug.bugzilla import get_product_component_count\nfrom bugbug.model import BugModel\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nclass ComponentModel(BugModel):\n PRODUCTS = {\n \"Core\",\n \"External Software Affecting Firefox\",\n \"DevTools\",\n \"Fenix\",\n \"Firefox\",\n \"Toolkit\",\n \"WebExtensions\",\n \"Firefox Build System\",\n }\n\n PRODUCT_COMPONENTS = {\n \"Cloud Services\": {\"Server: Firefox Accounts\"},\n }\n\n CONFLATED_COMPONENTS = [\n \"Core::Audio/Video\",\n \"Core::DOM\",\n \"Core::Graphics\",\n \"Core::IPC\",\n \"Core::JavaScript\",\n \"Core::Layout\",\n \"Core::Networking\",\n \"Core::Print\",\n \"Core::WebRTC\",\n \"Toolkit::Password Manager\",\n \"DevTools\",\n \"External Software Affecting Firefox\",\n \"WebExtensions\",\n \"Firefox Build System\",\n \"Fenix\",\n ]\n\n CONFLATED_COMPONENTS_MAPPING = {\n \"Core::DOM\": \"Core::DOM: Core & HTML\",\n \"Core::JavaScript\": \"Core::JavaScript Engine\",\n \"Core::Print\": \"Core::Printing: Output\",\n \"DevTools\": \"DevTools::General\",\n \"External Software Affecting Firefox\": \"External Software Affecting Firefox::Other\",\n \"WebExtensions\": \"WebExtensions::Untriaged\",\n \"Firefox Build System\": \"Firefox Build System::General\",\n \"Fenix\": \"Fenix::General\",\n }\n\n def __init__(self, lemmatization=False):\n BugModel.__init__(self, lemmatization)\n\n self.cross_validation_enabled = False\n self.calculate_importance = False\n\n feature_extractors = [\n bug_features.HasSTR(),\n bug_features.Severity(),\n bug_features.Keywords(),\n bug_features.HasCrashSignature(),\n bug_features.HasURL(),\n bug_features.HasW3CURL(),\n bug_features.HasGithubURL(),\n bug_features.Whiteboard(),\n bug_features.Patches(),\n bug_features.Landings(),\n ]\n\n cleanup_functions = [\n 
feature_cleanup.fileref(),\n feature_cleanup.url(),\n feature_cleanup.synonyms(),\n ]\n\n self.extraction_pipeline = Pipeline(\n [\n (\n \"bug_extractor\",\n bug_features.BugExtractor(\n feature_extractors, cleanup_functions, rollback=True\n ),\n ),\n ]\n )\n\n self.clf = Pipeline(\n [\n (\n \"union\",\n ColumnTransformer(\n [\n (\"data\", DictVectorizer(), \"data\"),\n (\"title\", self.text_vectorizer(min_df=0.0001), \"title\"),\n (\n \"comments\",\n self.text_vectorizer(min_df=0.0001),\n \"comments\",\n ),\n ]\n ),\n ),\n (\n \"estimator\",\n xgboost.XGBClassifier(n_jobs=utils.get_physical_cpu_count()),\n ),\n ]\n )\n\n self.CONFLATED_COMPONENTS_INVERSE_MAPPING = {\n v: k for k, v in self.CONFLATED_COMPONENTS_MAPPING.items()\n }\n\n def filter_component(self, product, component):\n full_comp = f\"{product}::{component}\"\n\n if full_comp in self.CONFLATED_COMPONENTS_INVERSE_MAPPING:\n return self.CONFLATED_COMPONENTS_INVERSE_MAPPING[full_comp]\n\n if (product, component) in self.meaningful_product_components:\n return full_comp\n\n for conflated_component in self.CONFLATED_COMPONENTS:\n if full_comp.startswith(conflated_component):\n return conflated_component\n\n return None\n\n def get_labels(self):\n product_components = {}\n for bug_data in bugzilla.get_bugs():\n if dateutil.parser.parse(bug_data[\"creation_time\"]) < datetime.now(\n timezone.utc\n ) - relativedelta(years=2):\n continue\n\n product_components[bug_data[\"id\"]] = (\n bug_data[\"product\"],\n bug_data[\"component\"],\n )\n\n self.meaningful_product_components = self.get_meaningful_product_components(\n (\n (product, component)\n for product, component in product_components.values()\n if self.is_meaningful(product, component)\n )\n )\n\n classes = {}\n for bug_id, (product, component) in product_components.items():\n component = self.filter_component(product, component)\n\n if component:\n classes[bug_id] = component\n\n component_counts = Counter(classes.values()).most_common()\n top_components = set(component for component, count in component_counts)\n\n logger.info(\"%d components\", len(top_components))\n for component, count in component_counts:\n logger.info(\"%s: %d\", component, count)\n\n # Assert there is at least one bug for each conflated component.\n for conflated_component in self.CONFLATED_COMPONENTS:\n assert any(\n conflated_component == component\n for component, count in component_counts\n ), f\"There should be at least one bug matching {conflated_component}*\"\n\n # Assert there is at least one bug for each component the conflated components are mapped to.\n for conflated_component_mapping in self.CONFLATED_COMPONENTS_MAPPING.values():\n assert any(\n conflated_component_mapping == f\"{product}::{component}\"\n for product, component in product_components.values()\n ), f\"There should be at least one bug in {conflated_component_mapping}\"\n\n # Assert all conflated components are either in conflated_components_mapping or exist as components.\n for conflated_component in self.CONFLATED_COMPONENTS:\n assert conflated_component in self.CONFLATED_COMPONENTS_MAPPING or any(\n conflated_component == f\"{product}::{component}\"\n for product, component in product_components.values()\n ), f\"It should be possible to map {conflated_component}\"\n\n classes = {\n bug_id: component\n for bug_id, component in classes.items()\n if component in top_components\n }\n\n return classes, set(classes.values())\n\n def is_meaningful(self, product, component):\n return (\n product in self.PRODUCTS\n and component not in 
[\"General\", \"Untriaged\", \"Foxfooding\"]\n ) or (\n product in self.PRODUCT_COMPONENTS\n and component in self.PRODUCT_COMPONENTS[product]\n )\n\n def get_meaningful_product_components(self, full_comp_tuples, threshold_ratio=100):\n \"\"\"Filter out components which does not have more than 1% of the most common component.\n\n Returns:\n a set of tuples which have at least 1% of the most common tuple\n \"\"\"\n product_component_counts = Counter(full_comp_tuples).most_common()\n\n max_count = product_component_counts[0][1]\n threshold = max_count / threshold_ratio\n\n active_product_components = bugzilla.get_active_product_components(\n list(self.PRODUCTS) + list(self.PRODUCT_COMPONENTS)\n )\n\n return set(\n product_component\n for product_component, count in product_component_counts\n if count > threshold and product_component in active_product_components\n )\n\n def get_feature_names(self):\n return self.clf.named_steps[\"union\"].get_feature_names_out()\n\n def check(self):\n success = super().check()\n\n # Get the number of bugs per full component to fasten up the check\n bugs_number = get_product_component_count()\n\n # Check number 1, check that the most meaningful product components\n # still have at least a bug in this component. If the check is failing\n # that could mean that:\n # - A component has been renamed / removed\n # - A component is not used anymore by developers\n\n for product, component in self.meaningful_product_components:\n full_comp = f\"{product}::{component}\"\n\n if full_comp not in bugs_number.keys():\n logger.warning(\n \"Component %r of product %r doesn't exists, failure\",\n component,\n product,\n )\n success = False\n\n elif bugs_number[full_comp] <= 0:\n logger.warning(\n \"Component %r of product %r have 0 bugs or less in it, failure\",\n component,\n product,\n )\n success = False\n\n # Check number 2, check that conflated components in\n # CONFLATED_COMPONENTS match at least one component which has more\n # than 0 bugs\n\n for conflated_component in self.CONFLATED_COMPONENTS:\n matching_components = [\n full_comp\n for full_comp in bugs_number\n if full_comp.startswith(conflated_component)\n ]\n\n if not matching_components:\n logger.warning(\"%s doesn't match any component\", conflated_component)\n success = False\n continue\n\n matching_components_values = [\n bugs_number[full_comp]\n for full_comp in matching_components\n if bugs_number[full_comp] > 0\n ]\n\n if not matching_components_values:\n logger.warning(\n \"%s should match at least one component with more than 0 bugs\",\n conflated_component,\n )\n success = False\n\n # Check number 3, check that values of CONFLATED_COMPONENTS_MAPPING\n # still exist as components and have more than 0 bugs\n\n for full_comp in self.CONFLATED_COMPONENTS_MAPPING.values():\n if full_comp not in bugs_number:\n logger.warning(\n \"%s from conflated component mapping doesn't exists, failure\",\n full_comp,\n )\n success = False\n elif bugs_number[full_comp] <= 0:\n logger.warning(\n \"%s from conflated component mapping have less than 1 bug, failure\",\n full_comp,\n )\n success = False\n\n # Check number 4, conflated components in CONFLATED_COMPONENTS either\n # exist as components or are in CONFLATED_COMPONENTS_MAPPING\n\n for conflated_component in self.CONFLATED_COMPONENTS:\n in_mapping = conflated_component in self.CONFLATED_COMPONENTS_MAPPING\n\n matching_components = [\n full_comp\n for full_comp in bugs_number\n if full_comp.startswith(conflated_component)\n ]\n\n if not (matching_components or 
in_mapping):\n logger.warning(\"It should be possible to map %s\", conflated_component)\n success = False\n continue\n\n # Check number 5, there is no component with many bugs that is not in\n # meaningful_product_components\n\n # Recompute the meaningful components\n\n def generate_meaningful_tuples():\n for full_comp, count in bugs_number.items():\n product, component = full_comp.split(\"::\", 1)\n\n if not self.is_meaningful(product, component):\n continue\n\n if count > 0:\n for i in range(count):\n yield (product, component)\n\n meaningful_product_components = self.get_meaningful_product_components(\n generate_meaningful_tuples(), threshold_ratio=10\n )\n\n if not meaningful_product_components.issubset(\n self.meaningful_product_components\n ):\n logger.warning(\"Meaningful product components mismatch\")\n\n new_meaningful_product_components = (\n meaningful_product_components.difference(\n self.meaningful_product_components\n )\n )\n logger.info(\n \"New meaningful product components %r\",\n new_meaningful_product_components,\n )\n\n success = False\n\n return success\n\n def get_extra_data(self):\n return {\"conflated_components_mapping\": self.CONFLATED_COMPONENTS_MAPPING}\n", "path": "bugbug/models/component.py"}]} | 3,910 | 224 |
gh_patches_debug_27135 | rasdani/github-patches | git_diff | encode__starlette-350 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Async executor issue / Event loop is closed
Hi!
I'm testing asynchronous GraphQL resolvers using the approach described here: https://www.starlette.io/graphql/
When I run the example as is with a simple uvicorn setup, I get the following error:
```
ERROR: Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/graphql/execution/executor.py", line 447, in resolve_or_error
return executor.execute(resolve_fn, source, info, **args)
File "/usr/local/lib/python3.7/site-packages/graphql/execution/executors/asyncio.py", line 71, in execute
future = ensure_future(result, loop=self.loop)
File "/usr/local/lib/python3.7/asyncio/tasks.py", line 581, in ensure_future
task = loop.create_task(coro_or_future)
File "/usr/local/lib/python3.7/asyncio/base_events.py", line 392, in create_task
self._check_closed()
File "/usr/local/lib/python3.7/asyncio/base_events.py", line 469, in _check_closed
raise RuntimeError('Event loop is closed')
```
My understanding is that `uvicorn.run()` calls `uvloop_setup()`, which closes the default asyncio event loop before recreating one with the uvloop policy. Meanwhile, the `AsyncioExecutor` instance passed to the GraphQL app has already called `asyncio.get_event_loop()`, and stores the default asyncio event loop that is about to be closed.
See the problematic code below, as well as my workaround in the commented code: if I create the loop myself and pass it both to the `AsyncioExecutor` constructor and to `uvicorn.run()`, my async resolver works.
Not sure if this is a bug, a documentation issue, or if I am missing something ;)
Let me know if I can help!
```python
from starlette.applications import Starlette
from uvicorn.loops.uvloop import uvloop_setup
from starlette.graphql import GraphQLApp
from graphql.execution.executors.asyncio import AsyncioExecutor
import graphene
import uvicorn
app = Starlette()
# loop = uvloop_setup()
class Query(graphene.ObjectType):
hello = graphene.String(name=graphene.String(default_value="stranger"))
async def resolve_hello(self, info, name):
# We can make asynchronous network calls here.
return "Hello " + name
# app.add_route('/', GraphQLApp(schema=graphene.Schema(query=Query), executor=AsyncioExecutor(loop)))
app.add_route('/', GraphQLApp(schema=graphene.Schema(query=Query), executor=AsyncioExecutor()))
if __name__ == '__main__':
# uvicorn.run(app, loop=loop, host='0.0.0.0', port=8000)
uvicorn.run(app, host='0.0.0.0', port=8000)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlette/graphql.py`
Content:
```
1 import functools
2 import json
3 import typing
4
5 from starlette import status
6 from starlette.background import BackgroundTasks
7 from starlette.concurrency import run_in_threadpool
8 from starlette.requests import Request
9 from starlette.responses import HTMLResponse, JSONResponse, PlainTextResponse, Response
10 from starlette.types import ASGIInstance, Receive, Scope, Send
11
12 try:
13 import graphene
14 from graphql.execution.executors.asyncio import AsyncioExecutor
15 from graphql.error import format_error as format_graphql_error
16 from graphql.error import GraphQLError
17 except ImportError: # pragma: nocover
18 graphene = None # type: ignore
19 AsyncioExecutor = None # type: ignore
20 format_graphql_error = None # type: ignore
21 GraphQLError = None # type: ignore
22
23
24 class GraphQLApp:
25 def __init__(self, schema: "graphene.Schema", executor: typing.Any = None) -> None:
26 self.schema = schema
27 self.executor = executor
28 self.is_async = isinstance(executor, AsyncioExecutor)
29
30 def __call__(self, scope: Scope) -> ASGIInstance:
31 return functools.partial(self.asgi, scope=scope)
32
33 async def asgi(self, receive: Receive, send: Send, scope: Scope) -> None:
34 request = Request(scope, receive=receive)
35 response = await self.handle_graphql(request)
36 await response(receive, send)
37
38 async def handle_graphql(self, request: Request) -> Response:
39 if request.method in ("GET", "HEAD"):
40 if "text/html" in request.headers.get("Accept", ""):
41 return await self.handle_graphiql(request)
42
43 data = request.query_params # type: typing.Mapping[str, typing.Any]
44
45 elif request.method == "POST":
46 content_type = request.headers.get("Content-Type", "")
47
48 if "application/json" in content_type:
49 data = await request.json()
50 elif "application/graphql" in content_type:
51 body = await request.body()
52 text = body.decode()
53 data = {"query": text}
54 elif "query" in request.query_params:
55 data = request.query_params
56 else:
57 return PlainTextResponse(
58 "Unsupported Media Type",
59 status_code=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,
60 )
61
62 else:
63 return PlainTextResponse(
64 "Method Not Allowed", status_code=status.HTTP_405_METHOD_NOT_ALLOWED
65 )
66
67 try:
68 query = data["query"]
69 variables = data.get("variables")
70 operation_name = data.get("operationName")
71 except KeyError:
72 return PlainTextResponse(
73 "No GraphQL query found in the request",
74 status_code=status.HTTP_400_BAD_REQUEST,
75 )
76
77 background = BackgroundTasks()
78 context = {"request": request, "background": background}
79
80 result = await self.execute(
81 query, variables=variables, context=context, operation_name=operation_name
82 )
83 error_data = (
84 [format_graphql_error(err) for err in result.errors]
85 if result.errors
86 else None
87 )
88 response_data = {"data": result.data, "errors": error_data}
89 status_code = (
90 status.HTTP_400_BAD_REQUEST if result.errors else status.HTTP_200_OK
91 )
92
93 return JSONResponse(
94 response_data, status_code=status_code, background=background
95 )
96
97 async def execute( # type: ignore
98 self, query, variables=None, context=None, operation_name=None
99 ):
100 if self.is_async:
101 return await self.schema.execute(
102 query,
103 variables=variables,
104 operation_name=operation_name,
105 executor=self.executor,
106 return_promise=True,
107 context=context,
108 )
109 else:
110 return await run_in_threadpool(
111 self.schema.execute,
112 query,
113 variables=variables,
114 operation_name=operation_name,
115 context=context,
116 )
117
118 async def handle_graphiql(self, request: Request) -> Response:
119 text = GRAPHIQL.replace("{{REQUEST_PATH}}", json.dumps(request.url.path))
120 return HTMLResponse(text)
121
122
123 GRAPHIQL = """
124 <!--
125 * Copyright (c) Facebook, Inc.
126 * All rights reserved.
127 *
128 * This source code is licensed under the license found in the
129 * LICENSE file in the root directory of this source tree.
130 -->
131 <!DOCTYPE html>
132 <html>
133 <head>
134 <style>
135 body {
136 height: 100%;
137 margin: 0;
138 width: 100%;
139 overflow: hidden;
140 }
141 #graphiql {
142 height: 100vh;
143 }
144 </style>
145 <!--
146 This GraphiQL example depends on Promise and fetch, which are available in
147 modern browsers, but can be "polyfilled" for older browsers.
148 GraphiQL itself depends on React DOM.
149 If you do not want to rely on a CDN, you can host these files locally or
150 include them directly in your favored resource bunder.
151 -->
152 <link href="//cdn.jsdelivr.net/npm/[email protected]/graphiql.css" rel="stylesheet"/>
153 <script src="//cdn.jsdelivr.net/npm/[email protected]/fetch.min.js"></script>
154 <script src="//cdn.jsdelivr.net/npm/[email protected]/umd/react.production.min.js"></script>
155 <script src="//cdn.jsdelivr.net/npm/[email protected]/umd/react-dom.production.min.js"></script>
156 <script src="//cdn.jsdelivr.net/npm/[email protected]/graphiql.min.js"></script>
157 </head>
158 <body>
159 <div id="graphiql">Loading...</div>
160 <script>
161 /**
162 * This GraphiQL example illustrates how to use some of GraphiQL's props
163 * in order to enable reading and updating the URL parameters, making
164 * link sharing of queries a little bit easier.
165 *
166 * This is only one example of this kind of feature, GraphiQL exposes
167 * various React params to enable interesting integrations.
168 */
169 // Parse the search string to get url parameters.
170 var search = window.location.search;
171 var parameters = {};
172 search.substr(1).split('&').forEach(function (entry) {
173 var eq = entry.indexOf('=');
174 if (eq >= 0) {
175 parameters[decodeURIComponent(entry.slice(0, eq))] =
176 decodeURIComponent(entry.slice(eq + 1));
177 }
178 });
179 // if variables was provided, try to format it.
180 if (parameters.variables) {
181 try {
182 parameters.variables =
183 JSON.stringify(JSON.parse(parameters.variables), null, 2);
184 } catch (e) {
185 // Do nothing, we want to display the invalid JSON as a string, rather
186 // than present an error.
187 }
188 }
189 // When the query and variables string is edited, update the URL bar so
190 // that it can be easily shared
191 function onEditQuery(newQuery) {
192 parameters.query = newQuery;
193 updateURL();
194 }
195 function onEditVariables(newVariables) {
196 parameters.variables = newVariables;
197 updateURL();
198 }
199 function onEditOperationName(newOperationName) {
200 parameters.operationName = newOperationName;
201 updateURL();
202 }
203 function updateURL() {
204 var newSearch = '?' + Object.keys(parameters).filter(function (key) {
205 return Boolean(parameters[key]);
206 }).map(function (key) {
207 return encodeURIComponent(key) + '=' +
208 encodeURIComponent(parameters[key]);
209 }).join('&');
210 history.replaceState(null, null, newSearch);
211 }
212 // Defines a GraphQL fetcher using the fetch API. You're not required to
213 // use fetch, and could instead implement graphQLFetcher however you like,
214 // as long as it returns a Promise or Observable.
215 function graphQLFetcher(graphQLParams) {
216 // This example expects a GraphQL server at the path /graphql.
217 // Change this to point wherever you host your GraphQL server.
218 return fetch({{REQUEST_PATH}}, {
219 method: 'post',
220 headers: {
221 'Accept': 'application/json',
222 'Content-Type': 'application/json',
223 },
224 body: JSON.stringify(graphQLParams),
225 credentials: 'include',
226 }).then(function (response) {
227 return response.text();
228 }).then(function (responseBody) {
229 try {
230 return JSON.parse(responseBody);
231 } catch (error) {
232 return responseBody;
233 }
234 });
235 }
236 // Render <GraphiQL /> into the body.
237 // See the README in the top level of this module to learn more about
238 // how you can customize GraphiQL by providing different values or
239 // additional child elements.
240 ReactDOM.render(
241 React.createElement(GraphiQL, {
242 fetcher: graphQLFetcher,
243 query: parameters.query,
244 variables: parameters.variables,
245 operationName: parameters.operationName,
246 onEditQuery: onEditQuery,
247 onEditVariables: onEditVariables,
248 onEditOperationName: onEditOperationName
249 }),
250 document.getElementById('graphiql')
251 );
252 </script>
253 </body>
254 </html>
255 """
256
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/starlette/graphql.py b/starlette/graphql.py
--- a/starlette/graphql.py
+++ b/starlette/graphql.py
@@ -22,15 +22,35 @@
class GraphQLApp:
- def __init__(self, schema: "graphene.Schema", executor: typing.Any = None) -> None:
+ def __init__(
+ self,
+ schema: "graphene.Schema",
+ executor: typing.Any = None,
+ executor_class: type = None,
+ ) -> None:
self.schema = schema
- self.executor = executor
- self.is_async = isinstance(executor, AsyncioExecutor)
+ if executor is None:
+ # New style in 0.10.0. Use 'executor_class'.
+ # See issue https://github.com/encode/starlette/issues/242
+ self.executor = executor
+ self.executor_class = executor_class
+ self.is_async = executor_class is not None and issubclass(
+ executor_class, AsyncioExecutor
+ )
+ else:
+ # Old style. Use 'executor'.
+ # We should remove this in the next median/major version bump.
+ self.executor = executor
+ self.executor_class = None
+ self.is_async = isinstance(executor, AsyncioExecutor)
def __call__(self, scope: Scope) -> ASGIInstance:
return functools.partial(self.asgi, scope=scope)
async def asgi(self, receive: Receive, send: Send, scope: Scope) -> None:
+ if self.executor is None and self.executor_class is not None:
+ self.executor = self.executor_class()
+
request = Request(scope, receive=receive)
response = await self.handle_graphql(request)
await response(receive, send)
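
The patch keeps the old `executor=` argument for backwards compatibility but adds an `executor_class=` argument and defers constructing the executor until the ASGI callable first runs, i.e. inside whatever event loop the server has actually installed. A minimal sketch of the new-style usage under that patch — the resolver and route come from the issue's own example, and the uvicorn invocation is illustrative rather than part of the fix:

```python
import graphene
import uvicorn
from graphql.execution.executors.asyncio import AsyncioExecutor
from starlette.applications import Starlette
from starlette.graphql import GraphQLApp


class Query(graphene.ObjectType):
    hello = graphene.String(name=graphene.String(default_value="stranger"))

    async def resolve_hello(self, info, name):
        # Asynchronous resolver; runs on the server's live event loop.
        return "Hello " + name


app = Starlette()
# Pass the executor *class*, not an instance: GraphQLApp instantiates it lazily
# in asgi(), after uvicorn/uvloop has installed its own event loop, so the
# executor never captures the default loop that gets closed at startup.
app.add_route(
    "/",
    GraphQLApp(schema=graphene.Schema(query=Query), executor_class=AsyncioExecutor),
)

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
```

Passing an already-constructed `AsyncioExecutor()` still works via the old `executor=` argument, but it re-introduces the closed-loop problem described in the issue unless the loop is created and passed in explicitly.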
| {"golden_diff": "diff --git a/starlette/graphql.py b/starlette/graphql.py\n--- a/starlette/graphql.py\n+++ b/starlette/graphql.py\n@@ -22,15 +22,35 @@\n \n \n class GraphQLApp:\n- def __init__(self, schema: \"graphene.Schema\", executor: typing.Any = None) -> None:\n+ def __init__(\n+ self,\n+ schema: \"graphene.Schema\",\n+ executor: typing.Any = None,\n+ executor_class: type = None,\n+ ) -> None:\n self.schema = schema\n- self.executor = executor\n- self.is_async = isinstance(executor, AsyncioExecutor)\n+ if executor is None:\n+ # New style in 0.10.0. Use 'executor_class'.\n+ # See issue https://github.com/encode/starlette/issues/242\n+ self.executor = executor\n+ self.executor_class = executor_class\n+ self.is_async = executor_class is not None and issubclass(\n+ executor_class, AsyncioExecutor\n+ )\n+ else:\n+ # Old style. Use 'executor'.\n+ # We should remove this in the next median/major version bump.\n+ self.executor = executor\n+ self.executor_class = None\n+ self.is_async = isinstance(executor, AsyncioExecutor)\n \n def __call__(self, scope: Scope) -> ASGIInstance:\n return functools.partial(self.asgi, scope=scope)\n \n async def asgi(self, receive: Receive, send: Send, scope: Scope) -> None:\n+ if self.executor is None and self.executor_class is not None:\n+ self.executor = self.executor_class()\n+\n request = Request(scope, receive=receive)\n response = await self.handle_graphql(request)\n await response(receive, send)\n", "issue": "Async executor issue / Event loop is closed\nHi!\r\n\r\nI'm testing asynchronous GraphQL resolvers using the approach described here: https://www.starlette.io/graphql/\r\n\r\nWhen I run the example as is with a simple uvicorn setup, I get the following error:\r\n\r\n```\r\nERROR: Traceback (most recent call last):\r\n File \"/usr/local/lib/python3.7/site-packages/graphql/execution/executor.py\", line 447, in resolve_or_error\r\n return executor.execute(resolve_fn, source, info, **args)\r\n File \"/usr/local/lib/python3.7/site-packages/graphql/execution/executors/asyncio.py\", line 71, in execute\r\n future = ensure_future(result, loop=self.loop)\r\n File \"/usr/local/lib/python3.7/asyncio/tasks.py\", line 581, in ensure_future\r\n task = loop.create_task(coro_or_future)\r\n File \"/usr/local/lib/python3.7/asyncio/base_events.py\", line 392, in create_task\r\n self._check_closed()\r\n File \"/usr/local/lib/python3.7/asyncio/base_events.py\", line 469, in _check_closed\r\n raise RuntimeError('Event loop is closed')\r\n```\r\n\r\nMy understanding is that `uvicorn.run()` calls `uvloop_setup()`, which closes the default asyncio event loop before recreating one with the uvloop policy. 
Meanwhile, the `AsyncioExecutor` instance passed to the GraphQL app has already called `asyncio.get_event_loop()`, and stores the default asyncio event loop that is about to be closed.\r\n\r\nSee the problematic code below, as well as my workaround in the commented code: if I create the loop myself, pass it to the `AsyncioExecutor` instance, and to `uvicorn.run()`, my async resolver works.\r\n\r\nNot sure if this is a bug, a documentation issue, or if I am missing something ;)\r\n\r\nLet me know if I can help!\r\n\r\n```python\r\nfrom starlette.applications import Starlette\r\nfrom uvicorn.loops.uvloop import uvloop_setup\r\nfrom starlette.graphql import GraphQLApp\r\nfrom graphql.execution.executors.asyncio import AsyncioExecutor\r\nimport graphene\r\nimport uvicorn\r\n\r\napp = Starlette()\r\n\r\n\r\n# loop = uvloop_setup()\r\n\r\n\r\nclass Query(graphene.ObjectType):\r\n hello = graphene.String(name=graphene.String(default_value=\"stranger\"))\r\n\r\n async def resolve_hello(self, info, name):\r\n # We can make asynchronous network calls here.\r\n return \"Hello \" + name\r\n\r\n\r\n# app.add_route('/', GraphQLApp(schema=graphene.Schema(query=Query), executor=AsyncioExecutor(loop)))\r\napp.add_route('/', GraphQLApp(schema=graphene.Schema(query=Query), executor=AsyncioExecutor()))\r\n\r\nif __name__ == '__main__':\r\n # uvicorn.run(app, loop=loop, host='0.0.0.0', port=8000)\r\n uvicorn.run(app, host='0.0.0.0', port=8000)\r\n```\n", "before_files": [{"content": "import functools\nimport json\nimport typing\n\nfrom starlette import status\nfrom starlette.background import BackgroundTasks\nfrom starlette.concurrency import run_in_threadpool\nfrom starlette.requests import Request\nfrom starlette.responses import HTMLResponse, JSONResponse, PlainTextResponse, Response\nfrom starlette.types import ASGIInstance, Receive, Scope, Send\n\ntry:\n import graphene\n from graphql.execution.executors.asyncio import AsyncioExecutor\n from graphql.error import format_error as format_graphql_error\n from graphql.error import GraphQLError\nexcept ImportError: # pragma: nocover\n graphene = None # type: ignore\n AsyncioExecutor = None # type: ignore\n format_graphql_error = None # type: ignore\n GraphQLError = None # type: ignore\n\n\nclass GraphQLApp:\n def __init__(self, schema: \"graphene.Schema\", executor: typing.Any = None) -> None:\n self.schema = schema\n self.executor = executor\n self.is_async = isinstance(executor, AsyncioExecutor)\n\n def __call__(self, scope: Scope) -> ASGIInstance:\n return functools.partial(self.asgi, scope=scope)\n\n async def asgi(self, receive: Receive, send: Send, scope: Scope) -> None:\n request = Request(scope, receive=receive)\n response = await self.handle_graphql(request)\n await response(receive, send)\n\n async def handle_graphql(self, request: Request) -> Response:\n if request.method in (\"GET\", \"HEAD\"):\n if \"text/html\" in request.headers.get(\"Accept\", \"\"):\n return await self.handle_graphiql(request)\n\n data = request.query_params # type: typing.Mapping[str, typing.Any]\n\n elif request.method == \"POST\":\n content_type = request.headers.get(\"Content-Type\", \"\")\n\n if \"application/json\" in content_type:\n data = await request.json()\n elif \"application/graphql\" in content_type:\n body = await request.body()\n text = body.decode()\n data = {\"query\": text}\n elif \"query\" in request.query_params:\n data = request.query_params\n else:\n return PlainTextResponse(\n \"Unsupported Media Type\",\n 
status_code=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,\n )\n\n else:\n return PlainTextResponse(\n \"Method Not Allowed\", status_code=status.HTTP_405_METHOD_NOT_ALLOWED\n )\n\n try:\n query = data[\"query\"]\n variables = data.get(\"variables\")\n operation_name = data.get(\"operationName\")\n except KeyError:\n return PlainTextResponse(\n \"No GraphQL query found in the request\",\n status_code=status.HTTP_400_BAD_REQUEST,\n )\n\n background = BackgroundTasks()\n context = {\"request\": request, \"background\": background}\n\n result = await self.execute(\n query, variables=variables, context=context, operation_name=operation_name\n )\n error_data = (\n [format_graphql_error(err) for err in result.errors]\n if result.errors\n else None\n )\n response_data = {\"data\": result.data, \"errors\": error_data}\n status_code = (\n status.HTTP_400_BAD_REQUEST if result.errors else status.HTTP_200_OK\n )\n\n return JSONResponse(\n response_data, status_code=status_code, background=background\n )\n\n async def execute( # type: ignore\n self, query, variables=None, context=None, operation_name=None\n ):\n if self.is_async:\n return await self.schema.execute(\n query,\n variables=variables,\n operation_name=operation_name,\n executor=self.executor,\n return_promise=True,\n context=context,\n )\n else:\n return await run_in_threadpool(\n self.schema.execute,\n query,\n variables=variables,\n operation_name=operation_name,\n context=context,\n )\n\n async def handle_graphiql(self, request: Request) -> Response:\n text = GRAPHIQL.replace(\"{{REQUEST_PATH}}\", json.dumps(request.url.path))\n return HTMLResponse(text)\n\n\nGRAPHIQL = \"\"\"\n<!--\n * Copyright (c) Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under the license found in the\n * LICENSE file in the root directory of this source tree.\n-->\n<!DOCTYPE html>\n<html>\n <head>\n <style>\n body {\n height: 100%;\n margin: 0;\n width: 100%;\n overflow: hidden;\n }\n #graphiql {\n height: 100vh;\n }\n </style>\n <!--\n This GraphiQL example depends on Promise and fetch, which are available in\n modern browsers, but can be \"polyfilled\" for older browsers.\n GraphiQL itself depends on React DOM.\n If you do not want to rely on a CDN, you can host these files locally or\n include them directly in your favored resource bunder.\n -->\n <link href=\"//cdn.jsdelivr.net/npm/[email protected]/graphiql.css\" rel=\"stylesheet\"/>\n <script src=\"//cdn.jsdelivr.net/npm/[email protected]/fetch.min.js\"></script>\n <script src=\"//cdn.jsdelivr.net/npm/[email protected]/umd/react.production.min.js\"></script>\n <script src=\"//cdn.jsdelivr.net/npm/[email protected]/umd/react-dom.production.min.js\"></script>\n <script src=\"//cdn.jsdelivr.net/npm/[email protected]/graphiql.min.js\"></script>\n </head>\n <body>\n <div id=\"graphiql\">Loading...</div>\n <script>\n /**\n * This GraphiQL example illustrates how to use some of GraphiQL's props\n * in order to enable reading and updating the URL parameters, making\n * link sharing of queries a little bit easier.\n *\n * This is only one example of this kind of feature, GraphiQL exposes\n * various React params to enable interesting integrations.\n */\n // Parse the search string to get url parameters.\n var search = window.location.search;\n var parameters = {};\n search.substr(1).split('&').forEach(function (entry) {\n var eq = entry.indexOf('=');\n if (eq >= 0) {\n parameters[decodeURIComponent(entry.slice(0, eq))] =\n decodeURIComponent(entry.slice(eq + 1));\n }\n });\n // if variables 
was provided, try to format it.\n if (parameters.variables) {\n try {\n parameters.variables =\n JSON.stringify(JSON.parse(parameters.variables), null, 2);\n } catch (e) {\n // Do nothing, we want to display the invalid JSON as a string, rather\n // than present an error.\n }\n }\n // When the query and variables string is edited, update the URL bar so\n // that it can be easily shared\n function onEditQuery(newQuery) {\n parameters.query = newQuery;\n updateURL();\n }\n function onEditVariables(newVariables) {\n parameters.variables = newVariables;\n updateURL();\n }\n function onEditOperationName(newOperationName) {\n parameters.operationName = newOperationName;\n updateURL();\n }\n function updateURL() {\n var newSearch = '?' + Object.keys(parameters).filter(function (key) {\n return Boolean(parameters[key]);\n }).map(function (key) {\n return encodeURIComponent(key) + '=' +\n encodeURIComponent(parameters[key]);\n }).join('&');\n history.replaceState(null, null, newSearch);\n }\n // Defines a GraphQL fetcher using the fetch API. You're not required to\n // use fetch, and could instead implement graphQLFetcher however you like,\n // as long as it returns a Promise or Observable.\n function graphQLFetcher(graphQLParams) {\n // This example expects a GraphQL server at the path /graphql.\n // Change this to point wherever you host your GraphQL server.\n return fetch({{REQUEST_PATH}}, {\n method: 'post',\n headers: {\n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n },\n body: JSON.stringify(graphQLParams),\n credentials: 'include',\n }).then(function (response) {\n return response.text();\n }).then(function (responseBody) {\n try {\n return JSON.parse(responseBody);\n } catch (error) {\n return responseBody;\n }\n });\n }\n // Render <GraphiQL /> into the body.\n // See the README in the top level of this module to learn more about\n // how you can customize GraphiQL by providing different values or\n // additional child elements.\n ReactDOM.render(\n React.createElement(GraphiQL, {\n fetcher: graphQLFetcher,\n query: parameters.query,\n variables: parameters.variables,\n operationName: parameters.operationName,\n onEditQuery: onEditQuery,\n onEditVariables: onEditVariables,\n onEditOperationName: onEditOperationName\n }),\n document.getElementById('graphiql')\n );\n </script>\n </body>\n</html>\n\"\"\"\n", "path": "starlette/graphql.py"}], "after_files": [{"content": "import functools\nimport json\nimport typing\n\nfrom starlette import status\nfrom starlette.background import BackgroundTasks\nfrom starlette.concurrency import run_in_threadpool\nfrom starlette.requests import Request\nfrom starlette.responses import HTMLResponse, JSONResponse, PlainTextResponse, Response\nfrom starlette.types import ASGIInstance, Receive, Scope, Send\n\ntry:\n import graphene\n from graphql.execution.executors.asyncio import AsyncioExecutor\n from graphql.error import format_error as format_graphql_error\n from graphql.error import GraphQLError\nexcept ImportError: # pragma: nocover\n graphene = None # type: ignore\n AsyncioExecutor = None # type: ignore\n format_graphql_error = None # type: ignore\n GraphQLError = None # type: ignore\n\n\nclass GraphQLApp:\n def __init__(\n self,\n schema: \"graphene.Schema\",\n executor: typing.Any = None,\n executor_class: type = None,\n ) -> None:\n self.schema = schema\n if executor is None:\n # New style in 0.10.0. 
Use 'executor_class'.\n # See issue https://github.com/encode/starlette/issues/242\n self.executor = executor\n self.executor_class = executor_class\n self.is_async = executor_class is not None and issubclass(\n executor_class, AsyncioExecutor\n )\n else:\n # Old style. Use 'executor'.\n # We should remove this in the next median/major version bump.\n self.executor = executor\n self.executor_class = None\n self.is_async = isinstance(executor, AsyncioExecutor)\n\n def __call__(self, scope: Scope) -> ASGIInstance:\n return functools.partial(self.asgi, scope=scope)\n\n async def asgi(self, receive: Receive, send: Send, scope: Scope) -> None:\n if self.executor is None and self.executor_class is not None:\n self.executor = self.executor_class()\n\n request = Request(scope, receive=receive)\n response = await self.handle_graphql(request)\n await response(receive, send)\n\n async def handle_graphql(self, request: Request) -> Response:\n if request.method in (\"GET\", \"HEAD\"):\n if \"text/html\" in request.headers.get(\"Accept\", \"\"):\n return await self.handle_graphiql(request)\n\n data = request.query_params # type: typing.Mapping[str, typing.Any]\n\n elif request.method == \"POST\":\n content_type = request.headers.get(\"Content-Type\", \"\")\n\n if \"application/json\" in content_type:\n data = await request.json()\n elif \"application/graphql\" in content_type:\n body = await request.body()\n text = body.decode()\n data = {\"query\": text}\n elif \"query\" in request.query_params:\n data = request.query_params\n else:\n return PlainTextResponse(\n \"Unsupported Media Type\",\n status_code=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,\n )\n\n else:\n return PlainTextResponse(\n \"Method Not Allowed\", status_code=status.HTTP_405_METHOD_NOT_ALLOWED\n )\n\n try:\n query = data[\"query\"]\n variables = data.get(\"variables\")\n operation_name = data.get(\"operationName\")\n except KeyError:\n return PlainTextResponse(\n \"No GraphQL query found in the request\",\n status_code=status.HTTP_400_BAD_REQUEST,\n )\n\n background = BackgroundTasks()\n context = {\"request\": request, \"background\": background}\n\n result = await self.execute(\n query, variables=variables, context=context, operation_name=operation_name\n )\n error_data = (\n [format_graphql_error(err) for err in result.errors]\n if result.errors\n else None\n )\n response_data = {\"data\": result.data, \"errors\": error_data}\n status_code = (\n status.HTTP_400_BAD_REQUEST if result.errors else status.HTTP_200_OK\n )\n\n return JSONResponse(\n response_data, status_code=status_code, background=background\n )\n\n async def execute( # type: ignore\n self, query, variables=None, context=None, operation_name=None\n ):\n if self.is_async:\n return await self.schema.execute(\n query,\n variables=variables,\n operation_name=operation_name,\n executor=self.executor,\n return_promise=True,\n context=context,\n )\n else:\n return await run_in_threadpool(\n self.schema.execute,\n query,\n variables=variables,\n operation_name=operation_name,\n context=context,\n )\n\n async def handle_graphiql(self, request: Request) -> Response:\n text = GRAPHIQL.replace(\"{{REQUEST_PATH}}\", json.dumps(request.url.path))\n return HTMLResponse(text)\n\n\nGRAPHIQL = \"\"\"\n<!--\n * Copyright (c) Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under the license found in the\n * LICENSE file in the root directory of this source tree.\n-->\n<!DOCTYPE html>\n<html>\n <head>\n <style>\n body {\n height: 100%;\n margin: 0;\n width: 100%;\n 
overflow: hidden;\n }\n #graphiql {\n height: 100vh;\n }\n </style>\n <!--\n This GraphiQL example depends on Promise and fetch, which are available in\n modern browsers, but can be \"polyfilled\" for older browsers.\n GraphiQL itself depends on React DOM.\n If you do not want to rely on a CDN, you can host these files locally or\n include them directly in your favored resource bunder.\n -->\n <link href=\"//cdn.jsdelivr.net/npm/[email protected]/graphiql.css\" rel=\"stylesheet\"/>\n <script src=\"//cdn.jsdelivr.net/npm/[email protected]/fetch.min.js\"></script>\n <script src=\"//cdn.jsdelivr.net/npm/[email protected]/umd/react.production.min.js\"></script>\n <script src=\"//cdn.jsdelivr.net/npm/[email protected]/umd/react-dom.production.min.js\"></script>\n <script src=\"//cdn.jsdelivr.net/npm/[email protected]/graphiql.min.js\"></script>\n </head>\n <body>\n <div id=\"graphiql\">Loading...</div>\n <script>\n /**\n * This GraphiQL example illustrates how to use some of GraphiQL's props\n * in order to enable reading and updating the URL parameters, making\n * link sharing of queries a little bit easier.\n *\n * This is only one example of this kind of feature, GraphiQL exposes\n * various React params to enable interesting integrations.\n */\n // Parse the search string to get url parameters.\n var search = window.location.search;\n var parameters = {};\n search.substr(1).split('&').forEach(function (entry) {\n var eq = entry.indexOf('=');\n if (eq >= 0) {\n parameters[decodeURIComponent(entry.slice(0, eq))] =\n decodeURIComponent(entry.slice(eq + 1));\n }\n });\n // if variables was provided, try to format it.\n if (parameters.variables) {\n try {\n parameters.variables =\n JSON.stringify(JSON.parse(parameters.variables), null, 2);\n } catch (e) {\n // Do nothing, we want to display the invalid JSON as a string, rather\n // than present an error.\n }\n }\n // When the query and variables string is edited, update the URL bar so\n // that it can be easily shared\n function onEditQuery(newQuery) {\n parameters.query = newQuery;\n updateURL();\n }\n function onEditVariables(newVariables) {\n parameters.variables = newVariables;\n updateURL();\n }\n function onEditOperationName(newOperationName) {\n parameters.operationName = newOperationName;\n updateURL();\n }\n function updateURL() {\n var newSearch = '?' + Object.keys(parameters).filter(function (key) {\n return Boolean(parameters[key]);\n }).map(function (key) {\n return encodeURIComponent(key) + '=' +\n encodeURIComponent(parameters[key]);\n }).join('&');\n history.replaceState(null, null, newSearch);\n }\n // Defines a GraphQL fetcher using the fetch API. 
You're not required to\n // use fetch, and could instead implement graphQLFetcher however you like,\n // as long as it returns a Promise or Observable.\n function graphQLFetcher(graphQLParams) {\n // This example expects a GraphQL server at the path /graphql.\n // Change this to point wherever you host your GraphQL server.\n return fetch({{REQUEST_PATH}}, {\n method: 'post',\n headers: {\n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n },\n body: JSON.stringify(graphQLParams),\n credentials: 'include',\n }).then(function (response) {\n return response.text();\n }).then(function (responseBody) {\n try {\n return JSON.parse(responseBody);\n } catch (error) {\n return responseBody;\n }\n });\n }\n // Render <GraphiQL /> into the body.\n // See the README in the top level of this module to learn more about\n // how you can customize GraphiQL by providing different values or\n // additional child elements.\n ReactDOM.render(\n React.createElement(GraphiQL, {\n fetcher: graphQLFetcher,\n query: parameters.query,\n variables: parameters.variables,\n operationName: parameters.operationName,\n onEditQuery: onEditQuery,\n onEditVariables: onEditVariables,\n onEditOperationName: onEditOperationName\n }),\n document.getElementById('graphiql')\n );\n </script>\n </body>\n</html>\n\"\"\"\n", "path": "starlette/graphql.py"}]} | 3,524 | 394 |
gh_patches_debug_20347 | rasdani/github-patches | git_diff | open-mmlab__mmdetection-9694 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug] Training Error
### Prerequisite
- [X] I have searched [Issues](https://github.com/open-mmlab/mmdetection/issues) and [Discussions](https://github.com/open-mmlab/mmdetection/discussions) but cannot get the expected help.
- [X] I have read the [FAQ documentation](https://mmdetection.readthedocs.io/en/latest/faq.html) but cannot get the expected help.
- [X] The bug has not been fixed in the [latest version (master)](https://github.com/open-mmlab/mmdetection) or [latest version (3.x)](https://github.com/open-mmlab/mmdetection/tree/dev-3.x).
### Task
I have modified the scripts/configs, or I'm working on my own tasks/models/datasets.
### Branch
master branch https://github.com/open-mmlab/mmdetection
### Environment
```
sys.platform: linux
Python: 3.7.15 (default, Nov 24 2022, 21:12:53) [GCC 11.2.0]
CUDA available: True
GPU 0,1,2,3,4,5,6,7: NVIDIA GeForce RTX 3090
CUDA_HOME: /usr/local/cuda
NVCC: Cuda compilation tools, release 11.6, V11.6.55
GCC: gcc (Ubuntu 11.3.0-1ubuntu1~22.04) 11.3.0
PyTorch: 1.13.1+cu117
PyTorch compiling details: PyTorch built with:
- GCC 9.3
- C++ Version: 201402
- Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications
- Intel(R) MKL-DNN v2.6.0 (Git Hash 52b5f107dd9cf10910aaa19cb47f3abf9b349815)
- OpenMP 201511 (a.k.a. OpenMP 4.5)
- LAPACK is enabled (usually provided by MKL)
- NNPACK is enabled
- CPU capability usage: AVX2
- CUDA Runtime 11.7
- NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86
- CuDNN 8.5
- Magma 2.6.1
- Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.7, CUDNN_VERSION=8.5.0, CXX_COMPILER=/opt/rh/devtoolset-9/root/usr/bin/c++, CXX_FLAGS= -fabi-version=11 -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Werror=non-virtual-dtor -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wunused-local-typedefs -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Werror=cast-function-type -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.13.1, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF,
TorchVision: 0.14.1+cu117
OpenCV: 4.7.0
MMCV: 1.7.1
MMCV Compiler: GCC 9.3
MMCV CUDA Compiler: 11.7
MMDetection: 2.28.0+b955832
```
### Reproduces the problem - code sample
```python
python train.py $CONFIG_FILE
```
### Reproduces the problem - command or script
```python
python train.py $CONFIG_FILE
```
### Reproduces the problem - error message
```
/anaconda3/envs/dl/lib/python3.7/site-packages/mmcv/__init__.py:21: UserWarning: On January 1, 2023, MMCV will release v2.0.0, in which it will remove components related to the training process and add a data transformation module. In addition, it will rename the package names mmcv to mmcv-lite and mmcv-full to mmcv. See https://github.com/open-mmlab/mmcv/blob/master/docs/en/compatibility.md for more details.
'On January 1, 2023, MMCV will release v2.0.0, in which it will remove '
Traceback (most recent call last):
File "train.py", line 20, in <module>
from mmdet.utils import (collect_env, get_device, get_root_logger,
ImportError: cannot import name 'rfnext_init_model' from 'mmdet.utils' (/mmdetection/mmdet/utils/__init__.py)
```
### Additional information
I cloned the latest mmdetection package and intended to run a couple of benchmarks. However, the above-mentioned error message appears with both my own configs and the configs provided in `mmdet/configs`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmdet/utils/__init__.py`
Content:
```
1 # Copyright (c) OpenMMLab. All rights reserved.
2 from .ascend_util import (batch_images_to_levels,
3 get_max_num_gt_division_factor, masked_fill)
4 from .collect_env import collect_env
5 from .compat_config import compat_cfg
6 from .logger import get_caller_name, get_root_logger, log_img_scale
7 from .memory import AvoidCUDAOOM, AvoidOOM
8 from .misc import find_latest_checkpoint, update_data_root
9 from .replace_cfg_vals import replace_cfg_vals
10 from .setup_env import setup_multi_processes
11 from .split_batch import split_batch
12 from .util_distribution import build_ddp, build_dp, get_device
13
14 __all__ = [
15 'get_root_logger', 'collect_env', 'find_latest_checkpoint',
16 'update_data_root', 'setup_multi_processes', 'get_caller_name',
17 'log_img_scale', 'compat_cfg', 'split_batch', 'build_ddp', 'build_dp',
18 'get_device', 'replace_cfg_vals', 'AvoidOOM', 'AvoidCUDAOOM',
19 'get_max_num_gt_division_factor', 'masked_fill', 'batch_images_to_levels'
20 ]
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mmdet/utils/__init__.py b/mmdet/utils/__init__.py
--- a/mmdet/utils/__init__.py
+++ b/mmdet/utils/__init__.py
@@ -7,6 +7,7 @@
from .memory import AvoidCUDAOOM, AvoidOOM
from .misc import find_latest_checkpoint, update_data_root
from .replace_cfg_vals import replace_cfg_vals
+from .rfnext import rfnext_init_model
from .setup_env import setup_multi_processes
from .split_batch import split_batch
from .util_distribution import build_ddp, build_dp, get_device
@@ -16,5 +17,6 @@
'update_data_root', 'setup_multi_processes', 'get_caller_name',
'log_img_scale', 'compat_cfg', 'split_batch', 'build_ddp', 'build_dp',
'get_device', 'replace_cfg_vals', 'AvoidOOM', 'AvoidCUDAOOM',
- 'get_max_num_gt_division_factor', 'masked_fill', 'batch_images_to_levels'
+ 'get_max_num_gt_division_factor', 'masked_fill', 'batch_images_to_levels',
+ 'rfnext_init_model'
]
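
The fix only touches the re-export list in `mmdet/utils/__init__.py`; the `rfnext` module itself already exists in the package, its entry point simply wasn't exposed there (or listed in `__all__`). A quick sanity check, assuming mmdet is installed from the patched source tree (this mirrors, but is not identical to, the truncated import shown in the traceback):

```python
# Should no longer raise
# "ImportError: cannot import name 'rfnext_init_model' from 'mmdet.utils'".
from mmdet.utils import collect_env, get_device, get_root_logger, rfnext_init_model

print(rfnext_init_model)  # the function re-exported from mmdet.utils.rfnext
```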
| {"golden_diff": "diff --git a/mmdet/utils/__init__.py b/mmdet/utils/__init__.py\n--- a/mmdet/utils/__init__.py\n+++ b/mmdet/utils/__init__.py\n@@ -7,6 +7,7 @@\n from .memory import AvoidCUDAOOM, AvoidOOM\n from .misc import find_latest_checkpoint, update_data_root\n from .replace_cfg_vals import replace_cfg_vals\n+from .rfnext import rfnext_init_model\n from .setup_env import setup_multi_processes\n from .split_batch import split_batch\n from .util_distribution import build_ddp, build_dp, get_device\n@@ -16,5 +17,6 @@\n 'update_data_root', 'setup_multi_processes', 'get_caller_name',\n 'log_img_scale', 'compat_cfg', 'split_batch', 'build_ddp', 'build_dp',\n 'get_device', 'replace_cfg_vals', 'AvoidOOM', 'AvoidCUDAOOM',\n- 'get_max_num_gt_division_factor', 'masked_fill', 'batch_images_to_levels'\n+ 'get_max_num_gt_division_factor', 'masked_fill', 'batch_images_to_levels',\n+ 'rfnext_init_model'\n ]\n", "issue": "[Bug] Training Error\n### Prerequisite\n\n- [X] I have searched [Issues](https://github.com/open-mmlab/mmdetection/issues) and [Discussions](https://github.com/open-mmlab/mmdetection/discussions) but cannot get the expected help.\n- [X] I have read the [FAQ documentation](https://mmdetection.readthedocs.io/en/latest/faq.html) but cannot get the expected help.\n- [X] The bug has not been fixed in the [latest version (master)](https://github.com/open-mmlab/mmdetection) or [latest version (3.x)](https://github.com/open-mmlab/mmdetection/tree/dev-3.x).\n\n### Task\n\nI have modified the scripts/configs, or I'm working on my own tasks/models/datasets.\n\n### Branch\n\nmaster branch https://github.com/open-mmlab/mmdetection\n\n### Environment\n\n```\r\nsys.platform: linux\r\nPython: 3.7.15 (default, Nov 24 2022, 21:12:53) [GCC 11.2.0]\r\nCUDA available: True\r\nGPU 0,1,2,3,4,5,6,7: NVIDIA GeForce RTX 3090\r\nCUDA_HOME: /usr/local/cuda\r\nNVCC: Cuda compilation tools, release 11.6, V11.6.55\r\nGCC: gcc (Ubuntu 11.3.0-1ubuntu1~22.04) 11.3.0\r\nPyTorch: 1.13.1+cu117\r\nPyTorch compiling details: PyTorch built with:\r\n - GCC 9.3\r\n - C++ Version: 201402\r\n - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications\r\n - Intel(R) MKL-DNN v2.6.0 (Git Hash 52b5f107dd9cf10910aaa19cb47f3abf9b349815)\r\n - OpenMP 201511 (a.k.a. 
OpenMP 4.5)\r\n - LAPACK is enabled (usually provided by MKL)\r\n - NNPACK is enabled\r\n - CPU capability usage: AVX2\r\n - CUDA Runtime 11.7\r\n - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86\r\n - CuDNN 8.5\r\n - Magma 2.6.1\r\n - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.7, CUDNN_VERSION=8.5.0, CXX_COMPILER=/opt/rh/devtoolset-9/root/usr/bin/c++, CXX_FLAGS= -fabi-version=11 -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Werror=non-virtual-dtor -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wunused-local-typedefs -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Werror=cast-function-type -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.13.1, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF,\r\n\r\nTorchVision: 0.14.1+cu117\r\nOpenCV: 4.7.0\r\nMMCV: 1.7.1\r\nMMCV Compiler: GCC 9.3\r\nMMCV CUDA Compiler: 11.7\r\nMMDetection: 2.28.0+b955832\r\n```\n\n### Reproduces the problem - code sample\n\n```python\r\npython train.py $CONFIG_FILE\r\n```\n\n### Reproduces the problem - command or script\n\n```python\r\npython train.py $CONFIG_FILE\r\n```\n\n### Reproduces the problem - error message\n\n```\r\n/anaconda3/envs/dl/lib/python3.7/site-packages/mmcv/__init__.py:21: UserWarning: On January 1, 2023, MMCV will release v2.0.0, in which it will remove components related to the training process and add a data transformation module. In addition, it will rename the package names mmcv to mmcv-lite and mmcv-full to mmcv. See https://github.com/open-mmlab/mmcv/blob/master/docs/en/compatibility.md for more details.\r\n 'On January 1, 2023, MMCV will release v2.0.0, in which it will remove '\r\nTraceback (most recent call last):\r\n File \"train.py\", line 20, in <module>\r\n from mmdet.utils import (collect_env, get_device, get_root_logger,\r\nImportError: cannot import name 'rfnext_init_model' from 'mmdet.utils' (/mmdetection/mmdet/utils/__init__.py)\r\n```\n\n### Additional information\n\nI clone the latest mmdetection package and intend to run a couple of benchmark. However, neither my configs nor the configs provided in the `mmdet/configs` yields the abovementioned error message.\n", "before_files": [{"content": "# Copyright (c) OpenMMLab. 
All rights reserved.\nfrom .ascend_util import (batch_images_to_levels,\n get_max_num_gt_division_factor, masked_fill)\nfrom .collect_env import collect_env\nfrom .compat_config import compat_cfg\nfrom .logger import get_caller_name, get_root_logger, log_img_scale\nfrom .memory import AvoidCUDAOOM, AvoidOOM\nfrom .misc import find_latest_checkpoint, update_data_root\nfrom .replace_cfg_vals import replace_cfg_vals\nfrom .setup_env import setup_multi_processes\nfrom .split_batch import split_batch\nfrom .util_distribution import build_ddp, build_dp, get_device\n\n__all__ = [\n 'get_root_logger', 'collect_env', 'find_latest_checkpoint',\n 'update_data_root', 'setup_multi_processes', 'get_caller_name',\n 'log_img_scale', 'compat_cfg', 'split_batch', 'build_ddp', 'build_dp',\n 'get_device', 'replace_cfg_vals', 'AvoidOOM', 'AvoidCUDAOOM',\n 'get_max_num_gt_division_factor', 'masked_fill', 'batch_images_to_levels'\n]\n", "path": "mmdet/utils/__init__.py"}], "after_files": [{"content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .ascend_util import (batch_images_to_levels,\n get_max_num_gt_division_factor, masked_fill)\nfrom .collect_env import collect_env\nfrom .compat_config import compat_cfg\nfrom .logger import get_caller_name, get_root_logger, log_img_scale\nfrom .memory import AvoidCUDAOOM, AvoidOOM\nfrom .misc import find_latest_checkpoint, update_data_root\nfrom .replace_cfg_vals import replace_cfg_vals\nfrom .rfnext import rfnext_init_model\nfrom .setup_env import setup_multi_processes\nfrom .split_batch import split_batch\nfrom .util_distribution import build_ddp, build_dp, get_device\n\n__all__ = [\n 'get_root_logger', 'collect_env', 'find_latest_checkpoint',\n 'update_data_root', 'setup_multi_processes', 'get_caller_name',\n 'log_img_scale', 'compat_cfg', 'split_batch', 'build_ddp', 'build_dp',\n 'get_device', 'replace_cfg_vals', 'AvoidOOM', 'AvoidCUDAOOM',\n 'get_max_num_gt_division_factor', 'masked_fill', 'batch_images_to_levels',\n 'rfnext_init_model'\n]\n", "path": "mmdet/utils/__init__.py"}]} | 2,089 | 256 |
gh_patches_debug_22800 | rasdani/github-patches | git_diff | rlworkgroup__garage-2327 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Snapshotter loads wrong itr when asked to load last itr
Hi,
When trying to load the 'last' snapshot there's a bug, as it doesn't always load the last one.
Consider the case where we have itr_0.pkl, itr_20.pkl, itr_40.pkl, ..., itr_120.pkl.
If we want to load the 'last' snapshot, the snapshotter load code searches for 'params.pkl' and, if that is not found, sorts the list of files matching the itr_{}.pkl template (`snapshotter.py` line 162):
```
files.sort()
```
and then takes the last one.
However, sorting this way will not yield the last iteration's pkl but itr_80.pkl, because that is the highest alphabetical value.
**A proposed fix** would be to replace line 162 with the following:
```
files.sort(key=lambda x: int(os.path.splitext(x)[0].split('itr_')[1]))
```
which isolates the iteration number and sorts by its numerical value.
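
For illustration, here is a minimal, self-contained sketch of the behaviour described above; the directory listing is hypothetical and contains only itr_*.pkl snapshots:
```
import os

files = ['itr_0.pkl', 'itr_20.pkl', 'itr_40.pkl', 'itr_60.pkl',
         'itr_80.pkl', 'itr_100.pkl', 'itr_120.pkl']

files.sort()          # plain lexicographic sort
print(files[-1])      # -> 'itr_80.pkl', not the latest snapshot

files.sort(key=lambda x: int(os.path.splitext(x)[0].split('itr_')[1]))
print(files[-1])      # -> 'itr_120.pkl', the true last iteration
```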
Thanks,
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/garage/experiment/snapshotter.py`
Content:
```
1 """Defines SnapshotConfig and Snapshotter."""
2 import collections
3 import errno
4 import os
5 import pathlib
6
7 import cloudpickle
8
9 SnapshotConfig = collections.namedtuple(
10 'SnapshotConfig', ['snapshot_dir', 'snapshot_mode', 'snapshot_gap'])
11
12
13 class Snapshotter:
14 """Snapshotter snapshots training data.
15
16 When training, it saves data to binary files. When resuming,
17 it loads from saved data.
18
19 Args:
20 snapshot_dir (str): Path to save the log and iteration snapshot.
21 snapshot_mode (str): Mode to save the snapshot. Can be either "all"
22 (all iterations will be saved), "last" (only the last iteration
23 will be saved), "gap" (every snapshot_gap iterations are saved),
24 "gap_and_last" (save the last iteration as 'params.pkl' and save
25 every snapshot_gap iteration separately), "gap_overwrite" (same as
26 gap but overwrites the last saved snapshot), or "none" (do not
27 save snapshots).
28 snapshot_gap (int): Gap between snapshot iterations. Wait this number
29 of iterations before taking another snapshot.
30
31 """
32
33 def __init__(self,
34 snapshot_dir=os.path.join(os.getcwd(),
35 'data/local/experiment'),
36 snapshot_mode='last',
37 snapshot_gap=1):
38 self._snapshot_dir = snapshot_dir
39 self._snapshot_mode = snapshot_mode
40 self._snapshot_gap = snapshot_gap
41
42 if snapshot_mode == 'gap_overwrite' and snapshot_gap <= 1:
43 raise ValueError('snapshot_gap must be > 1 when using '
44 'snapshot_mode="gap_overwrite". Use '
45 'snapshot_mode="last" to snapshot after '
46 'every iteration.')
47 if snapshot_mode == 'last' and snapshot_gap != 1:
48 raise ValueError('snapshot_gap should be set to 1 if using '
49 'snapshot_mode="last". Did you mean to'
50 ' use snapshot_mode="gap"?')
51
52 pathlib.Path(snapshot_dir).mkdir(parents=True, exist_ok=True)
53
54 @property
55 def snapshot_dir(self):
56 """Return the directory of snapshot.
57
58 Returns:
59 str: The directory of snapshot
60
61 """
62 return self._snapshot_dir
63
64 @property
65 def snapshot_mode(self):
66 """Return the type of snapshot.
67
68 Returns:
69 str: The type of snapshot. Can be "all", "last", "gap",
70 "gap_overwrite", "gap_and_last", or "none".
71
72 """
73 return self._snapshot_mode
74
75 @property
76 def snapshot_gap(self):
77 """Return the gap number of snapshot.
78
79 Returns:
80 int: The gap number of snapshot.
81
82 """
83 return self._snapshot_gap
84
85 def save_itr_params(self, itr, params):
86 """Save the parameters if at the right iteration.
87
88 Args:
89 itr (int): Number of iterations. Used as the index of snapshot.
90 params (obj): Content of snapshot to be saved.
91
92 Raises:
93 ValueError: If snapshot_mode is not one of "all", "last", "gap",
94 "gap_overwrite", "gap_and_last", or "none".
95
96 """
97 file_name = None
98
99 if self._snapshot_mode == 'all':
100 file_name = os.path.join(self._snapshot_dir, 'itr_%d.pkl' % itr)
101 elif self._snapshot_mode == 'gap_overwrite':
102 if itr % self._snapshot_gap == 0:
103 file_name = os.path.join(self._snapshot_dir, 'params.pkl')
104 elif self._snapshot_mode == 'last':
105 # override previous params
106 file_name = os.path.join(self._snapshot_dir, 'params.pkl')
107 elif self._snapshot_mode == 'gap':
108 if itr % self._snapshot_gap == 0:
109 file_name = os.path.join(self._snapshot_dir,
110 'itr_%d.pkl' % itr)
111 elif self._snapshot_mode == 'gap_and_last':
112 if itr % self._snapshot_gap == 0:
113 file_name = os.path.join(self._snapshot_dir,
114 'itr_%d.pkl' % itr)
115 file_name_last = os.path.join(self._snapshot_dir, 'params.pkl')
116 with open(file_name_last, 'wb') as file:
117 cloudpickle.dump(params, file)
118 elif self._snapshot_mode == 'none':
119 pass
120 else:
121 raise ValueError('Invalid snapshot mode {}'.format(
122 self._snapshot_mode))
123
124 if file_name:
125 with open(file_name, 'wb') as file:
126 cloudpickle.dump(params, file)
127
128 def load(self, load_dir, itr='last'):
129 # pylint: disable=no-self-use
130 """Load one snapshot of parameters from disk.
131
132 Args:
133 load_dir (str): Directory of the cloudpickle file
134 to resume experiment from.
135 itr (int or string): Iteration to load.
136 Can be an integer, 'last' or 'first'.
137
138 Returns:
139 dict: Loaded snapshot.
140
141 Raises:
142 ValueError: If itr is neither an integer nor
143 one of ("last", "first").
144 FileNotFoundError: If the snapshot file is not found in load_dir.
145 NotAFileError: If the snapshot exists but is not a file.
146
147 """
148 if isinstance(itr, int) or itr.isdigit():
149 load_from_file = os.path.join(load_dir, 'itr_{}.pkl'.format(itr))
150 else:
151 if itr not in ('last', 'first'):
152 raise ValueError(
153 "itr should be an integer or 'last' or 'first'")
154
155 load_from_file = os.path.join(load_dir, 'params.pkl')
156 if not os.path.isfile(load_from_file):
157 files = [f for f in os.listdir(load_dir) if f.endswith('.pkl')]
158 if not files:
159 raise FileNotFoundError(errno.ENOENT,
160 os.strerror(errno.ENOENT),
161 '*.pkl file in', load_dir)
162 files.sort()
163 load_from_file = files[0] if itr == 'first' else files[-1]
164 load_from_file = os.path.join(load_dir, load_from_file)
165
166 if not os.path.isfile(load_from_file):
167 raise NotAFileError('File not existing: ', load_from_file)
168
169 with open(load_from_file, 'rb') as file:
170 return cloudpickle.load(file)
171
172
173 class NotAFileError(Exception):
174 """Raise when the snapshot is not a file."""
175
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/garage/experiment/snapshotter.py b/src/garage/experiment/snapshotter.py
--- a/src/garage/experiment/snapshotter.py
+++ b/src/garage/experiment/snapshotter.py
@@ -159,7 +159,7 @@
raise FileNotFoundError(errno.ENOENT,
os.strerror(errno.ENOENT),
'*.pkl file in', load_dir)
- files.sort()
+ files.sort(key=_extract_snapshot_itr)
load_from_file = files[0] if itr == 'first' else files[-1]
load_from_file = os.path.join(load_dir, load_from_file)
@@ -170,5 +170,20 @@
return cloudpickle.load(file)
+def _extract_snapshot_itr(filename: str) -> int:
+ """Extracts the integer itr from a filename.
+
+ Args:
+ filename(str): The snapshot filename.
+
+ Returns:
+ int: The snapshot as an integer.
+
+ """
+ base = os.path.splitext(filename)[0]
+ digits = base.split('itr_')[1]
+ return int(digits)
+
+
class NotAFileError(Exception):
"""Raise when the snapshot is not a file."""
| {"golden_diff": "diff --git a/src/garage/experiment/snapshotter.py b/src/garage/experiment/snapshotter.py\n--- a/src/garage/experiment/snapshotter.py\n+++ b/src/garage/experiment/snapshotter.py\n@@ -159,7 +159,7 @@\n raise FileNotFoundError(errno.ENOENT,\n os.strerror(errno.ENOENT),\n '*.pkl file in', load_dir)\n- files.sort()\n+ files.sort(key=_extract_snapshot_itr)\n load_from_file = files[0] if itr == 'first' else files[-1]\n load_from_file = os.path.join(load_dir, load_from_file)\n \n@@ -170,5 +170,20 @@\n return cloudpickle.load(file)\n \n \n+def _extract_snapshot_itr(filename: str) -> int:\n+ \"\"\"Extracts the integer itr from a filename.\n+\n+ Args:\n+ filename(str): The snapshot filename.\n+\n+ Returns:\n+ int: The snapshot as an integer.\n+\n+ \"\"\"\n+ base = os.path.splitext(filename)[0]\n+ digits = base.split('itr_')[1]\n+ return int(digits)\n+\n+\n class NotAFileError(Exception):\n \"\"\"Raise when the snapshot is not a file.\"\"\"\n", "issue": "Snapshotter loads wrong itr when asked to load last itr\nHi,\r\nWhen trying to load 'last' snapshot there's a bug as it doesnt always load the last one.\r\nconsider the case we have itr_0.pkl,itr_20.pkl,itr_40.pkl,....,itr_120.pkl.\r\n\r\nif we want to load the 'last' snapshot, the snapshotter load code search for 'params.pkl' and if not found it sorts the list of files with itr_{}.pkl template (`snapshotter.py` line 162):\r\n```\r\nfiles.sort()\r\n```\r\nand then take the last.\r\n\r\nHowever, sorting this way will not yield the last iteration pkl but itr_80.pkl because this is the highest alphabetical value.\r\n\r\n**a proposed fix** would be to replace line 162 with the following:\r\n```\r\nfiles.sort(key=lambda x: int(os.path.splitext(x)[0].split('itr_')[1]))\r\n```\r\n\r\nwhich isolate the iteration number and sort by its numerical value.\r\n\r\nThanks,\r\n \n", "before_files": [{"content": "\"\"\"Defines SnapshotConfig and Snapshotter.\"\"\"\nimport collections\nimport errno\nimport os\nimport pathlib\n\nimport cloudpickle\n\nSnapshotConfig = collections.namedtuple(\n 'SnapshotConfig', ['snapshot_dir', 'snapshot_mode', 'snapshot_gap'])\n\n\nclass Snapshotter:\n \"\"\"Snapshotter snapshots training data.\n\n When training, it saves data to binary files. When resuming,\n it loads from saved data.\n\n Args:\n snapshot_dir (str): Path to save the log and iteration snapshot.\n snapshot_mode (str): Mode to save the snapshot. Can be either \"all\"\n (all iterations will be saved), \"last\" (only the last iteration\n will be saved), \"gap\" (every snapshot_gap iterations are saved),\n \"gap_and_last\" (save the last iteration as 'params.pkl' and save\n every snapshot_gap iteration separately), \"gap_overwrite\" (same as\n gap but overwrites the last saved snapshot), or \"none\" (do not\n save snapshots).\n snapshot_gap (int): Gap between snapshot iterations. Wait this number\n of iterations before taking another snapshot.\n\n \"\"\"\n\n def __init__(self,\n snapshot_dir=os.path.join(os.getcwd(),\n 'data/local/experiment'),\n snapshot_mode='last',\n snapshot_gap=1):\n self._snapshot_dir = snapshot_dir\n self._snapshot_mode = snapshot_mode\n self._snapshot_gap = snapshot_gap\n\n if snapshot_mode == 'gap_overwrite' and snapshot_gap <= 1:\n raise ValueError('snapshot_gap must be > 1 when using '\n 'snapshot_mode=\"gap_overwrite\". 
Use '\n 'snapshot_mode=\"last\" to snapshot after '\n 'every iteration.')\n if snapshot_mode == 'last' and snapshot_gap != 1:\n raise ValueError('snapshot_gap should be set to 1 if using '\n 'snapshot_mode=\"last\". Did you mean to'\n ' use snapshot_mode=\"gap\"?')\n\n pathlib.Path(snapshot_dir).mkdir(parents=True, exist_ok=True)\n\n @property\n def snapshot_dir(self):\n \"\"\"Return the directory of snapshot.\n\n Returns:\n str: The directory of snapshot\n\n \"\"\"\n return self._snapshot_dir\n\n @property\n def snapshot_mode(self):\n \"\"\"Return the type of snapshot.\n\n Returns:\n str: The type of snapshot. Can be \"all\", \"last\", \"gap\",\n \"gap_overwrite\", \"gap_and_last\", or \"none\".\n\n \"\"\"\n return self._snapshot_mode\n\n @property\n def snapshot_gap(self):\n \"\"\"Return the gap number of snapshot.\n\n Returns:\n int: The gap number of snapshot.\n\n \"\"\"\n return self._snapshot_gap\n\n def save_itr_params(self, itr, params):\n \"\"\"Save the parameters if at the right iteration.\n\n Args:\n itr (int): Number of iterations. Used as the index of snapshot.\n params (obj): Content of snapshot to be saved.\n\n Raises:\n ValueError: If snapshot_mode is not one of \"all\", \"last\", \"gap\",\n \"gap_overwrite\", \"gap_and_last\", or \"none\".\n\n \"\"\"\n file_name = None\n\n if self._snapshot_mode == 'all':\n file_name = os.path.join(self._snapshot_dir, 'itr_%d.pkl' % itr)\n elif self._snapshot_mode == 'gap_overwrite':\n if itr % self._snapshot_gap == 0:\n file_name = os.path.join(self._snapshot_dir, 'params.pkl')\n elif self._snapshot_mode == 'last':\n # override previous params\n file_name = os.path.join(self._snapshot_dir, 'params.pkl')\n elif self._snapshot_mode == 'gap':\n if itr % self._snapshot_gap == 0:\n file_name = os.path.join(self._snapshot_dir,\n 'itr_%d.pkl' % itr)\n elif self._snapshot_mode == 'gap_and_last':\n if itr % self._snapshot_gap == 0:\n file_name = os.path.join(self._snapshot_dir,\n 'itr_%d.pkl' % itr)\n file_name_last = os.path.join(self._snapshot_dir, 'params.pkl')\n with open(file_name_last, 'wb') as file:\n cloudpickle.dump(params, file)\n elif self._snapshot_mode == 'none':\n pass\n else:\n raise ValueError('Invalid snapshot mode {}'.format(\n self._snapshot_mode))\n\n if file_name:\n with open(file_name, 'wb') as file:\n cloudpickle.dump(params, file)\n\n def load(self, load_dir, itr='last'):\n # pylint: disable=no-self-use\n \"\"\"Load one snapshot of parameters from disk.\n\n Args:\n load_dir (str): Directory of the cloudpickle file\n to resume experiment from.\n itr (int or string): Iteration to load.\n Can be an integer, 'last' or 'first'.\n\n Returns:\n dict: Loaded snapshot.\n\n Raises:\n ValueError: If itr is neither an integer nor\n one of (\"last\", \"first\").\n FileNotFoundError: If the snapshot file is not found in load_dir.\n NotAFileError: If the snapshot exists but is not a file.\n\n \"\"\"\n if isinstance(itr, int) or itr.isdigit():\n load_from_file = os.path.join(load_dir, 'itr_{}.pkl'.format(itr))\n else:\n if itr not in ('last', 'first'):\n raise ValueError(\n \"itr should be an integer or 'last' or 'first'\")\n\n load_from_file = os.path.join(load_dir, 'params.pkl')\n if not os.path.isfile(load_from_file):\n files = [f for f in os.listdir(load_dir) if f.endswith('.pkl')]\n if not files:\n raise FileNotFoundError(errno.ENOENT,\n os.strerror(errno.ENOENT),\n '*.pkl file in', load_dir)\n files.sort()\n load_from_file = files[0] if itr == 'first' else files[-1]\n load_from_file = os.path.join(load_dir, load_from_file)\n\n if not 
os.path.isfile(load_from_file):\n raise NotAFileError('File not existing: ', load_from_file)\n\n with open(load_from_file, 'rb') as file:\n return cloudpickle.load(file)\n\n\nclass NotAFileError(Exception):\n \"\"\"Raise when the snapshot is not a file.\"\"\"\n", "path": "src/garage/experiment/snapshotter.py"}], "after_files": [{"content": "\"\"\"Defines SnapshotConfig and Snapshotter.\"\"\"\nimport collections\nimport errno\nimport os\nimport pathlib\n\nimport cloudpickle\n\nSnapshotConfig = collections.namedtuple(\n 'SnapshotConfig', ['snapshot_dir', 'snapshot_mode', 'snapshot_gap'])\n\n\nclass Snapshotter:\n \"\"\"Snapshotter snapshots training data.\n\n When training, it saves data to binary files. When resuming,\n it loads from saved data.\n\n Args:\n snapshot_dir (str): Path to save the log and iteration snapshot.\n snapshot_mode (str): Mode to save the snapshot. Can be either \"all\"\n (all iterations will be saved), \"last\" (only the last iteration\n will be saved), \"gap\" (every snapshot_gap iterations are saved),\n \"gap_and_last\" (save the last iteration as 'params.pkl' and save\n every snapshot_gap iteration separately), \"gap_overwrite\" (same as\n gap but overwrites the last saved snapshot), or \"none\" (do not\n save snapshots).\n snapshot_gap (int): Gap between snapshot iterations. Wait this number\n of iterations before taking another snapshot.\n\n \"\"\"\n\n def __init__(self,\n snapshot_dir=os.path.join(os.getcwd(),\n 'data/local/experiment'),\n snapshot_mode='last',\n snapshot_gap=1):\n self._snapshot_dir = snapshot_dir\n self._snapshot_mode = snapshot_mode\n self._snapshot_gap = snapshot_gap\n\n if snapshot_mode == 'gap_overwrite' and snapshot_gap <= 1:\n raise ValueError('snapshot_gap must be > 1 when using '\n 'snapshot_mode=\"gap_overwrite\". Use '\n 'snapshot_mode=\"last\" to snapshot after '\n 'every iteration.')\n if snapshot_mode == 'last' and snapshot_gap != 1:\n raise ValueError('snapshot_gap should be set to 1 if using '\n 'snapshot_mode=\"last\". Did you mean to'\n ' use snapshot_mode=\"gap\"?')\n\n pathlib.Path(snapshot_dir).mkdir(parents=True, exist_ok=True)\n\n @property\n def snapshot_dir(self):\n \"\"\"Return the directory of snapshot.\n\n Returns:\n str: The directory of snapshot\n\n \"\"\"\n return self._snapshot_dir\n\n @property\n def snapshot_mode(self):\n \"\"\"Return the type of snapshot.\n\n Returns:\n str: The type of snapshot. Can be \"all\", \"last\", \"gap\",\n \"gap_overwrite\", \"gap_and_last\", or \"none\".\n\n \"\"\"\n return self._snapshot_mode\n\n @property\n def snapshot_gap(self):\n \"\"\"Return the gap number of snapshot.\n\n Returns:\n int: The gap number of snapshot.\n\n \"\"\"\n return self._snapshot_gap\n\n def save_itr_params(self, itr, params):\n \"\"\"Save the parameters if at the right iteration.\n\n Args:\n itr (int): Number of iterations. 
Used as the index of snapshot.\n params (obj): Content of snapshot to be saved.\n\n Raises:\n ValueError: If snapshot_mode is not one of \"all\", \"last\", \"gap\",\n \"gap_overwrite\", \"gap_and_last\", or \"none\".\n\n \"\"\"\n file_name = None\n\n if self._snapshot_mode == 'all':\n file_name = os.path.join(self._snapshot_dir, 'itr_%d.pkl' % itr)\n elif self._snapshot_mode == 'gap_overwrite':\n if itr % self._snapshot_gap == 0:\n file_name = os.path.join(self._snapshot_dir, 'params.pkl')\n elif self._snapshot_mode == 'last':\n # override previous params\n file_name = os.path.join(self._snapshot_dir, 'params.pkl')\n elif self._snapshot_mode == 'gap':\n if itr % self._snapshot_gap == 0:\n file_name = os.path.join(self._snapshot_dir,\n 'itr_%d.pkl' % itr)\n elif self._snapshot_mode == 'gap_and_last':\n if itr % self._snapshot_gap == 0:\n file_name = os.path.join(self._snapshot_dir,\n 'itr_%d.pkl' % itr)\n file_name_last = os.path.join(self._snapshot_dir, 'params.pkl')\n with open(file_name_last, 'wb') as file:\n cloudpickle.dump(params, file)\n elif self._snapshot_mode == 'none':\n pass\n else:\n raise ValueError('Invalid snapshot mode {}'.format(\n self._snapshot_mode))\n\n if file_name:\n with open(file_name, 'wb') as file:\n cloudpickle.dump(params, file)\n\n def load(self, load_dir, itr='last'):\n # pylint: disable=no-self-use\n \"\"\"Load one snapshot of parameters from disk.\n\n Args:\n load_dir (str): Directory of the cloudpickle file\n to resume experiment from.\n itr (int or string): Iteration to load.\n Can be an integer, 'last' or 'first'.\n\n Returns:\n dict: Loaded snapshot.\n\n Raises:\n ValueError: If itr is neither an integer nor\n one of (\"last\", \"first\").\n FileNotFoundError: If the snapshot file is not found in load_dir.\n NotAFileError: If the snapshot exists but is not a file.\n\n \"\"\"\n if isinstance(itr, int) or itr.isdigit():\n load_from_file = os.path.join(load_dir, 'itr_{}.pkl'.format(itr))\n else:\n if itr not in ('last', 'first'):\n raise ValueError(\n \"itr should be an integer or 'last' or 'first'\")\n\n load_from_file = os.path.join(load_dir, 'params.pkl')\n if not os.path.isfile(load_from_file):\n files = [f for f in os.listdir(load_dir) if f.endswith('.pkl')]\n if not files:\n raise FileNotFoundError(errno.ENOENT,\n os.strerror(errno.ENOENT),\n '*.pkl file in', load_dir)\n files.sort(key=_extract_snapshot_itr)\n load_from_file = files[0] if itr == 'first' else files[-1]\n load_from_file = os.path.join(load_dir, load_from_file)\n\n if not os.path.isfile(load_from_file):\n raise NotAFileError('File not existing: ', load_from_file)\n\n with open(load_from_file, 'rb') as file:\n return cloudpickle.load(file)\n\n\ndef _extract_snapshot_itr(filename: str) -> int:\n \"\"\"Extracts the integer itr from a filename.\n\n Args:\n filename(str): The snapshot filename.\n\n Returns:\n int: The snapshot as an integer.\n\n \"\"\"\n base = os.path.splitext(filename)[0]\n digits = base.split('itr_')[1]\n return int(digits)\n\n\nclass NotAFileError(Exception):\n \"\"\"Raise when the snapshot is not a file.\"\"\"\n", "path": "src/garage/experiment/snapshotter.py"}]} | 2,264 | 274 |
gh_patches_debug_15319 | rasdani/github-patches | git_diff | ibis-project__ibis-1816 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PKG: Add pre-commit, black and isort to setup.py
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 import pathlib
4 import sys
5
6 from setuptools import find_packages, setup
7
8 import versioneer
9
10 LONG_DESCRIPTION = """
11 Ibis is a productivity-centric Python big data framework.
12
13 See http://docs.ibis-project.org
14 """
15
16 VERSION = sys.version_info.major, sys.version_info.minor
17
18 impala_requires = ['hdfs>=2.0.16', 'sqlalchemy', 'requests']
19 if VERSION == (3, 5):
20 impala_requires.append('impyla<0.14.2')
21 else:
22 impala_requires.append('impyla>=0.15.0')
23
24 sqlite_requires = ['sqlalchemy']
25 postgres_requires = sqlite_requires + ['psycopg2']
26 mysql_requires = sqlite_requires + ['pymysql']
27
28 if VERSION == (3, 5):
29 mapd_requires = ['pymapd>=0.8.3,<0.11.0']
30 else:
31 mapd_requires = ['pymapd>=0.12.0']
32 kerberos_requires = ['requests-kerberos']
33 visualization_requires = ['graphviz']
34 clickhouse_requires = ['clickhouse-driver>=0.0.8', 'clickhouse-cityhash']
35 bigquery_requires = ['google-cloud-bigquery>=1.0.0', 'pydata-google-auth']
36 hdf5_requires = ['tables>=3.0.0']
37
38 if VERSION == (3, 5):
39 parquet_requires = ['pyarrow<0.12.0']
40 else:
41 parquet_requires = ['pyarrow>=0.12.0']
42
43 all_requires = (
44 impala_requires
45 + postgres_requires
46 + mapd_requires
47 + mysql_requires
48 + kerberos_requires
49 + visualization_requires
50 + clickhouse_requires
51 + bigquery_requires
52 + hdf5_requires
53 + parquet_requires
54 )
55
56 develop_requires = all_requires + ['click', 'flake8', 'mypy', 'pytest>=3']
57
58 install_requires = [
59 line.strip()
60 for line in pathlib.Path(__file__)
61 .parent.joinpath('requirements.txt')
62 .read_text()
63 .splitlines()
64 ]
65
66 setup(
67 name='ibis-framework',
68 url='https://github.com/ibis-project/ibis',
69 packages=find_packages(),
70 version=versioneer.get_version(),
71 cmdclass=versioneer.get_cmdclass(),
72 install_requires=install_requires,
73 python_requires='>=3.5',
74 extras_require={
75 'all': all_requires,
76 'develop': develop_requires,
77 'impala': impala_requires,
78 'kerberos': kerberos_requires,
79 'postgres': postgres_requires,
80 'mapd': mapd_requires,
81 'mysql': mysql_requires,
82 'sqlite': sqlite_requires,
83 'visualization': visualization_requires,
84 'clickhouse': clickhouse_requires,
85 'bigquery': bigquery_requires,
86 'hdf5': hdf5_requires,
87 'parquet': parquet_requires,
88 },
89 description="Productivity-centric Python Big Data Framework",
90 long_description=LONG_DESCRIPTION,
91 classifiers=[
92 'Development Status :: 4 - Beta',
93 'Operating System :: OS Independent',
94 'Intended Audience :: Science/Research',
95 'Programming Language :: Python',
96 'Programming Language :: Python :: 3',
97 'Topic :: Scientific/Engineering',
98 ],
99 license='Apache License, Version 2.0',
100 maintainer="Phillip Cloud",
101 maintainer_email="[email protected]",
102 )
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -53,7 +53,14 @@
+ parquet_requires
)
-develop_requires = all_requires + ['click', 'flake8', 'mypy', 'pytest>=3']
+develop_requires = all_requires + [
+ 'click',
+ 'flake8',
+ 'isort',
+ 'mypy',
+ 'pre-commit',
+ 'pytest>=3',
+]
install_requires = [
line.strip()
@@ -73,7 +80,8 @@
python_requires='>=3.5',
extras_require={
'all': all_requires,
- 'develop': develop_requires,
+ 'develop:python_version > "3.5"': develop_requires + ['black'],
+ 'develop:python_version == "3.5"': develop_requires,
'impala': impala_requires,
'kerberos': kerberos_requires,
'postgres': postgres_requires,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -53,7 +53,14 @@\n + parquet_requires\n )\n \n-develop_requires = all_requires + ['click', 'flake8', 'mypy', 'pytest>=3']\n+develop_requires = all_requires + [\n+ 'click',\n+ 'flake8',\n+ 'isort',\n+ 'mypy',\n+ 'pre-commit',\n+ 'pytest>=3',\n+]\n \n install_requires = [\n line.strip()\n@@ -73,7 +80,8 @@\n python_requires='>=3.5',\n extras_require={\n 'all': all_requires,\n- 'develop': develop_requires,\n+ 'develop:python_version > \"3.5\"': develop_requires + ['black'],\n+ 'develop:python_version == \"3.5\"': develop_requires,\n 'impala': impala_requires,\n 'kerberos': kerberos_requires,\n 'postgres': postgres_requires,\n", "issue": "PKG: Add pre-commit, black and isort to setup.py\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport pathlib\nimport sys\n\nfrom setuptools import find_packages, setup\n\nimport versioneer\n\nLONG_DESCRIPTION = \"\"\"\nIbis is a productivity-centric Python big data framework.\n\nSee http://docs.ibis-project.org\n\"\"\"\n\nVERSION = sys.version_info.major, sys.version_info.minor\n\nimpala_requires = ['hdfs>=2.0.16', 'sqlalchemy', 'requests']\nif VERSION == (3, 5):\n impala_requires.append('impyla<0.14.2')\nelse:\n impala_requires.append('impyla>=0.15.0')\n\nsqlite_requires = ['sqlalchemy']\npostgres_requires = sqlite_requires + ['psycopg2']\nmysql_requires = sqlite_requires + ['pymysql']\n\nif VERSION == (3, 5):\n mapd_requires = ['pymapd>=0.8.3,<0.11.0']\nelse:\n mapd_requires = ['pymapd>=0.12.0']\nkerberos_requires = ['requests-kerberos']\nvisualization_requires = ['graphviz']\nclickhouse_requires = ['clickhouse-driver>=0.0.8', 'clickhouse-cityhash']\nbigquery_requires = ['google-cloud-bigquery>=1.0.0', 'pydata-google-auth']\nhdf5_requires = ['tables>=3.0.0']\n\nif VERSION == (3, 5):\n parquet_requires = ['pyarrow<0.12.0']\nelse:\n parquet_requires = ['pyarrow>=0.12.0']\n\nall_requires = (\n impala_requires\n + postgres_requires\n + mapd_requires\n + mysql_requires\n + kerberos_requires\n + visualization_requires\n + clickhouse_requires\n + bigquery_requires\n + hdf5_requires\n + parquet_requires\n)\n\ndevelop_requires = all_requires + ['click', 'flake8', 'mypy', 'pytest>=3']\n\ninstall_requires = [\n line.strip()\n for line in pathlib.Path(__file__)\n .parent.joinpath('requirements.txt')\n .read_text()\n .splitlines()\n]\n\nsetup(\n name='ibis-framework',\n url='https://github.com/ibis-project/ibis',\n packages=find_packages(),\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n install_requires=install_requires,\n python_requires='>=3.5',\n extras_require={\n 'all': all_requires,\n 'develop': develop_requires,\n 'impala': impala_requires,\n 'kerberos': kerberos_requires,\n 'postgres': postgres_requires,\n 'mapd': mapd_requires,\n 'mysql': mysql_requires,\n 'sqlite': sqlite_requires,\n 'visualization': visualization_requires,\n 'clickhouse': clickhouse_requires,\n 'bigquery': bigquery_requires,\n 'hdf5': hdf5_requires,\n 'parquet': parquet_requires,\n },\n description=\"Productivity-centric Python Big Data Framework\",\n long_description=LONG_DESCRIPTION,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Operating System :: OS Independent',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Topic :: Scientific/Engineering',\n ],\n license='Apache License, Version 2.0',\n maintainer=\"Phillip Cloud\",\n maintainer_email=\"[email protected]\",\n)\n", "path": 
"setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport pathlib\nimport sys\n\nfrom setuptools import find_packages, setup\n\nimport versioneer\n\nLONG_DESCRIPTION = \"\"\"\nIbis is a productivity-centric Python big data framework.\n\nSee http://docs.ibis-project.org\n\"\"\"\n\nVERSION = sys.version_info.major, sys.version_info.minor\n\nimpala_requires = ['hdfs>=2.0.16', 'sqlalchemy', 'requests']\nif VERSION == (3, 5):\n impala_requires.append('impyla<0.14.2')\nelse:\n impala_requires.append('impyla>=0.15.0')\n\nsqlite_requires = ['sqlalchemy']\npostgres_requires = sqlite_requires + ['psycopg2']\nmysql_requires = sqlite_requires + ['pymysql']\n\nif VERSION == (3, 5):\n mapd_requires = ['pymapd>=0.8.3,<0.11.0']\nelse:\n mapd_requires = ['pymapd>=0.12.0']\nkerberos_requires = ['requests-kerberos']\nvisualization_requires = ['graphviz']\nclickhouse_requires = ['clickhouse-driver>=0.0.8', 'clickhouse-cityhash']\nbigquery_requires = ['google-cloud-bigquery>=1.0.0', 'pydata-google-auth']\nhdf5_requires = ['tables>=3.0.0']\n\nif VERSION == (3, 5):\n parquet_requires = ['pyarrow<0.12.0']\nelse:\n parquet_requires = ['pyarrow>=0.12.0']\n\nall_requires = (\n impala_requires\n + postgres_requires\n + mapd_requires\n + mysql_requires\n + kerberos_requires\n + visualization_requires\n + clickhouse_requires\n + bigquery_requires\n + hdf5_requires\n + parquet_requires\n)\n\ndevelop_requires = all_requires + [\n 'click',\n 'flake8',\n 'isort',\n 'mypy',\n 'pre-commit',\n 'pytest>=3',\n]\n\ninstall_requires = [\n line.strip()\n for line in pathlib.Path(__file__)\n .parent.joinpath('requirements.txt')\n .read_text()\n .splitlines()\n]\n\nsetup(\n name='ibis-framework',\n url='https://github.com/ibis-project/ibis',\n packages=find_packages(),\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n install_requires=install_requires,\n python_requires='>=3.5',\n extras_require={\n 'all': all_requires,\n 'develop:python_version > \"3.5\"': develop_requires + ['black'],\n 'develop:python_version == \"3.5\"': develop_requires,\n 'impala': impala_requires,\n 'kerberos': kerberos_requires,\n 'postgres': postgres_requires,\n 'mapd': mapd_requires,\n 'mysql': mysql_requires,\n 'sqlite': sqlite_requires,\n 'visualization': visualization_requires,\n 'clickhouse': clickhouse_requires,\n 'bigquery': bigquery_requires,\n 'hdf5': hdf5_requires,\n 'parquet': parquet_requires,\n },\n description=\"Productivity-centric Python Big Data Framework\",\n long_description=LONG_DESCRIPTION,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Operating System :: OS Independent',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Topic :: Scientific/Engineering',\n ],\n license='Apache License, Version 2.0',\n maintainer=\"Phillip Cloud\",\n maintainer_email=\"[email protected]\",\n)\n", "path": "setup.py"}]} | 1,220 | 220 |
gh_patches_debug_20919 | rasdani/github-patches | git_diff | scoutapp__scout_apm_python-298 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Report error requests on Pyramid
Currently, if an error occurs in a Pyramid request, we don't report it. Error cases can be just as useful to see, so we should try to do this. It looks like it's possible by rearranging the existing code.
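
A minimal sketch of the kind of rearrangement this hints at (the names handler, request, span and tracked_request mirror the existing tween shown below; this is an assumption about the shape of the fix, not the final patch):
```
def call_and_name(handler, request, span, tracked_request):
    try:
        try:
            response = handler(request)
        finally:
            # naming runs even when handler() raises, so error requests keep their route name
            if request.matched_route is not None:
                tracked_request.mark_real_request()
                span.operation = "Controller/" + request.matched_route.name
    except Exception:
        tracked_request.tag("error", "true")
        raise
    finally:
        tracked_request.stop_span()
    return response
```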
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/scout_apm/pyramid.py`
Content:
```
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import scout_apm.core
5 from scout_apm.core.config import ScoutConfig
6 from scout_apm.core.tracked_request import TrackedRequest
7 from scout_apm.core.web_requests import (
8 create_filtered_path,
9 ignore_path,
10 track_amazon_request_queue_time,
11 track_request_queue_time,
12 )
13
14
15 def includeme(config):
16 configs = {}
17 pyramid_config = config.get_settings()
18 for name in pyramid_config:
19 if name.startswith("SCOUT_"):
20 value = pyramid_config[name]
21 clean_name = name.replace("SCOUT_", "").lower()
22 configs[clean_name] = value
23 ScoutConfig.set(**configs)
24
25 if scout_apm.core.install():
26 config.add_tween("scout_apm.pyramid.instruments")
27
28
29 def instruments(handler, registry):
30 def scout_tween(request):
31 tracked_request = TrackedRequest.instance()
32 span = tracked_request.start_span(operation="Controller/Pyramid")
33
34 try:
35 path = request.path
36 # mixed() returns values as *either* single items or lists
37 url_params = [
38 (k, v) for k, vs in request.GET.dict_of_lists().items() for v in vs
39 ]
40 tracked_request.tag("path", create_filtered_path(path, url_params))
41 if ignore_path(path):
42 tracked_request.tag("ignore_transaction", True)
43
44 try:
45 # Determine a remote IP to associate with the request. The value is
46 # spoofable by the requester so this is not suitable to use in any
47 # security sensitive context.
48 user_ip = (
49 request.headers.get("x-forwarded-for", default="").split(",")[0]
50 or request.headers.get("client-ip", default="").split(",")[0]
51 or request.remote_addr
52 )
53 except Exception:
54 pass
55 else:
56 tracked_request.tag("user_ip", user_ip)
57
58 tracked_queue_time = False
59 try:
60 queue_time = request.headers.get(
61 "x-queue-start", default=""
62 ) or request.headers.get("x-request-start", default="")
63 except Exception:
64 pass
65 else:
66 tracked_queue_time = track_request_queue_time(
67 queue_time, tracked_request
68 )
69 if not tracked_queue_time:
70 try:
71 amazon_queue_time = request.headers.get(
72 "x-amzn-trace-id", default=""
73 )
74 except Exception:
75 pass
76 else:
77 track_amazon_request_queue_time(amazon_queue_time, tracked_request)
78
79 try:
80 response = handler(request)
81 except Exception:
82 tracked_request.tag("error", "true")
83 raise
84
85 # This happens further down the call chain. So time it starting
86 # above, but only name it if it gets to here.
87 if request.matched_route is not None:
88 tracked_request.mark_real_request()
89 span.operation = "Controller/" + request.matched_route.name
90
91 finally:
92 tracked_request.stop_span()
93
94 return response
95
96 return scout_tween
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/scout_apm/pyramid.py b/src/scout_apm/pyramid.py
--- a/src/scout_apm/pyramid.py
+++ b/src/scout_apm/pyramid.py
@@ -77,17 +77,18 @@
track_amazon_request_queue_time(amazon_queue_time, tracked_request)
try:
- response = handler(request)
+ try:
+ response = handler(request)
+ finally:
+ # Routing further down the call chain. So time it starting
+ # above, but only name it if it gets a name
+ if request.matched_route is not None:
+ tracked_request.mark_real_request()
+ span.operation = "Controller/" + request.matched_route.name
except Exception:
tracked_request.tag("error", "true")
raise
- # This happens further down the call chain. So time it starting
- # above, but only name it if it gets to here.
- if request.matched_route is not None:
- tracked_request.mark_real_request()
- span.operation = "Controller/" + request.matched_route.name
-
finally:
tracked_request.stop_span()
| {"golden_diff": "diff --git a/src/scout_apm/pyramid.py b/src/scout_apm/pyramid.py\n--- a/src/scout_apm/pyramid.py\n+++ b/src/scout_apm/pyramid.py\n@@ -77,17 +77,18 @@\n track_amazon_request_queue_time(amazon_queue_time, tracked_request)\n \n try:\n- response = handler(request)\n+ try:\n+ response = handler(request)\n+ finally:\n+ # Routing further down the call chain. So time it starting\n+ # above, but only name it if it gets a name\n+ if request.matched_route is not None:\n+ tracked_request.mark_real_request()\n+ span.operation = \"Controller/\" + request.matched_route.name\n except Exception:\n tracked_request.tag(\"error\", \"true\")\n raise\n \n- # This happens further down the call chain. So time it starting\n- # above, but only name it if it gets to here.\n- if request.matched_route is not None:\n- tracked_request.mark_real_request()\n- span.operation = \"Controller/\" + request.matched_route.name\n-\n finally:\n tracked_request.stop_span()\n", "issue": "Report error requests on Pyramid\nCurrently if an error occurs in a Pyramid request, we don't report it. Error cases can be just as useful to see so we should try and do this. It looks like it's possible by rearranging the existing code.\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport scout_apm.core\nfrom scout_apm.core.config import ScoutConfig\nfrom scout_apm.core.tracked_request import TrackedRequest\nfrom scout_apm.core.web_requests import (\n create_filtered_path,\n ignore_path,\n track_amazon_request_queue_time,\n track_request_queue_time,\n)\n\n\ndef includeme(config):\n configs = {}\n pyramid_config = config.get_settings()\n for name in pyramid_config:\n if name.startswith(\"SCOUT_\"):\n value = pyramid_config[name]\n clean_name = name.replace(\"SCOUT_\", \"\").lower()\n configs[clean_name] = value\n ScoutConfig.set(**configs)\n\n if scout_apm.core.install():\n config.add_tween(\"scout_apm.pyramid.instruments\")\n\n\ndef instruments(handler, registry):\n def scout_tween(request):\n tracked_request = TrackedRequest.instance()\n span = tracked_request.start_span(operation=\"Controller/Pyramid\")\n\n try:\n path = request.path\n # mixed() returns values as *either* single items or lists\n url_params = [\n (k, v) for k, vs in request.GET.dict_of_lists().items() for v in vs\n ]\n tracked_request.tag(\"path\", create_filtered_path(path, url_params))\n if ignore_path(path):\n tracked_request.tag(\"ignore_transaction\", True)\n\n try:\n # Determine a remote IP to associate with the request. 
The value is\n # spoofable by the requester so this is not suitable to use in any\n # security sensitive context.\n user_ip = (\n request.headers.get(\"x-forwarded-for\", default=\"\").split(\",\")[0]\n or request.headers.get(\"client-ip\", default=\"\").split(\",\")[0]\n or request.remote_addr\n )\n except Exception:\n pass\n else:\n tracked_request.tag(\"user_ip\", user_ip)\n\n tracked_queue_time = False\n try:\n queue_time = request.headers.get(\n \"x-queue-start\", default=\"\"\n ) or request.headers.get(\"x-request-start\", default=\"\")\n except Exception:\n pass\n else:\n tracked_queue_time = track_request_queue_time(\n queue_time, tracked_request\n )\n if not tracked_queue_time:\n try:\n amazon_queue_time = request.headers.get(\n \"x-amzn-trace-id\", default=\"\"\n )\n except Exception:\n pass\n else:\n track_amazon_request_queue_time(amazon_queue_time, tracked_request)\n\n try:\n response = handler(request)\n except Exception:\n tracked_request.tag(\"error\", \"true\")\n raise\n\n # This happens further down the call chain. So time it starting\n # above, but only name it if it gets to here.\n if request.matched_route is not None:\n tracked_request.mark_real_request()\n span.operation = \"Controller/\" + request.matched_route.name\n\n finally:\n tracked_request.stop_span()\n\n return response\n\n return scout_tween\n", "path": "src/scout_apm/pyramid.py"}], "after_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport scout_apm.core\nfrom scout_apm.core.config import ScoutConfig\nfrom scout_apm.core.tracked_request import TrackedRequest\nfrom scout_apm.core.web_requests import (\n create_filtered_path,\n ignore_path,\n track_amazon_request_queue_time,\n track_request_queue_time,\n)\n\n\ndef includeme(config):\n configs = {}\n pyramid_config = config.get_settings()\n for name in pyramid_config:\n if name.startswith(\"SCOUT_\"):\n value = pyramid_config[name]\n clean_name = name.replace(\"SCOUT_\", \"\").lower()\n configs[clean_name] = value\n ScoutConfig.set(**configs)\n\n if scout_apm.core.install():\n config.add_tween(\"scout_apm.pyramid.instruments\")\n\n\ndef instruments(handler, registry):\n def scout_tween(request):\n tracked_request = TrackedRequest.instance()\n span = tracked_request.start_span(operation=\"Controller/Pyramid\")\n\n try:\n path = request.path\n # mixed() returns values as *either* single items or lists\n url_params = [\n (k, v) for k, vs in request.GET.dict_of_lists().items() for v in vs\n ]\n tracked_request.tag(\"path\", create_filtered_path(path, url_params))\n if ignore_path(path):\n tracked_request.tag(\"ignore_transaction\", True)\n\n try:\n # Determine a remote IP to associate with the request. 
The value is\n # spoofable by the requester so this is not suitable to use in any\n # security sensitive context.\n user_ip = (\n request.headers.get(\"x-forwarded-for\", default=\"\").split(\",\")[0]\n or request.headers.get(\"client-ip\", default=\"\").split(\",\")[0]\n or request.remote_addr\n )\n except Exception:\n pass\n else:\n tracked_request.tag(\"user_ip\", user_ip)\n\n tracked_queue_time = False\n try:\n queue_time = request.headers.get(\n \"x-queue-start\", default=\"\"\n ) or request.headers.get(\"x-request-start\", default=\"\")\n except Exception:\n pass\n else:\n tracked_queue_time = track_request_queue_time(\n queue_time, tracked_request\n )\n if not tracked_queue_time:\n try:\n amazon_queue_time = request.headers.get(\n \"x-amzn-trace-id\", default=\"\"\n )\n except Exception:\n pass\n else:\n track_amazon_request_queue_time(amazon_queue_time, tracked_request)\n\n try:\n try:\n response = handler(request)\n finally:\n # Routing further down the call chain. So time it starting\n # above, but only name it if it gets a name\n if request.matched_route is not None:\n tracked_request.mark_real_request()\n span.operation = \"Controller/\" + request.matched_route.name\n except Exception:\n tracked_request.tag(\"error\", \"true\")\n raise\n\n finally:\n tracked_request.stop_span()\n\n return response\n\n return scout_tween\n", "path": "src/scout_apm/pyramid.py"}]} | 1,153 | 255 |
gh_patches_debug_7153 | rasdani/github-patches | git_diff | ipython__ipython-7855 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Contents Manager does not get passed config?
I can't seem to have my custom contents manager receive a config.
In sessionmanager we seem to have `contents_manager = Instance('IPython.html.services.contents.manager.ContentsManager', args=())` but nowhere is it passed parent or config. Is that normal?
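
A small, self-contained illustration of why `args=()` matters here; the Widget/Owner classes are invented for the example, and this reflects an assumption about how traitlets builds trait defaults rather than actual IPython code:
```
from IPython.config.configurable import LoggingConfigurable
from IPython.utils.traitlets import Instance, Unicode


class Widget(LoggingConfigurable):
    colour = Unicode('plain', config=True)


class Owner(LoggingConfigurable):
    # args=() makes the trait construct Widget() itself, so no config or parent reaches it
    auto_widget = Instance(Widget, args=())

    # with no args the default stays None, and the application is expected to assign
    # Widget(parent=self), which is how configuration normally propagates
    handed_widget = Instance(Widget)
```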
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `IPython/html/services/sessions/sessionmanager.py`
Content:
```
1 """A base class session manager."""
2
3 # Copyright (c) IPython Development Team.
4 # Distributed under the terms of the Modified BSD License.
5
6 import uuid
7 import sqlite3
8
9 from tornado import web
10
11 from IPython.config.configurable import LoggingConfigurable
12 from IPython.utils.py3compat import unicode_type
13 from IPython.utils.traitlets import Instance
14
15
16 class SessionManager(LoggingConfigurable):
17
18 kernel_manager = Instance('IPython.html.services.kernels.kernelmanager.MappingKernelManager')
19 contents_manager = Instance('IPython.html.services.contents.manager.ContentsManager', args=())
20
21 # Session database initialized below
22 _cursor = None
23 _connection = None
24 _columns = {'session_id', 'path', 'kernel_id'}
25
26 @property
27 def cursor(self):
28 """Start a cursor and create a database called 'session'"""
29 if self._cursor is None:
30 self._cursor = self.connection.cursor()
31 self._cursor.execute("""CREATE TABLE session
32 (session_id, path, kernel_id)""")
33 return self._cursor
34
35 @property
36 def connection(self):
37 """Start a database connection"""
38 if self._connection is None:
39 self._connection = sqlite3.connect(':memory:')
40 self._connection.row_factory = sqlite3.Row
41 return self._connection
42
43 def __del__(self):
44 """Close connection once SessionManager closes"""
45 self.cursor.close()
46
47 def session_exists(self, path):
48 """Check to see if the session for a given notebook exists"""
49 self.cursor.execute("SELECT * FROM session WHERE path=?", (path,))
50 reply = self.cursor.fetchone()
51 if reply is None:
52 return False
53 else:
54 return True
55
56 def new_session_id(self):
57 "Create a uuid for a new session"
58 return unicode_type(uuid.uuid4())
59
60 def create_session(self, path=None, kernel_name=None):
61 """Creates a session and returns its model"""
62 session_id = self.new_session_id()
63 # allow nbm to specify kernels cwd
64 kernel_path = self.contents_manager.get_kernel_path(path=path)
65 kernel_id = self.kernel_manager.start_kernel(path=kernel_path,
66 kernel_name=kernel_name)
67 return self.save_session(session_id, path=path,
68 kernel_id=kernel_id)
69
70 def save_session(self, session_id, path=None, kernel_id=None):
71 """Saves the items for the session with the given session_id
72
73 Given a session_id (and any other of the arguments), this method
74 creates a row in the sqlite session database that holds the information
75 for a session.
76
77 Parameters
78 ----------
79 session_id : str
80 uuid for the session; this method must be given a session_id
81 path : str
82 the path for the given notebook
83 kernel_id : str
84 a uuid for the kernel associated with this session
85
86 Returns
87 -------
88 model : dict
89 a dictionary of the session model
90 """
91 self.cursor.execute("INSERT INTO session VALUES (?,?,?)",
92 (session_id, path, kernel_id)
93 )
94 return self.get_session(session_id=session_id)
95
96 def get_session(self, **kwargs):
97 """Returns the model for a particular session.
98
99 Takes a keyword argument and searches for the value in the session
100 database, then returns the rest of the session's info.
101
102 Parameters
103 ----------
104 **kwargs : keyword argument
105 must be given one of the keywords and values from the session database
106 (i.e. session_id, path, kernel_id)
107
108 Returns
109 -------
110 model : dict
111 returns a dictionary that includes all the information from the
112 session described by the kwarg.
113 """
114 if not kwargs:
115 raise TypeError("must specify a column to query")
116
117 conditions = []
118 for column in kwargs.keys():
119 if column not in self._columns:
120 raise TypeError("No such column: %r", column)
121 conditions.append("%s=?" % column)
122
123 query = "SELECT * FROM session WHERE %s" % (' AND '.join(conditions))
124
125 self.cursor.execute(query, list(kwargs.values()))
126 try:
127 row = self.cursor.fetchone()
128 except KeyError:
129 # The kernel is missing, so the session just got deleted.
130 row = None
131
132 if row is None:
133 q = []
134 for key, value in kwargs.items():
135 q.append("%s=%r" % (key, value))
136
137 raise web.HTTPError(404, u'Session not found: %s' % (', '.join(q)))
138
139 return self.row_to_model(row)
140
141 def update_session(self, session_id, **kwargs):
142 """Updates the values in the session database.
143
144 Changes the values of the session with the given session_id
145 with the values from the keyword arguments.
146
147 Parameters
148 ----------
149 session_id : str
150 a uuid that identifies a session in the sqlite3 database
151 **kwargs : str
152 the key must correspond to a column title in session database,
153 and the value replaces the current value in the session
154 with session_id.
155 """
156 self.get_session(session_id=session_id)
157
158 if not kwargs:
159 # no changes
160 return
161
162 sets = []
163 for column in kwargs.keys():
164 if column not in self._columns:
165 raise TypeError("No such column: %r" % column)
166 sets.append("%s=?" % column)
167 query = "UPDATE session SET %s WHERE session_id=?" % (', '.join(sets))
168 self.cursor.execute(query, list(kwargs.values()) + [session_id])
169
170 def row_to_model(self, row):
171 """Takes sqlite database session row and turns it into a dictionary"""
172 if row['kernel_id'] not in self.kernel_manager:
173 # The kernel was killed or died without deleting the session.
174 # We can't use delete_session here because that tries to find
175 # and shut down the kernel.
176 self.cursor.execute("DELETE FROM session WHERE session_id=?",
177 (row['session_id'],))
178 raise KeyError
179
180 model = {
181 'id': row['session_id'],
182 'notebook': {
183 'path': row['path']
184 },
185 'kernel': self.kernel_manager.kernel_model(row['kernel_id'])
186 }
187 return model
188
189 def list_sessions(self):
190 """Returns a list of dictionaries containing all the information from
191 the session database"""
192 c = self.cursor.execute("SELECT * FROM session")
193 result = []
194 # We need to use fetchall() here, because row_to_model can delete rows,
195 # which messes up the cursor if we're iterating over rows.
196 for row in c.fetchall():
197 try:
198 result.append(self.row_to_model(row))
199 except KeyError:
200 pass
201 return result
202
203 def delete_session(self, session_id):
204 """Deletes the row in the session database with given session_id"""
205 # Check that session exists before deleting
206 session = self.get_session(session_id=session_id)
207 self.kernel_manager.shutdown_kernel(session['kernel']['id'])
208 self.cursor.execute("DELETE FROM session WHERE session_id=?", (session_id,))
209
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/IPython/html/services/sessions/sessionmanager.py b/IPython/html/services/sessions/sessionmanager.py
--- a/IPython/html/services/sessions/sessionmanager.py
+++ b/IPython/html/services/sessions/sessionmanager.py
@@ -16,7 +16,7 @@
class SessionManager(LoggingConfigurable):
kernel_manager = Instance('IPython.html.services.kernels.kernelmanager.MappingKernelManager')
- contents_manager = Instance('IPython.html.services.contents.manager.ContentsManager', args=())
+ contents_manager = Instance('IPython.html.services.contents.manager.ContentsManager')
# Session database initialized below
_cursor = None
| {"golden_diff": "diff --git a/IPython/html/services/sessions/sessionmanager.py b/IPython/html/services/sessions/sessionmanager.py\n--- a/IPython/html/services/sessions/sessionmanager.py\n+++ b/IPython/html/services/sessions/sessionmanager.py\n@@ -16,7 +16,7 @@\n class SessionManager(LoggingConfigurable):\n \n kernel_manager = Instance('IPython.html.services.kernels.kernelmanager.MappingKernelManager')\n- contents_manager = Instance('IPython.html.services.contents.manager.ContentsManager', args=())\n+ contents_manager = Instance('IPython.html.services.contents.manager.ContentsManager')\n \n # Session database initialized below\n _cursor = None\n", "issue": "Contents Manager does not get passed config ?\nI can't seem to have my custom contents manager receive a config.\n\nIn sessionmanager we seem to have `contents_manager = Instance('IPython.html.services.contents.manager.ContentsManager', args=())` but nowhere is it passed parent or config. Is that normal ?\n\n", "before_files": [{"content": "\"\"\"A base class session manager.\"\"\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport uuid\nimport sqlite3\n\nfrom tornado import web\n\nfrom IPython.config.configurable import LoggingConfigurable\nfrom IPython.utils.py3compat import unicode_type\nfrom IPython.utils.traitlets import Instance\n\n\nclass SessionManager(LoggingConfigurable):\n\n kernel_manager = Instance('IPython.html.services.kernels.kernelmanager.MappingKernelManager')\n contents_manager = Instance('IPython.html.services.contents.manager.ContentsManager', args=())\n \n # Session database initialized below\n _cursor = None\n _connection = None\n _columns = {'session_id', 'path', 'kernel_id'}\n \n @property\n def cursor(self):\n \"\"\"Start a cursor and create a database called 'session'\"\"\"\n if self._cursor is None:\n self._cursor = self.connection.cursor()\n self._cursor.execute(\"\"\"CREATE TABLE session \n (session_id, path, kernel_id)\"\"\")\n return self._cursor\n\n @property\n def connection(self):\n \"\"\"Start a database connection\"\"\"\n if self._connection is None:\n self._connection = sqlite3.connect(':memory:')\n self._connection.row_factory = sqlite3.Row\n return self._connection\n \n def __del__(self):\n \"\"\"Close connection once SessionManager closes\"\"\"\n self.cursor.close()\n\n def session_exists(self, path):\n \"\"\"Check to see if the session for a given notebook exists\"\"\"\n self.cursor.execute(\"SELECT * FROM session WHERE path=?\", (path,))\n reply = self.cursor.fetchone()\n if reply is None:\n return False\n else:\n return True\n\n def new_session_id(self):\n \"Create a uuid for a new session\"\n return unicode_type(uuid.uuid4())\n\n def create_session(self, path=None, kernel_name=None):\n \"\"\"Creates a session and returns its model\"\"\"\n session_id = self.new_session_id()\n # allow nbm to specify kernels cwd\n kernel_path = self.contents_manager.get_kernel_path(path=path)\n kernel_id = self.kernel_manager.start_kernel(path=kernel_path,\n kernel_name=kernel_name)\n return self.save_session(session_id, path=path,\n kernel_id=kernel_id)\n\n def save_session(self, session_id, path=None, kernel_id=None):\n \"\"\"Saves the items for the session with the given session_id\n \n Given a session_id (and any other of the arguments), this method\n creates a row in the sqlite session database that holds the information\n for a session.\n \n Parameters\n ----------\n session_id : str\n uuid for the session; this method must be given a 
session_id\n path : str\n the path for the given notebook\n kernel_id : str\n a uuid for the kernel associated with this session\n \n Returns\n -------\n model : dict\n a dictionary of the session model\n \"\"\"\n self.cursor.execute(\"INSERT INTO session VALUES (?,?,?)\",\n (session_id, path, kernel_id)\n )\n return self.get_session(session_id=session_id)\n\n def get_session(self, **kwargs):\n \"\"\"Returns the model for a particular session.\n \n Takes a keyword argument and searches for the value in the session\n database, then returns the rest of the session's info.\n\n Parameters\n ----------\n **kwargs : keyword argument\n must be given one of the keywords and values from the session database\n (i.e. session_id, path, kernel_id)\n\n Returns\n -------\n model : dict\n returns a dictionary that includes all the information from the \n session described by the kwarg.\n \"\"\"\n if not kwargs:\n raise TypeError(\"must specify a column to query\")\n\n conditions = []\n for column in kwargs.keys():\n if column not in self._columns:\n raise TypeError(\"No such column: %r\", column)\n conditions.append(\"%s=?\" % column)\n\n query = \"SELECT * FROM session WHERE %s\" % (' AND '.join(conditions))\n\n self.cursor.execute(query, list(kwargs.values()))\n try:\n row = self.cursor.fetchone()\n except KeyError:\n # The kernel is missing, so the session just got deleted.\n row = None\n\n if row is None:\n q = []\n for key, value in kwargs.items():\n q.append(\"%s=%r\" % (key, value))\n\n raise web.HTTPError(404, u'Session not found: %s' % (', '.join(q)))\n\n return self.row_to_model(row)\n\n def update_session(self, session_id, **kwargs):\n \"\"\"Updates the values in the session database.\n \n Changes the values of the session with the given session_id\n with the values from the keyword arguments. 
\n \n Parameters\n ----------\n session_id : str\n a uuid that identifies a session in the sqlite3 database\n **kwargs : str\n the key must correspond to a column title in session database,\n and the value replaces the current value in the session \n with session_id.\n \"\"\"\n self.get_session(session_id=session_id)\n\n if not kwargs:\n # no changes\n return\n\n sets = []\n for column in kwargs.keys():\n if column not in self._columns:\n raise TypeError(\"No such column: %r\" % column)\n sets.append(\"%s=?\" % column)\n query = \"UPDATE session SET %s WHERE session_id=?\" % (', '.join(sets))\n self.cursor.execute(query, list(kwargs.values()) + [session_id])\n\n def row_to_model(self, row):\n \"\"\"Takes sqlite database session row and turns it into a dictionary\"\"\"\n if row['kernel_id'] not in self.kernel_manager:\n # The kernel was killed or died without deleting the session.\n # We can't use delete_session here because that tries to find\n # and shut down the kernel.\n self.cursor.execute(\"DELETE FROM session WHERE session_id=?\", \n (row['session_id'],))\n raise KeyError\n\n model = {\n 'id': row['session_id'],\n 'notebook': {\n 'path': row['path']\n },\n 'kernel': self.kernel_manager.kernel_model(row['kernel_id'])\n }\n return model\n\n def list_sessions(self):\n \"\"\"Returns a list of dictionaries containing all the information from\n the session database\"\"\"\n c = self.cursor.execute(\"SELECT * FROM session\")\n result = []\n # We need to use fetchall() here, because row_to_model can delete rows,\n # which messes up the cursor if we're iterating over rows.\n for row in c.fetchall():\n try:\n result.append(self.row_to_model(row))\n except KeyError:\n pass\n return result\n\n def delete_session(self, session_id):\n \"\"\"Deletes the row in the session database with given session_id\"\"\"\n # Check that session exists before deleting\n session = self.get_session(session_id=session_id)\n self.kernel_manager.shutdown_kernel(session['kernel']['id'])\n self.cursor.execute(\"DELETE FROM session WHERE session_id=?\", (session_id,))\n", "path": "IPython/html/services/sessions/sessionmanager.py"}], "after_files": [{"content": "\"\"\"A base class session manager.\"\"\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport uuid\nimport sqlite3\n\nfrom tornado import web\n\nfrom IPython.config.configurable import LoggingConfigurable\nfrom IPython.utils.py3compat import unicode_type\nfrom IPython.utils.traitlets import Instance\n\n\nclass SessionManager(LoggingConfigurable):\n\n kernel_manager = Instance('IPython.html.services.kernels.kernelmanager.MappingKernelManager')\n contents_manager = Instance('IPython.html.services.contents.manager.ContentsManager')\n \n # Session database initialized below\n _cursor = None\n _connection = None\n _columns = {'session_id', 'path', 'kernel_id'}\n \n @property\n def cursor(self):\n \"\"\"Start a cursor and create a database called 'session'\"\"\"\n if self._cursor is None:\n self._cursor = self.connection.cursor()\n self._cursor.execute(\"\"\"CREATE TABLE session \n (session_id, path, kernel_id)\"\"\")\n return self._cursor\n\n @property\n def connection(self):\n \"\"\"Start a database connection\"\"\"\n if self._connection is None:\n self._connection = sqlite3.connect(':memory:')\n self._connection.row_factory = sqlite3.Row\n return self._connection\n \n def __del__(self):\n \"\"\"Close connection once SessionManager closes\"\"\"\n self.cursor.close()\n\n def session_exists(self, path):\n 
\"\"\"Check to see if the session for a given notebook exists\"\"\"\n self.cursor.execute(\"SELECT * FROM session WHERE path=?\", (path,))\n reply = self.cursor.fetchone()\n if reply is None:\n return False\n else:\n return True\n\n def new_session_id(self):\n \"Create a uuid for a new session\"\n return unicode_type(uuid.uuid4())\n\n def create_session(self, path=None, kernel_name=None):\n \"\"\"Creates a session and returns its model\"\"\"\n session_id = self.new_session_id()\n # allow nbm to specify kernels cwd\n kernel_path = self.contents_manager.get_kernel_path(path=path)\n kernel_id = self.kernel_manager.start_kernel(path=kernel_path,\n kernel_name=kernel_name)\n return self.save_session(session_id, path=path,\n kernel_id=kernel_id)\n\n def save_session(self, session_id, path=None, kernel_id=None):\n \"\"\"Saves the items for the session with the given session_id\n \n Given a session_id (and any other of the arguments), this method\n creates a row in the sqlite session database that holds the information\n for a session.\n \n Parameters\n ----------\n session_id : str\n uuid for the session; this method must be given a session_id\n path : str\n the path for the given notebook\n kernel_id : str\n a uuid for the kernel associated with this session\n \n Returns\n -------\n model : dict\n a dictionary of the session model\n \"\"\"\n self.cursor.execute(\"INSERT INTO session VALUES (?,?,?)\",\n (session_id, path, kernel_id)\n )\n return self.get_session(session_id=session_id)\n\n def get_session(self, **kwargs):\n \"\"\"Returns the model for a particular session.\n \n Takes a keyword argument and searches for the value in the session\n database, then returns the rest of the session's info.\n\n Parameters\n ----------\n **kwargs : keyword argument\n must be given one of the keywords and values from the session database\n (i.e. session_id, path, kernel_id)\n\n Returns\n -------\n model : dict\n returns a dictionary that includes all the information from the \n session described by the kwarg.\n \"\"\"\n if not kwargs:\n raise TypeError(\"must specify a column to query\")\n\n conditions = []\n for column in kwargs.keys():\n if column not in self._columns:\n raise TypeError(\"No such column: %r\", column)\n conditions.append(\"%s=?\" % column)\n\n query = \"SELECT * FROM session WHERE %s\" % (' AND '.join(conditions))\n\n self.cursor.execute(query, list(kwargs.values()))\n try:\n row = self.cursor.fetchone()\n except KeyError:\n # The kernel is missing, so the session just got deleted.\n row = None\n\n if row is None:\n q = []\n for key, value in kwargs.items():\n q.append(\"%s=%r\" % (key, value))\n\n raise web.HTTPError(404, u'Session not found: %s' % (', '.join(q)))\n\n return self.row_to_model(row)\n\n def update_session(self, session_id, **kwargs):\n \"\"\"Updates the values in the session database.\n \n Changes the values of the session with the given session_id\n with the values from the keyword arguments. 
\n \n Parameters\n ----------\n session_id : str\n a uuid that identifies a session in the sqlite3 database\n **kwargs : str\n the key must correspond to a column title in session database,\n and the value replaces the current value in the session \n with session_id.\n \"\"\"\n self.get_session(session_id=session_id)\n\n if not kwargs:\n # no changes\n return\n\n sets = []\n for column in kwargs.keys():\n if column not in self._columns:\n raise TypeError(\"No such column: %r\" % column)\n sets.append(\"%s=?\" % column)\n query = \"UPDATE session SET %s WHERE session_id=?\" % (', '.join(sets))\n self.cursor.execute(query, list(kwargs.values()) + [session_id])\n\n def row_to_model(self, row):\n \"\"\"Takes sqlite database session row and turns it into a dictionary\"\"\"\n if row['kernel_id'] not in self.kernel_manager:\n # The kernel was killed or died without deleting the session.\n # We can't use delete_session here because that tries to find\n # and shut down the kernel.\n self.cursor.execute(\"DELETE FROM session WHERE session_id=?\", \n (row['session_id'],))\n raise KeyError\n\n model = {\n 'id': row['session_id'],\n 'notebook': {\n 'path': row['path']\n },\n 'kernel': self.kernel_manager.kernel_model(row['kernel_id'])\n }\n return model\n\n def list_sessions(self):\n \"\"\"Returns a list of dictionaries containing all the information from\n the session database\"\"\"\n c = self.cursor.execute(\"SELECT * FROM session\")\n result = []\n # We need to use fetchall() here, because row_to_model can delete rows,\n # which messes up the cursor if we're iterating over rows.\n for row in c.fetchall():\n try:\n result.append(self.row_to_model(row))\n except KeyError:\n pass\n return result\n\n def delete_session(self, session_id):\n \"\"\"Deletes the row in the session database with given session_id\"\"\"\n # Check that session exists before deleting\n session = self.get_session(session_id=session_id)\n self.kernel_manager.shutdown_kernel(session['kernel']['id'])\n self.cursor.execute(\"DELETE FROM session WHERE session_id=?\", (session_id,))\n", "path": "IPython/html/services/sessions/sessionmanager.py"}]} | 2,370 | 138 |
gh_patches_debug_9995 | rasdani/github-patches | git_diff | scikit-image__scikit-image-1052 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error revealed by numpy 1.9.0r1
```
======================================================================
ERROR: test_join.test_relabel_sequential_offset1
----------------------------------------------------------------------
Traceback (most recent call last):
File "X:\Python27-x64\lib\site-packages\nose\case.py", line 197, in runTest
self.test(*self.arg)
File "X:\Python27-x64\lib\site-packages\skimage\segmentation\tests\test_join.py", line 30, in test_relabel_sequential_offset1
ar_relab, fw, inv = relabel_sequential(ar)
File "X:\Python27-x64\lib\site-packages\skimage\segmentation\_join.py", line 127, in relabel_sequential
forward_map[labels0] = np.arange(offset, offset + len(labels0) + 1)
ValueError: shape mismatch: value array of shape (6,) could not be broadcast to indexing result of shape (5,)
```
The `+ 1` is incorrect.
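A minimal sketch (with made-up label values) of the length mismatch behind that traceback:

```python
import numpy as np

labels0 = np.array([1, 5, 8, 42, 99])  # 5 distinct nonzero labels (hypothetical values)
offset = 1
len(np.arange(offset, offset + len(labels0) + 1))  # 6 values for 5 target positions -> shape mismatch
len(np.arange(offset, offset + len(labels0)))      # 5 values, matches forward_map[labels0]
```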
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/segmentation/_join.py`
Content:
```
1 import numpy as np
2 from skimage._shared.utils import deprecated
3
4
5 def join_segmentations(s1, s2):
6 """Return the join of the two input segmentations.
7
8 The join J of S1 and S2 is defined as the segmentation in which two
9 voxels are in the same segment if and only if they are in the same
10 segment in *both* S1 and S2.
11
12 Parameters
13 ----------
14 s1, s2 : numpy arrays
15 s1 and s2 are label fields of the same shape.
16
17 Returns
18 -------
19 j : numpy array
20 The join segmentation of s1 and s2.
21
22 Examples
23 --------
24 >>> from skimage.segmentation import join_segmentations
25 >>> s1 = np.array([[0, 0, 1, 1],
26 ... [0, 2, 1, 1],
27 ... [2, 2, 2, 1]])
28 >>> s2 = np.array([[0, 1, 1, 0],
29 ... [0, 1, 1, 0],
30 ... [0, 1, 1, 1]])
31 >>> join_segmentations(s1, s2)
32 array([[0, 1, 3, 2],
33 [0, 5, 3, 2],
34 [4, 5, 5, 3]])
35 """
36 if s1.shape != s2.shape:
37 raise ValueError("Cannot join segmentations of different shape. " +
38 "s1.shape: %s, s2.shape: %s" % (s1.shape, s2.shape))
39 s1 = relabel_sequential(s1)[0]
40 s2 = relabel_sequential(s2)[0]
41 j = (s2.max() + 1) * s1 + s2
42 j = relabel_sequential(j)[0]
43 return j
44
45
46 @deprecated('relabel_sequential')
47 def relabel_from_one(label_field):
48 """Convert labels in an arbitrary label field to {1, ... number_of_labels}.
49
50 This function is deprecated, see ``relabel_sequential`` for more.
51 """
52 return relabel_sequential(label_field, offset=1)
53
54
55 def relabel_sequential(label_field, offset=1):
56 """Relabel arbitrary labels to {`offset`, ... `offset` + number_of_labels}.
57
58 This function also returns the forward map (mapping the original labels to
59 the reduced labels) and the inverse map (mapping the reduced labels back
60 to the original ones).
61
62 Parameters
63 ----------
64 label_field : numpy array of int, arbitrary shape
65 An array of labels.
66 offset : int, optional
67 The return labels will start at `offset`, which should be
68 strictly positive.
69
70 Returns
71 -------
72 relabeled : numpy array of int, same shape as `label_field`
73 The input label field with labels mapped to
74 {offset, ..., number_of_labels + offset - 1}.
75 forward_map : numpy array of int, shape ``(label_field.max() + 1,)``
76 The map from the original label space to the returned label
77 space. Can be used to re-apply the same mapping. See examples
78 for usage.
79 inverse_map : 1D numpy array of int, of length offset + number of labels
80 The map from the new label space to the original space. This
81 can be used to reconstruct the original label field from the
82 relabeled one.
83
84 Notes
85 -----
86 The label 0 is assumed to denote the background and is never remapped.
87
88 The forward map can be extremely big for some inputs, since its
89 length is given by the maximum of the label field. However, in most
90 situations, ``label_field.max()`` is much smaller than
91 ``label_field.size``, and in these cases the forward map is
92 guaranteed to be smaller than either the input or output images.
93
94 Examples
95 --------
96 >>> from skimage.segmentation import relabel_sequential
97 >>> label_field = np.array([1, 1, 5, 5, 8, 99, 42])
98 >>> relab, fw, inv = relabel_sequential(label_field)
99 >>> relab
100 array([1, 1, 2, 2, 3, 5, 4])
101 >>> fw
102 array([0, 1, 0, 0, 0, 2, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0,
104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 5])
107 >>> inv
108 array([ 0, 1, 5, 8, 42, 99])
109 >>> (fw[label_field] == relab).all()
110 True
111 >>> (inv[relab] == label_field).all()
112 True
113 >>> relab, fw, inv = relabel_sequential(label_field, offset=5)
114 >>> relab
115 array([5, 5, 6, 6, 7, 9, 8])
116 """
117 m = label_field.max()
118 if not np.issubdtype(label_field.dtype, np.int):
119 new_type = np.min_scalar_type(int(m))
120 label_field = label_field.astype(new_type)
121 m = m.astype(new_type) # Ensures m is an integer
122 labels = np.unique(label_field)
123 labels0 = labels[labels != 0]
124 if m == len(labels0): # nothing to do, already 1...n labels
125 return label_field, labels, labels
126 forward_map = np.zeros(m + 1, int)
127 forward_map[labels0] = np.arange(offset, offset + len(labels0) + 1)
128 if not (labels == 0).any():
129 labels = np.concatenate(([0], labels))
130 inverse_map = np.zeros(offset - 1 + len(labels), dtype=np.intp)
131 inverse_map[(offset - 1):] = labels
132 relabeled = forward_map[label_field]
133 return relabeled, forward_map, inverse_map
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/skimage/segmentation/_join.py b/skimage/segmentation/_join.py
--- a/skimage/segmentation/_join.py
+++ b/skimage/segmentation/_join.py
@@ -124,7 +124,7 @@
if m == len(labels0): # nothing to do, already 1...n labels
return label_field, labels, labels
forward_map = np.zeros(m + 1, int)
- forward_map[labels0] = np.arange(offset, offset + len(labels0) + 1)
+ forward_map[labels0] = np.arange(offset, offset + len(labels0))
if not (labels == 0).any():
labels = np.concatenate(([0], labels))
inverse_map = np.zeros(offset - 1 + len(labels), dtype=np.intp)
| {"golden_diff": "diff --git a/skimage/segmentation/_join.py b/skimage/segmentation/_join.py\n--- a/skimage/segmentation/_join.py\n+++ b/skimage/segmentation/_join.py\n@@ -124,7 +124,7 @@\n if m == len(labels0): # nothing to do, already 1...n labels\n return label_field, labels, labels\n forward_map = np.zeros(m + 1, int)\n- forward_map[labels0] = np.arange(offset, offset + len(labels0) + 1)\n+ forward_map[labels0] = np.arange(offset, offset + len(labels0))\n if not (labels == 0).any():\n labels = np.concatenate(([0], labels))\n inverse_map = np.zeros(offset - 1 + len(labels), dtype=np.intp)\n", "issue": "Error revealed by numpy 1.9.0r1\n```\n======================================================================\nERROR: test_join.test_relabel_sequential_offset1\n----------------------------------------------------------------------\nTraceback (most recent call last):\n File \"X:\\Python27-x64\\lib\\site-packages\\nose\\case.py\", line 197, in runTest\n self.test(*self.arg)\n File \"X:\\Python27-x64\\lib\\site-packages\\skimage\\segmentation\\tests\\test_join.py\", line 30, in test_relabel_sequential_offset1\n ar_relab, fw, inv = relabel_sequential(ar)\n File \"X:\\Python27-x64\\lib\\site-packages\\skimage\\segmentation\\_join.py\", line 127, in relabel_sequential\n forward_map[labels0] = np.arange(offset, offset + len(labels0) + 1)\nValueError: shape mismatch: value array of shape (6,) could not be broadcast to indexing result of shape (5,)\n```\n\nThe `+ 1` is incorrect.\n\n", "before_files": [{"content": "import numpy as np\nfrom skimage._shared.utils import deprecated\n\n\ndef join_segmentations(s1, s2):\n \"\"\"Return the join of the two input segmentations.\n\n The join J of S1 and S2 is defined as the segmentation in which two\n voxels are in the same segment if and only if they are in the same\n segment in *both* S1 and S2.\n\n Parameters\n ----------\n s1, s2 : numpy arrays\n s1 and s2 are label fields of the same shape.\n\n Returns\n -------\n j : numpy array\n The join segmentation of s1 and s2.\n\n Examples\n --------\n >>> from skimage.segmentation import join_segmentations\n >>> s1 = np.array([[0, 0, 1, 1],\n ... [0, 2, 1, 1],\n ... [2, 2, 2, 1]])\n >>> s2 = np.array([[0, 1, 1, 0],\n ... [0, 1, 1, 0],\n ... [0, 1, 1, 1]])\n >>> join_segmentations(s1, s2)\n array([[0, 1, 3, 2],\n [0, 5, 3, 2],\n [4, 5, 5, 3]])\n \"\"\"\n if s1.shape != s2.shape:\n raise ValueError(\"Cannot join segmentations of different shape. \" +\n \"s1.shape: %s, s2.shape: %s\" % (s1.shape, s2.shape))\n s1 = relabel_sequential(s1)[0]\n s2 = relabel_sequential(s2)[0]\n j = (s2.max() + 1) * s1 + s2\n j = relabel_sequential(j)[0]\n return j\n\n\n@deprecated('relabel_sequential')\ndef relabel_from_one(label_field):\n \"\"\"Convert labels in an arbitrary label field to {1, ... number_of_labels}.\n\n This function is deprecated, see ``relabel_sequential`` for more.\n \"\"\"\n return relabel_sequential(label_field, offset=1)\n\n\ndef relabel_sequential(label_field, offset=1):\n \"\"\"Relabel arbitrary labels to {`offset`, ... 
`offset` + number_of_labels}.\n\n This function also returns the forward map (mapping the original labels to\n the reduced labels) and the inverse map (mapping the reduced labels back\n to the original ones).\n\n Parameters\n ----------\n label_field : numpy array of int, arbitrary shape\n An array of labels.\n offset : int, optional\n The return labels will start at `offset`, which should be\n strictly positive.\n\n Returns\n -------\n relabeled : numpy array of int, same shape as `label_field`\n The input label field with labels mapped to\n {offset, ..., number_of_labels + offset - 1}.\n forward_map : numpy array of int, shape ``(label_field.max() + 1,)``\n The map from the original label space to the returned label\n space. Can be used to re-apply the same mapping. See examples\n for usage.\n inverse_map : 1D numpy array of int, of length offset + number of labels\n The map from the new label space to the original space. This\n can be used to reconstruct the original label field from the\n relabeled one.\n\n Notes\n -----\n The label 0 is assumed to denote the background and is never remapped.\n\n The forward map can be extremely big for some inputs, since its\n length is given by the maximum of the label field. However, in most\n situations, ``label_field.max()`` is much smaller than\n ``label_field.size``, and in these cases the forward map is\n guaranteed to be smaller than either the input or output images.\n\n Examples\n --------\n >>> from skimage.segmentation import relabel_sequential\n >>> label_field = np.array([1, 1, 5, 5, 8, 99, 42])\n >>> relab, fw, inv = relabel_sequential(label_field)\n >>> relab\n array([1, 1, 2, 2, 3, 5, 4])\n >>> fw\n array([0, 1, 0, 0, 0, 2, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 5])\n >>> inv\n array([ 0, 1, 5, 8, 42, 99])\n >>> (fw[label_field] == relab).all()\n True\n >>> (inv[relab] == label_field).all()\n True\n >>> relab, fw, inv = relabel_sequential(label_field, offset=5)\n >>> relab\n array([5, 5, 6, 6, 7, 9, 8])\n \"\"\"\n m = label_field.max()\n if not np.issubdtype(label_field.dtype, np.int):\n new_type = np.min_scalar_type(int(m))\n label_field = label_field.astype(new_type)\n m = m.astype(new_type) # Ensures m is an integer\n labels = np.unique(label_field)\n labels0 = labels[labels != 0]\n if m == len(labels0): # nothing to do, already 1...n labels\n return label_field, labels, labels\n forward_map = np.zeros(m + 1, int)\n forward_map[labels0] = np.arange(offset, offset + len(labels0) + 1)\n if not (labels == 0).any():\n labels = np.concatenate(([0], labels))\n inverse_map = np.zeros(offset - 1 + len(labels), dtype=np.intp)\n inverse_map[(offset - 1):] = labels\n relabeled = forward_map[label_field]\n return relabeled, forward_map, inverse_map\n", "path": "skimage/segmentation/_join.py"}], "after_files": [{"content": "import numpy as np\nfrom skimage._shared.utils import deprecated\n\n\ndef join_segmentations(s1, s2):\n \"\"\"Return the join of the two input segmentations.\n\n The join J of S1 and S2 is defined as the segmentation in which two\n voxels are in the same segment if and only if they are in the same\n segment in *both* S1 and S2.\n\n Parameters\n ----------\n s1, s2 : numpy arrays\n s1 and s2 are label fields of the same shape.\n\n Returns\n -------\n j : numpy array\n The join 
segmentation of s1 and s2.\n\n Examples\n --------\n >>> from skimage.segmentation import join_segmentations\n >>> s1 = np.array([[0, 0, 1, 1],\n ... [0, 2, 1, 1],\n ... [2, 2, 2, 1]])\n >>> s2 = np.array([[0, 1, 1, 0],\n ... [0, 1, 1, 0],\n ... [0, 1, 1, 1]])\n >>> join_segmentations(s1, s2)\n array([[0, 1, 3, 2],\n [0, 5, 3, 2],\n [4, 5, 5, 3]])\n \"\"\"\n if s1.shape != s2.shape:\n raise ValueError(\"Cannot join segmentations of different shape. \" +\n \"s1.shape: %s, s2.shape: %s\" % (s1.shape, s2.shape))\n s1 = relabel_sequential(s1)[0]\n s2 = relabel_sequential(s2)[0]\n j = (s2.max() + 1) * s1 + s2\n j = relabel_sequential(j)[0]\n return j\n\n\n@deprecated('relabel_sequential')\ndef relabel_from_one(label_field):\n \"\"\"Convert labels in an arbitrary label field to {1, ... number_of_labels}.\n\n This function is deprecated, see ``relabel_sequential`` for more.\n \"\"\"\n return relabel_sequential(label_field, offset=1)\n\n\ndef relabel_sequential(label_field, offset=1):\n \"\"\"Relabel arbitrary labels to {`offset`, ... `offset` + number_of_labels}.\n\n This function also returns the forward map (mapping the original labels to\n the reduced labels) and the inverse map (mapping the reduced labels back\n to the original ones).\n\n Parameters\n ----------\n label_field : numpy array of int, arbitrary shape\n An array of labels.\n offset : int, optional\n The return labels will start at `offset`, which should be\n strictly positive.\n\n Returns\n -------\n relabeled : numpy array of int, same shape as `label_field`\n The input label field with labels mapped to\n {offset, ..., number_of_labels + offset - 1}.\n forward_map : numpy array of int, shape ``(label_field.max() + 1,)``\n The map from the original label space to the returned label\n space. Can be used to re-apply the same mapping. See examples\n for usage.\n inverse_map : 1D numpy array of int, of length offset + number of labels\n The map from the new label space to the original space. This\n can be used to reconstruct the original label field from the\n relabeled one.\n\n Notes\n -----\n The label 0 is assumed to denote the background and is never remapped.\n\n The forward map can be extremely big for some inputs, since its\n length is given by the maximum of the label field. 
However, in most\n situations, ``label_field.max()`` is much smaller than\n ``label_field.size``, and in these cases the forward map is\n guaranteed to be smaller than either the input or output images.\n\n Examples\n --------\n >>> from skimage.segmentation import relabel_sequential\n >>> label_field = np.array([1, 1, 5, 5, 8, 99, 42])\n >>> relab, fw, inv = relabel_sequential(label_field)\n >>> relab\n array([1, 1, 2, 2, 3, 5, 4])\n >>> fw\n array([0, 1, 0, 0, 0, 2, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 5])\n >>> inv\n array([ 0, 1, 5, 8, 42, 99])\n >>> (fw[label_field] == relab).all()\n True\n >>> (inv[relab] == label_field).all()\n True\n >>> relab, fw, inv = relabel_sequential(label_field, offset=5)\n >>> relab\n array([5, 5, 6, 6, 7, 9, 8])\n \"\"\"\n m = label_field.max()\n if not np.issubdtype(label_field.dtype, np.int):\n new_type = np.min_scalar_type(int(m))\n label_field = label_field.astype(new_type)\n m = m.astype(new_type) # Ensures m is an integer\n labels = np.unique(label_field)\n labels0 = labels[labels != 0]\n if m == len(labels0): # nothing to do, already 1...n labels\n return label_field, labels, labels\n forward_map = np.zeros(m + 1, int)\n forward_map[labels0] = np.arange(offset, offset + len(labels0))\n if not (labels == 0).any():\n labels = np.concatenate(([0], labels))\n inverse_map = np.zeros(offset - 1 + len(labels), dtype=np.intp)\n inverse_map[(offset - 1):] = labels\n relabeled = forward_map[label_field]\n return relabeled, forward_map, inverse_map\n", "path": "skimage/segmentation/_join.py"}]} | 2,377 | 187 |
gh_patches_debug_17705 | rasdani/github-patches | git_diff | open-mmlab__mmsegmentation-260 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
About the newly added RandomRotate
It looks like the pipeline `__init__.py` forgot to import this transform, so it cannot be used at the moment.
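A quick way to see the breakage (the class itself lives in `transforms.py`, as the fix below also shows):

```python
# Fails before the fix: the package __init__ never re-exports the new transform
from mmseg.datasets.pipelines import RandomRotate            # ImportError
# Works, because the class is defined in the submodule itself
from mmseg.datasets.pipelines.transforms import RandomRotate
```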
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmseg/datasets/pipelines/__init__.py`
Content:
```
1 from .compose import Compose
2 from .formating import (Collect, ImageToTensor, ToDataContainer, ToTensor,
3 Transpose, to_tensor)
4 from .loading import LoadAnnotations, LoadImageFromFile
5 from .test_time_aug import MultiScaleFlipAug
6 from .transforms import (Normalize, Pad, PhotoMetricDistortion, RandomCrop,
7 RandomFlip, Resize, SegRescale)
8
9 __all__ = [
10 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
11 'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile',
12 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop',
13 'Normalize', 'SegRescale', 'PhotoMetricDistortion'
14 ]
15
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mmseg/datasets/pipelines/__init__.py b/mmseg/datasets/pipelines/__init__.py
--- a/mmseg/datasets/pipelines/__init__.py
+++ b/mmseg/datasets/pipelines/__init__.py
@@ -4,11 +4,13 @@
from .loading import LoadAnnotations, LoadImageFromFile
from .test_time_aug import MultiScaleFlipAug
from .transforms import (Normalize, Pad, PhotoMetricDistortion, RandomCrop,
- RandomFlip, Resize, SegRescale)
+ RandomFlip, RandomRotate, Rerange, Resize, RGB2Gray,
+ SegRescale)
__all__ = [
'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile',
'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop',
- 'Normalize', 'SegRescale', 'PhotoMetricDistortion'
+ 'Normalize', 'SegRescale', 'PhotoMetricDistortion', 'RandomRotate',
+ 'Rerange', 'RGB2Gray'
]
| {"golden_diff": "diff --git a/mmseg/datasets/pipelines/__init__.py b/mmseg/datasets/pipelines/__init__.py\n--- a/mmseg/datasets/pipelines/__init__.py\n+++ b/mmseg/datasets/pipelines/__init__.py\n@@ -4,11 +4,13 @@\n from .loading import LoadAnnotations, LoadImageFromFile\n from .test_time_aug import MultiScaleFlipAug\n from .transforms import (Normalize, Pad, PhotoMetricDistortion, RandomCrop,\n- RandomFlip, Resize, SegRescale)\n+ RandomFlip, RandomRotate, Rerange, Resize, RGB2Gray,\n+ SegRescale)\n \n __all__ = [\n 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',\n 'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile',\n 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop',\n- 'Normalize', 'SegRescale', 'PhotoMetricDistortion'\n+ 'Normalize', 'SegRescale', 'PhotoMetricDistortion', 'RandomRotate',\n+ 'Rerange', 'RGB2Gray'\n ]\n", "issue": "\u5173\u4e8e\u65b0\u589e\u7684RandomRotate\n\u597d\u50cfpipeline\u7684__init__.py\u91cc\u9762\u5fd8\u8bb0\u5bfc\u5165\u8fd9\u4e2a\u53d8\u6362\u4e86\uff0c\u5bfc\u81f4\u73b0\u5728\u65e0\u6cd5\u4f7f\u7528\u3002\n", "before_files": [{"content": "from .compose import Compose\nfrom .formating import (Collect, ImageToTensor, ToDataContainer, ToTensor,\n Transpose, to_tensor)\nfrom .loading import LoadAnnotations, LoadImageFromFile\nfrom .test_time_aug import MultiScaleFlipAug\nfrom .transforms import (Normalize, Pad, PhotoMetricDistortion, RandomCrop,\n RandomFlip, Resize, SegRescale)\n\n__all__ = [\n 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',\n 'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile',\n 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop',\n 'Normalize', 'SegRescale', 'PhotoMetricDistortion'\n]\n", "path": "mmseg/datasets/pipelines/__init__.py"}], "after_files": [{"content": "from .compose import Compose\nfrom .formating import (Collect, ImageToTensor, ToDataContainer, ToTensor,\n Transpose, to_tensor)\nfrom .loading import LoadAnnotations, LoadImageFromFile\nfrom .test_time_aug import MultiScaleFlipAug\nfrom .transforms import (Normalize, Pad, PhotoMetricDistortion, RandomCrop,\n RandomFlip, RandomRotate, Rerange, Resize, RGB2Gray,\n SegRescale)\n\n__all__ = [\n 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',\n 'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile',\n 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop',\n 'Normalize', 'SegRescale', 'PhotoMetricDistortion', 'RandomRotate',\n 'Rerange', 'RGB2Gray'\n]\n", "path": "mmseg/datasets/pipelines/__init__.py"}]} | 473 | 252 |
gh_patches_debug_23700 | rasdani/github-patches | git_diff | sunpy__sunpy-1408 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Download of sample data is repeated for each server
Why do we have `sunpy.data.download_sample_data()` download all of the sample data files twice, once for each of the two servers (data.sunpy.org and hesperia.gsfc.nasa.gov)? This seems silly.
Lines 60–61 in `sunpy/data/_sample.py`:
``` python
for base_url in _base_urls:
for file_name in _files.itervalues():
```
Output:
```
>>> import sunpy.data
>>> sunpy.data.download_sample_data()
Downloading sample files to c:/Users/Albert\sunpy\data/sample_data
Downloading http://data.sunpy.org/sample-data/BIR_20110922_103000_01.fit
|===========================================| 760k/760k (100.00%) 4s
Downloading http://data.sunpy.org/sample-data/swap_lv1_20120101_001607.fits
|===========================================| 2.1M/2.1M (100.00%) 4s
Downloading http://data.sunpy.org/sample-data/eit_l1_20020625_100011.fits
|===========================================| 8.3M/8.3M (100.00%) 10s
Downloading http://data.sunpy.org/sample-data/aia.lev1.193A_2013-09-21T16_00_06.84Z.image_
lev1.fits.zip
|===========================================| 12M/ 12M (100.00%) 22s
Unpacking: aia.lev1.193A_2013-09-21T16_00_06.84Z.image_lev1.fits
Downloading http://data.sunpy.org/sample-data/hsi_calib_ev_20020220_1106_20020220_1106_25_
40.fits
|===========================================| 207k/207k (100.00%) 0s
Downloading http://data.sunpy.org/sample-data/AIA20110319_105400_0171.fits
|===========================================| 4.2M/4.2M (100.00%) 6s
Downloading http://data.sunpy.org/sample-data/hsi_image_20101016_191218.fits
|===========================================| 95k/ 95k (100.00%) 0s
Downloading http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/BIR_20110922_103000_
01.fit
|===========================================| 760k/760k (100.00%) 0s
Downloading http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/swap_lv1_20120101_00
1607.fits
|===========================================| 2.1M/2.1M (100.00%) 2s
Downloading http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/eit_l1_20020625_1000
11.fits
|===========================================| 8.3M/8.3M (100.00%) 6s
Downloading http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/aia.lev1.193A_2013-0
9-21T16_00_06.84Z.image_lev1.fits.zip
|===========================================| 12M/ 12M (100.00%) 10s
Unpacking: aia.lev1.193A_2013-09-21T16_00_06.84Z.image_lev1.fits
Downloading http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/hsi_calib_ev_2002022
0_1106_20020220_1106_25_40.fits
|===========================================| 207k/207k (100.00%) 0s
Downloading http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/AIA20110319_105400_0
171.fits
|===========================================| 4.2M/4.2M (100.00%) 3s
Downloading http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/hsi_image_20101016_1
91218.fits
|===========================================| 95k/ 95k (100.00%) 0s
```
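One way to avoid the duplicate fetches is to swap the loop order and stop after the first server that actually serves a given file; a rough sketch (download/unpack body elided):

``` python
    for file_name in _files.itervalues():      # iterate over the sample files first
        for base_url in _base_urls:            # then try each mirror in order
            full_file_name = file_name[0] + file_name[1]
            if url_exists(os.path.join(base_url, full_file_name)):
                # ... download and unpack exactly as before ...
                number_of_files_fetched += 1
                break                          # skip the remaining mirrors for this file
```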
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sunpy/data/_sample.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """SunPy sample data files"""
3 from __future__ import absolute_import
4
5 from os import remove
6 import os.path
7 from zipfile import ZipFile
8 from urllib2 import URLError
9 from shutil import move
10
11 from astropy.utils.data import download_file
12
13 from sunpy.util.net import url_exists
14 from sunpy import config
15
16 __author__ = "Steven Christe"
17 __email__ = "[email protected]"
18
19
20 sampledata_dir = config.get("downloads", "sample_dir")
21
22 # urls to search for the sample data
23 _base_urls = (
24 'http://data.sunpy.org/sample-data/',
25 'http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/')
26
27 # keys are file shortcuts
28 # values consist of filename as well as optional file extension if files are
29 # hosted compressed. This extension is removed after download.
30 _files = {
31 "AIA_171_IMAGE": ("AIA20110319_105400_0171.fits", ""),
32 "RHESSI_IMAGE": ("hsi_image_20101016_191218.fits", ""),
33 "EIT_195_IMAGE": ("eit_l1_20020625_100011.fits", ""),
34 "CALLISTO_IMAGE": ("BIR_20110922_103000_01.fit", ""),
35 "RHESSI_EVENT_LIST": ("hsi_calib_ev_20020220_1106_20020220_1106_25_40.fits", ""),
36 "SWAP_LEVEL1_IMAGE": ("swap_lv1_20120101_001607.fits", ""),
37 "AIA_193_IMAGE": ("aia.lev1.193A_2013-09-21T16_00_06.84Z.image_lev1.fits", ".zip")
38 }
39
40 sample_files = {}
41 for key in _files:
42 sample_files[key] = os.path.abspath(os.path.join(sampledata_dir, _files[key][0]))
43
44
45 def download_sample_data(progress=True):
46 """
47 Download the sample data.
48
49 Parameters
50 ----------
51 progress: bool
52 Show a progress bar during download
53
54 Returns
55 -------
56 None
57 """
58 number_of_files_fetched = 0
59 print("Downloading sample files to " + sampledata_dir)
60 for base_url in _base_urls:
61 for file_name in _files.itervalues():
62 full_file_name = file_name[0] + file_name[1]
63 if url_exists(os.path.join(base_url, full_file_name)):
64 f = download_file(os.path.join(base_url, full_file_name))
65 real_name, ext = os.path.splitext(full_file_name)
66
67 if file_name[1] == '.zip':
68 print("Unpacking: %s" % real_name)
69 with ZipFile(f, 'r') as zip_file:
70 zip_file.extract(real_name, sampledata_dir)
71 remove(f)
72 else:
73 # move files to the data directory
74 move(f, os.path.join(sampledata_dir, file_name[0]))
75 # increment the number of files obtained to check later
76 number_of_files_fetched += 1
77
78 if number_of_files_fetched < len(_files.keys()):
79 raise URLError("Could not download all samples files. Problem with accessing sample data servers.")
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sunpy/data/_sample.py b/sunpy/data/_sample.py
--- a/sunpy/data/_sample.py
+++ b/sunpy/data/_sample.py
@@ -57,8 +57,8 @@
"""
number_of_files_fetched = 0
print("Downloading sample files to " + sampledata_dir)
- for base_url in _base_urls:
- for file_name in _files.itervalues():
+ for file_name in _files.itervalues():
+ for base_url in _base_urls:
full_file_name = file_name[0] + file_name[1]
if url_exists(os.path.join(base_url, full_file_name)):
f = download_file(os.path.join(base_url, full_file_name))
@@ -74,6 +74,7 @@
move(f, os.path.join(sampledata_dir, file_name[0]))
# increment the number of files obtained to check later
number_of_files_fetched += 1
+ break
if number_of_files_fetched < len(_files.keys()):
raise URLError("Could not download all samples files. Problem with accessing sample data servers.")
| {"golden_diff": "diff --git a/sunpy/data/_sample.py b/sunpy/data/_sample.py\n--- a/sunpy/data/_sample.py\n+++ b/sunpy/data/_sample.py\n@@ -57,8 +57,8 @@\n \"\"\"\n number_of_files_fetched = 0\n print(\"Downloading sample files to \" + sampledata_dir)\n- for base_url in _base_urls:\n- for file_name in _files.itervalues():\n+ for file_name in _files.itervalues():\n+ for base_url in _base_urls:\n full_file_name = file_name[0] + file_name[1]\n if url_exists(os.path.join(base_url, full_file_name)):\n f = download_file(os.path.join(base_url, full_file_name))\n@@ -74,6 +74,7 @@\n move(f, os.path.join(sampledata_dir, file_name[0]))\n # increment the number of files obtained to check later\n number_of_files_fetched += 1\n+ break\n \n if number_of_files_fetched < len(_files.keys()):\n raise URLError(\"Could not download all samples files. Problem with accessing sample data servers.\")\n", "issue": "Download of sample data is repeated for each server\nWhy do we have `sunpy.data.download_sample_data()` download all of the sample data files twice, once for each of the two servers (data.sunpy.org and hesperia.gsfc.nasa.gov)? This seems silly.\n\nLines 60\u201361 in `sunpy/data/_sample.py`:\n\n``` python\n for base_url in _base_urls:\n for file_name in _files.itervalues():\n```\n\nOutput:\n\n```\n>>> import sunpy.data\n>>> sunpy.data.download_sample_data()\nDownloading sample files to c:/Users/Albert\\sunpy\\data/sample_data\nDownloading http://data.sunpy.org/sample-data/BIR_20110922_103000_01.fit\n|===========================================| 760k/760k (100.00%) 4s\nDownloading http://data.sunpy.org/sample-data/swap_lv1_20120101_001607.fits\n|===========================================| 2.1M/2.1M (100.00%) 4s\nDownloading http://data.sunpy.org/sample-data/eit_l1_20020625_100011.fits\n|===========================================| 8.3M/8.3M (100.00%) 10s\nDownloading http://data.sunpy.org/sample-data/aia.lev1.193A_2013-09-21T16_00_06.84Z.image_\nlev1.fits.zip\n|===========================================| 12M/ 12M (100.00%) 22s\nUnpacking: aia.lev1.193A_2013-09-21T16_00_06.84Z.image_lev1.fits\nDownloading http://data.sunpy.org/sample-data/hsi_calib_ev_20020220_1106_20020220_1106_25_\n40.fits\n|===========================================| 207k/207k (100.00%) 0s\nDownloading http://data.sunpy.org/sample-data/AIA20110319_105400_0171.fits\n|===========================================| 4.2M/4.2M (100.00%) 6s\nDownloading http://data.sunpy.org/sample-data/hsi_image_20101016_191218.fits\n|===========================================| 95k/ 95k (100.00%) 0s\nDownloading http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/BIR_20110922_103000_\n01.fit\n|===========================================| 760k/760k (100.00%) 0s\nDownloading http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/swap_lv1_20120101_00\n1607.fits\n|===========================================| 2.1M/2.1M (100.00%) 2s\nDownloading http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/eit_l1_20020625_1000\n11.fits\n|===========================================| 8.3M/8.3M (100.00%) 6s\nDownloading http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/aia.lev1.193A_2013-0\n9-21T16_00_06.84Z.image_lev1.fits.zip\n|===========================================| 12M/ 12M (100.00%) 10s\nUnpacking: aia.lev1.193A_2013-09-21T16_00_06.84Z.image_lev1.fits\nDownloading http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/hsi_calib_ev_2002022\n0_1106_20020220_1106_25_40.fits\n|===========================================| 207k/207k (100.00%) 
0s\nDownloading http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/AIA20110319_105400_0\n171.fits\n|===========================================| 4.2M/4.2M (100.00%) 3s\nDownloading http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/hsi_image_20101016_1\n91218.fits\n|===========================================| 95k/ 95k (100.00%) 0s\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"SunPy sample data files\"\"\"\nfrom __future__ import absolute_import\n\nfrom os import remove\nimport os.path\nfrom zipfile import ZipFile\nfrom urllib2 import URLError\nfrom shutil import move\n\nfrom astropy.utils.data import download_file\n\nfrom sunpy.util.net import url_exists\nfrom sunpy import config\n\n__author__ = \"Steven Christe\"\n__email__ = \"[email protected]\"\n\n\nsampledata_dir = config.get(\"downloads\", \"sample_dir\")\n\n# urls to search for the sample data\n_base_urls = (\n 'http://data.sunpy.org/sample-data/',\n 'http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/')\n\n# keys are file shortcuts\n# values consist of filename as well as optional file extension if files are\n# hosted compressed. This extension is removed after download.\n_files = {\n \"AIA_171_IMAGE\": (\"AIA20110319_105400_0171.fits\", \"\"),\n \"RHESSI_IMAGE\": (\"hsi_image_20101016_191218.fits\", \"\"),\n \"EIT_195_IMAGE\": (\"eit_l1_20020625_100011.fits\", \"\"),\n \"CALLISTO_IMAGE\": (\"BIR_20110922_103000_01.fit\", \"\"),\n \"RHESSI_EVENT_LIST\": (\"hsi_calib_ev_20020220_1106_20020220_1106_25_40.fits\", \"\"),\n \"SWAP_LEVEL1_IMAGE\": (\"swap_lv1_20120101_001607.fits\", \"\"),\n \"AIA_193_IMAGE\": (\"aia.lev1.193A_2013-09-21T16_00_06.84Z.image_lev1.fits\", \".zip\")\n}\n\nsample_files = {}\nfor key in _files:\n sample_files[key] = os.path.abspath(os.path.join(sampledata_dir, _files[key][0]))\n\n\ndef download_sample_data(progress=True):\n \"\"\"\n Download the sample data.\n\n Parameters\n ----------\n progress: bool\n Show a progress bar during download\n\n Returns\n -------\n None\n \"\"\"\n number_of_files_fetched = 0\n print(\"Downloading sample files to \" + sampledata_dir)\n for base_url in _base_urls:\n for file_name in _files.itervalues():\n full_file_name = file_name[0] + file_name[1]\n if url_exists(os.path.join(base_url, full_file_name)):\n f = download_file(os.path.join(base_url, full_file_name))\n real_name, ext = os.path.splitext(full_file_name)\n\n if file_name[1] == '.zip':\n print(\"Unpacking: %s\" % real_name)\n with ZipFile(f, 'r') as zip_file:\n zip_file.extract(real_name, sampledata_dir)\n remove(f)\n else:\n # move files to the data directory\n move(f, os.path.join(sampledata_dir, file_name[0]))\n # increment the number of files obtained to check later\n number_of_files_fetched += 1\n\n if number_of_files_fetched < len(_files.keys()):\n raise URLError(\"Could not download all samples files. 
Problem with accessing sample data servers.\")\n", "path": "sunpy/data/_sample.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"SunPy sample data files\"\"\"\nfrom __future__ import absolute_import\n\nfrom os import remove\nimport os.path\nfrom zipfile import ZipFile\nfrom urllib2 import URLError\nfrom shutil import move\n\nfrom astropy.utils.data import download_file\n\nfrom sunpy.util.net import url_exists\nfrom sunpy import config\n\n__author__ = \"Steven Christe\"\n__email__ = \"[email protected]\"\n\n\nsampledata_dir = config.get(\"downloads\", \"sample_dir\")\n\n# urls to search for the sample data\n_base_urls = (\n 'http://data.sunpy.org/sample-data/',\n 'http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/')\n\n# keys are file shortcuts\n# values consist of filename as well as optional file extension if files are\n# hosted compressed. This extension is removed after download.\n_files = {\n \"AIA_171_IMAGE\": (\"AIA20110319_105400_0171.fits\", \"\"),\n \"RHESSI_IMAGE\": (\"hsi_image_20101016_191218.fits\", \"\"),\n \"EIT_195_IMAGE\": (\"eit_l1_20020625_100011.fits\", \"\"),\n \"CALLISTO_IMAGE\": (\"BIR_20110922_103000_01.fit\", \"\"),\n \"RHESSI_EVENT_LIST\": (\"hsi_calib_ev_20020220_1106_20020220_1106_25_40.fits\", \"\"),\n \"SWAP_LEVEL1_IMAGE\": (\"swap_lv1_20120101_001607.fits\", \"\"),\n \"AIA_193_IMAGE\": (\"aia.lev1.193A_2013-09-21T16_00_06.84Z.image_lev1.fits\", \".zip\")\n}\n\nsample_files = {}\nfor key in _files:\n sample_files[key] = os.path.abspath(os.path.join(sampledata_dir, _files[key][0]))\n\n\ndef download_sample_data(progress=True):\n \"\"\"\n Download the sample data.\n\n Parameters\n ----------\n progress: bool\n Show a progress bar during download\n\n Returns\n -------\n None\n \"\"\"\n number_of_files_fetched = 0\n print(\"Downloading sample files to \" + sampledata_dir)\n for file_name in _files.itervalues():\n for base_url in _base_urls:\n full_file_name = file_name[0] + file_name[1]\n if url_exists(os.path.join(base_url, full_file_name)):\n f = download_file(os.path.join(base_url, full_file_name))\n real_name, ext = os.path.splitext(full_file_name)\n\n if file_name[1] == '.zip':\n print(\"Unpacking: %s\" % real_name)\n with ZipFile(f, 'r') as zip_file:\n zip_file.extract(real_name, sampledata_dir)\n remove(f)\n else:\n # move files to the data directory\n move(f, os.path.join(sampledata_dir, file_name[0]))\n # increment the number of files obtained to check later\n number_of_files_fetched += 1\n break\n\n if number_of_files_fetched < len(_files.keys()):\n raise URLError(\"Could not download all samples files. Problem with accessing sample data servers.\")\n", "path": "sunpy/data/_sample.py"}]} | 2,379 | 252 |
gh_patches_debug_13185 | rasdani/github-patches | git_diff | vyperlang__vyper-1720 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Code that ends in a comment (with no newline) cannot be compiled
This was previously [reported](https://github.com/ethereum/vyper/issues/1161) for Vyper, but it’s actually [a Python bug](https://bugs.python.org/issue35107):
```python
@public
def __init__():
pass
# BUG (no newline)
```
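On affected interpreters the trouble starts when the source handed to Python's tokenizer/parser ends in a comment with no trailing newline. A minimal defensive sketch (an assumption about a workaround, not necessarily the fix the project adopted) is to normalize the source first:

```python
import ast

source = "@public\ndef __init__():\n    pass\n    # BUG (no newline)"
# Guarantee a trailing newline so the upstream tokenizer quirk is never hit
if not source.endswith("\n"):
    source += "\n"
tree = ast.parse(source)  # parses cleanly once the newline is present
```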
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `vyper/compiler.py`
Content:
```
1 from collections import (
2 OrderedDict,
3 deque,
4 )
5 from typing import (
6 Any,
7 Callable,
8 Sequence,
9 Union,
10 )
11 import warnings
12
13 import asttokens
14
15 from vyper import (
16 compile_lll,
17 optimizer,
18 )
19 from vyper.ast_utils import (
20 ast_to_dict,
21 )
22 from vyper.opcodes import (
23 opcodes,
24 )
25 from vyper.parser import (
26 parser,
27 )
28 from vyper.signatures import (
29 sig_utils,
30 )
31 from vyper.signatures.interface import (
32 extract_external_interface,
33 extract_interface_str,
34 )
35 from vyper.typing import (
36 ContractCodes,
37 InterfaceDict,
38 InterfaceImports,
39 OutputDict,
40 OutputFormats,
41 )
42
43
44 def __compile(code, interface_codes=None, *args, **kwargs):
45 ast = parser.parse_to_ast(code)
46 lll = parser.parse_tree_to_lll(
47 ast,
48 code,
49 interface_codes=interface_codes,
50 runtime_only=kwargs.get('bytecode_runtime', False)
51 )
52 opt_lll = optimizer.optimize(lll)
53 asm = compile_lll.compile_to_assembly(opt_lll)
54
55 def find_nested_opcode(asm_list, key):
56 if key in asm_list:
57 return True
58 else:
59 sublists = [sub for sub in asm_list if isinstance(sub, list)]
60 return any(find_nested_opcode(x, key) for x in sublists)
61
62 if find_nested_opcode(asm, 'DEBUG'):
63 warnings.warn(
64 'This code contains DEBUG opcodes! The DEBUG opcode will only work in '
65 'a supported EVM! It will FAIL on all other nodes!'
66 )
67
68 c, line_number_map = compile_lll.assembly_to_evm(asm)
69 return c
70
71
72 def gas_estimate(origcode, *args, **kwargs):
73 o = {}
74 code = optimizer.optimize(parser.parse_to_lll(origcode, *args, **kwargs))
75
76 # Extract the stuff inside the LLL bracket
77 if code.value == 'seq':
78 if len(code.args) > 0 and code.args[-1].value == 'return':
79 code = code.args[-1].args[1].args[0]
80
81 assert code.value == 'seq'
82 for arg in code.args:
83 if arg.func_name is not None:
84 o[arg.func_name] = arg.total_gas
85 return o
86
87
88 def mk_full_signature(code, *args, **kwargs):
89 abi = sig_utils.mk_full_signature(parser.parse_to_ast(code), *args, **kwargs)
90 # Add gas estimates for each function to ABI
91 gas_estimates = gas_estimate(code, *args, **kwargs)
92 for func in abi:
93 try:
94 func_signature = func['name']
95 except KeyError:
96 # constructor and fallback functions don't have a name
97 continue
98
99 func_name, _, _ = func_signature.partition('(')
100 # This check ensures we skip __init__ since it has no estimate
101 if func_name in gas_estimates:
102 # TODO: mutation
103 func['gas'] = gas_estimates[func_name]
104 return abi
105
106
107 def get_asm(asm_list):
108 output_string = ''
109 skip_newlines = 0
110 for node in asm_list:
111 if isinstance(node, list):
112 output_string += get_asm(node)
113 continue
114
115 is_push = isinstance(node, str) and node.startswith('PUSH')
116
117 output_string += str(node) + ' '
118 if skip_newlines:
119 skip_newlines -= 1
120 elif is_push:
121 skip_newlines = int(node[4:]) - 1
122 else:
123 output_string += '\n'
124 return output_string
125
126
127 def get_source_map(code, contract_name, interface_codes=None, runtime_only=True, source_id=0):
128 asm_list = compile_lll.compile_to_assembly(
129 optimizer.optimize(
130 parser.parse_to_lll(
131 code,
132 runtime_only=runtime_only,
133 interface_codes=interface_codes)))
134 c, line_number_map = compile_lll.assembly_to_evm(asm_list)
135 # Sort line_number_map
136 out = OrderedDict()
137 for k in sorted(line_number_map.keys()):
138 out[k] = line_number_map[k]
139
140 out['pc_pos_map_compressed'] = compress_source_map(
141 code,
142 out['pc_pos_map'],
143 out['pc_jump_map'],
144 source_id
145 )
146 out['pc_pos_map'] = dict((k, v) for k, v in out['pc_pos_map'].items() if v)
147 return out
148
149
150 def compress_source_map(code, pos_map, jump_map, source_id):
151 linenos = asttokens.LineNumbers(code)
152 compressed_map = f"-1:-1:{source_id}:-;"
153 last_pos = [-1, -1, source_id]
154
155 for pc in sorted(pos_map)[1:]:
156 current_pos = [-1, -1, source_id]
157 if pos_map[pc]:
158 current_pos[0] = linenos.line_to_offset(*pos_map[pc][:2])
159 current_pos[1] = linenos.line_to_offset(*pos_map[pc][2:])-current_pos[0]
160
161 if pc in jump_map:
162 current_pos.append(jump_map[pc])
163
164 for i in range(2, -1, -1):
165 if current_pos[i] != last_pos[i]:
166 last_pos[i] = current_pos[i]
167 elif len(current_pos) == i+1:
168 current_pos.pop()
169 else:
170 current_pos[i] = ""
171
172 compressed_map += ":".join(str(i) for i in current_pos) + ";"
173
174 return compressed_map
175
176
177 def expand_source_map(compressed_map):
178 source_map = [_expand_row(i) if i else None for i in compressed_map.split(';')[:-1]]
179
180 for i, value in enumerate(source_map[1:], 1):
181 if value is None:
182 source_map[i] = source_map[i - 1][:3] + [None]
183 continue
184 for x in range(3):
185 if source_map[i][x] is None:
186 source_map[i][x] = source_map[i - 1][x]
187
188 return source_map
189
190
191 def _expand_row(row):
192 result = [None] * 4
193 for i, value in enumerate(row.split(':')):
194 if value:
195 result[i] = value if i == 3 else int(value)
196 return result
197
198
199 def get_opcodes(code, contract_name, bytecodes_runtime=False, interface_codes=None):
200 bytecode = __compile(
201 code,
202 bytecode_runtime=bytecodes_runtime,
203 interface_codes=interface_codes
204 ).hex().upper()
205 bytecode = deque(bytecode[i:i + 2] for i in range(0, len(bytecode), 2))
206 opcode_map = dict((v[0], k) for k, v in opcodes.items())
207 opcode_str = ""
208
209 while bytecode:
210 op = int(bytecode.popleft(), 16)
211 opcode_str += opcode_map[op] + " "
212 if "PUSH" not in opcode_map[op]:
213 continue
214 push_len = int(opcode_map[op][4:])
215 opcode_str += "0x" + "".join(bytecode.popleft() for i in range(push_len)) + " "
216
217 return opcode_str[:-1]
218
219
220 def _mk_abi_output(code, contract_name, interface_codes, source_id):
221 return mk_full_signature(code, interface_codes=interface_codes)
222
223
224 def _mk_bytecode_output(code, contract_name, interface_codes, source_id):
225 return '0x' + __compile(code, interface_codes=interface_codes).hex()
226
227
228 def _mk_bytecode_runtime_output(code, contract_name, interface_codes, source_id):
229 return '0x' + __compile(code, bytecode_runtime=True, interface_codes=interface_codes).hex()
230
231
232 def _mk_ir_output(code, contract_name, interface_codes, source_id):
233 return optimizer.optimize(parser.parse_to_lll(code, interface_codes=interface_codes))
234
235
236 def _mk_asm_output(code, contract_name, interface_codes, source_id):
237 return get_asm(compile_lll.compile_to_assembly(
238 optimizer.optimize(parser.parse_to_lll(code, interface_codes=interface_codes))
239 ))
240
241
242 def _mk_source_map_output(code, contract_name, interface_codes, source_id):
243 return get_source_map(
244 code,
245 contract_name,
246 interface_codes=interface_codes,
247 runtime_only=True,
248 source_id=source_id
249 )
250
251
252 def _mk_method_identifiers_output(code, contract_name, interface_codes, source_id):
253 return sig_utils.mk_method_identifiers(code, interface_codes=interface_codes)
254
255
256 def _mk_interface_output(code, contract_name, interface_codes, source_id):
257 return extract_interface_str(code, contract_name, interface_codes=interface_codes)
258
259
260 def _mk_external_interface_output(code, contract_name, interface_codes, source_id):
261 return extract_external_interface(code, contract_name, interface_codes=interface_codes)
262
263
264 def _mk_opcodes(code, contract_name, interface_codes, source_id):
265 return get_opcodes(code, contract_name, interface_codes=interface_codes)
266
267
268 def _mk_opcodes_runtime(code, contract_name, interface_codes, source_id):
269 return get_opcodes(code, contract_name, bytecodes_runtime=True, interface_codes=interface_codes)
270
271
272 def _mk_ast_dict(code, contract_name, interface_codes, source_id):
273 o = {
274 'contract_name': contract_name,
275 'ast': ast_to_dict(parser.parse_to_ast(code, source_id))
276 }
277 return o
278
279
280 output_formats_map = {
281 'abi': _mk_abi_output,
282 'ast_dict': _mk_ast_dict,
283 'bytecode': _mk_bytecode_output,
284 'bytecode_runtime': _mk_bytecode_runtime_output,
285 'ir': _mk_ir_output,
286 'asm': _mk_asm_output,
287 'source_map': _mk_source_map_output,
288 'method_identifiers': _mk_method_identifiers_output,
289 'interface': _mk_interface_output,
290 'external_interface': _mk_external_interface_output,
291 'opcodes': _mk_opcodes,
292 'opcodes_runtime': _mk_opcodes_runtime,
293 }
294
295
296 def compile_codes(contract_sources: ContractCodes,
297 output_formats: Union[OutputDict, OutputFormats, None] = None,
298 exc_handler: Union[Callable, None] = None,
299 interface_codes: Union[InterfaceDict, InterfaceImports, None] = None,
300 initial_id: int = 0) -> OrderedDict:
301
302 if output_formats is None:
303 output_formats = ('bytecode',)
304 if isinstance(output_formats, Sequence):
305 output_formats = dict((k, output_formats) for k in contract_sources.keys())
306
307 out: OrderedDict = OrderedDict()
308 for source_id, contract_name in enumerate(sorted(contract_sources), start=initial_id):
309 code = contract_sources[contract_name]
310 for output_format in output_formats[contract_name]:
311 if output_format not in output_formats_map:
312 raise ValueError(f'Unsupported format type {repr(output_format)}')
313
314 try:
315 interfaces: Any = interface_codes
316 if (
317 isinstance(interfaces, dict) and
318 contract_name in interfaces and
319 isinstance(interfaces[contract_name], dict)
320 ):
321 interfaces = interfaces[contract_name]
322 out.setdefault(contract_name, {})
323 out[contract_name][output_format] = output_formats_map[output_format](
324 code=code,
325 contract_name=contract_name,
326 interface_codes=interfaces,
327 source_id=source_id
328 )
329 except Exception as exc:
330 if exc_handler is not None:
331 exc_handler(contract_name, exc)
332 else:
333 raise exc
334
335 return out
336
337
338 UNKNOWN_CONTRACT_NAME = '<unknown>'
339
340
341 def compile_code(code, output_formats=None, interface_codes=None):
342 contract_sources = {UNKNOWN_CONTRACT_NAME: code}
343
344 return compile_codes(
345 contract_sources,
346 output_formats,
347 interface_codes=interface_codes,
348 )[UNKNOWN_CONTRACT_NAME]
349
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/vyper/compiler.py b/vyper/compiler.py
--- a/vyper/compiler.py
+++ b/vyper/compiler.py
@@ -321,7 +321,9 @@
interfaces = interfaces[contract_name]
out.setdefault(contract_name, {})
out[contract_name][output_format] = output_formats_map[output_format](
- code=code,
+ # trailing newline fixes python parsing bug when source ends in a comment
+ # https://bugs.python.org/issue35107
+ code=f"{code}\n",
contract_name=contract_name,
interface_codes=interfaces,
source_id=source_id
| {"golden_diff": "diff --git a/vyper/compiler.py b/vyper/compiler.py\n--- a/vyper/compiler.py\n+++ b/vyper/compiler.py\n@@ -321,7 +321,9 @@\n interfaces = interfaces[contract_name]\n out.setdefault(contract_name, {})\n out[contract_name][output_format] = output_formats_map[output_format](\n- code=code,\n+ # trailing newline fixes python parsing bug when source ends in a comment\n+ # https://bugs.python.org/issue35107\n+ code=f\"{code}\\n\",\n contract_name=contract_name,\n interface_codes=interfaces,\n source_id=source_id\n", "issue": "Code that ends in a comment (with no newline) cannot be compiled\nThis was previously [reported](https://github.com/ethereum/vyper/issues/1161) for Vyper, but it\u2019s actually [a Python bug](https://bugs.python.org/issue35107):\r\n\r\n```python\r\n@public\r\ndef __init__():\r\n pass\r\n\r\n# BUG (no newline)\r\n```\n", "before_files": [{"content": "from collections import (\n OrderedDict,\n deque,\n)\nfrom typing import (\n Any,\n Callable,\n Sequence,\n Union,\n)\nimport warnings\n\nimport asttokens\n\nfrom vyper import (\n compile_lll,\n optimizer,\n)\nfrom vyper.ast_utils import (\n ast_to_dict,\n)\nfrom vyper.opcodes import (\n opcodes,\n)\nfrom vyper.parser import (\n parser,\n)\nfrom vyper.signatures import (\n sig_utils,\n)\nfrom vyper.signatures.interface import (\n extract_external_interface,\n extract_interface_str,\n)\nfrom vyper.typing import (\n ContractCodes,\n InterfaceDict,\n InterfaceImports,\n OutputDict,\n OutputFormats,\n)\n\n\ndef __compile(code, interface_codes=None, *args, **kwargs):\n ast = parser.parse_to_ast(code)\n lll = parser.parse_tree_to_lll(\n ast,\n code,\n interface_codes=interface_codes,\n runtime_only=kwargs.get('bytecode_runtime', False)\n )\n opt_lll = optimizer.optimize(lll)\n asm = compile_lll.compile_to_assembly(opt_lll)\n\n def find_nested_opcode(asm_list, key):\n if key in asm_list:\n return True\n else:\n sublists = [sub for sub in asm_list if isinstance(sub, list)]\n return any(find_nested_opcode(x, key) for x in sublists)\n\n if find_nested_opcode(asm, 'DEBUG'):\n warnings.warn(\n 'This code contains DEBUG opcodes! The DEBUG opcode will only work in '\n 'a supported EVM! 
It will FAIL on all other nodes!'\n )\n\n c, line_number_map = compile_lll.assembly_to_evm(asm)\n return c\n\n\ndef gas_estimate(origcode, *args, **kwargs):\n o = {}\n code = optimizer.optimize(parser.parse_to_lll(origcode, *args, **kwargs))\n\n # Extract the stuff inside the LLL bracket\n if code.value == 'seq':\n if len(code.args) > 0 and code.args[-1].value == 'return':\n code = code.args[-1].args[1].args[0]\n\n assert code.value == 'seq'\n for arg in code.args:\n if arg.func_name is not None:\n o[arg.func_name] = arg.total_gas\n return o\n\n\ndef mk_full_signature(code, *args, **kwargs):\n abi = sig_utils.mk_full_signature(parser.parse_to_ast(code), *args, **kwargs)\n # Add gas estimates for each function to ABI\n gas_estimates = gas_estimate(code, *args, **kwargs)\n for func in abi:\n try:\n func_signature = func['name']\n except KeyError:\n # constructor and fallback functions don't have a name\n continue\n\n func_name, _, _ = func_signature.partition('(')\n # This check ensures we skip __init__ since it has no estimate\n if func_name in gas_estimates:\n # TODO: mutation\n func['gas'] = gas_estimates[func_name]\n return abi\n\n\ndef get_asm(asm_list):\n output_string = ''\n skip_newlines = 0\n for node in asm_list:\n if isinstance(node, list):\n output_string += get_asm(node)\n continue\n\n is_push = isinstance(node, str) and node.startswith('PUSH')\n\n output_string += str(node) + ' '\n if skip_newlines:\n skip_newlines -= 1\n elif is_push:\n skip_newlines = int(node[4:]) - 1\n else:\n output_string += '\\n'\n return output_string\n\n\ndef get_source_map(code, contract_name, interface_codes=None, runtime_only=True, source_id=0):\n asm_list = compile_lll.compile_to_assembly(\n optimizer.optimize(\n parser.parse_to_lll(\n code,\n runtime_only=runtime_only,\n interface_codes=interface_codes)))\n c, line_number_map = compile_lll.assembly_to_evm(asm_list)\n # Sort line_number_map\n out = OrderedDict()\n for k in sorted(line_number_map.keys()):\n out[k] = line_number_map[k]\n\n out['pc_pos_map_compressed'] = compress_source_map(\n code,\n out['pc_pos_map'],\n out['pc_jump_map'],\n source_id\n )\n out['pc_pos_map'] = dict((k, v) for k, v in out['pc_pos_map'].items() if v)\n return out\n\n\ndef compress_source_map(code, pos_map, jump_map, source_id):\n linenos = asttokens.LineNumbers(code)\n compressed_map = f\"-1:-1:{source_id}:-;\"\n last_pos = [-1, -1, source_id]\n\n for pc in sorted(pos_map)[1:]:\n current_pos = [-1, -1, source_id]\n if pos_map[pc]:\n current_pos[0] = linenos.line_to_offset(*pos_map[pc][:2])\n current_pos[1] = linenos.line_to_offset(*pos_map[pc][2:])-current_pos[0]\n\n if pc in jump_map:\n current_pos.append(jump_map[pc])\n\n for i in range(2, -1, -1):\n if current_pos[i] != last_pos[i]:\n last_pos[i] = current_pos[i]\n elif len(current_pos) == i+1:\n current_pos.pop()\n else:\n current_pos[i] = \"\"\n\n compressed_map += \":\".join(str(i) for i in current_pos) + \";\"\n\n return compressed_map\n\n\ndef expand_source_map(compressed_map):\n source_map = [_expand_row(i) if i else None for i in compressed_map.split(';')[:-1]]\n\n for i, value in enumerate(source_map[1:], 1):\n if value is None:\n source_map[i] = source_map[i - 1][:3] + [None]\n continue\n for x in range(3):\n if source_map[i][x] is None:\n source_map[i][x] = source_map[i - 1][x]\n\n return source_map\n\n\ndef _expand_row(row):\n result = [None] * 4\n for i, value in enumerate(row.split(':')):\n if value:\n result[i] = value if i == 3 else int(value)\n return result\n\n\ndef get_opcodes(code, contract_name, 
bytecodes_runtime=False, interface_codes=None):\n bytecode = __compile(\n code,\n bytecode_runtime=bytecodes_runtime,\n interface_codes=interface_codes\n ).hex().upper()\n bytecode = deque(bytecode[i:i + 2] for i in range(0, len(bytecode), 2))\n opcode_map = dict((v[0], k) for k, v in opcodes.items())\n opcode_str = \"\"\n\n while bytecode:\n op = int(bytecode.popleft(), 16)\n opcode_str += opcode_map[op] + \" \"\n if \"PUSH\" not in opcode_map[op]:\n continue\n push_len = int(opcode_map[op][4:])\n opcode_str += \"0x\" + \"\".join(bytecode.popleft() for i in range(push_len)) + \" \"\n\n return opcode_str[:-1]\n\n\ndef _mk_abi_output(code, contract_name, interface_codes, source_id):\n return mk_full_signature(code, interface_codes=interface_codes)\n\n\ndef _mk_bytecode_output(code, contract_name, interface_codes, source_id):\n return '0x' + __compile(code, interface_codes=interface_codes).hex()\n\n\ndef _mk_bytecode_runtime_output(code, contract_name, interface_codes, source_id):\n return '0x' + __compile(code, bytecode_runtime=True, interface_codes=interface_codes).hex()\n\n\ndef _mk_ir_output(code, contract_name, interface_codes, source_id):\n return optimizer.optimize(parser.parse_to_lll(code, interface_codes=interface_codes))\n\n\ndef _mk_asm_output(code, contract_name, interface_codes, source_id):\n return get_asm(compile_lll.compile_to_assembly(\n optimizer.optimize(parser.parse_to_lll(code, interface_codes=interface_codes))\n ))\n\n\ndef _mk_source_map_output(code, contract_name, interface_codes, source_id):\n return get_source_map(\n code,\n contract_name,\n interface_codes=interface_codes,\n runtime_only=True,\n source_id=source_id\n )\n\n\ndef _mk_method_identifiers_output(code, contract_name, interface_codes, source_id):\n return sig_utils.mk_method_identifiers(code, interface_codes=interface_codes)\n\n\ndef _mk_interface_output(code, contract_name, interface_codes, source_id):\n return extract_interface_str(code, contract_name, interface_codes=interface_codes)\n\n\ndef _mk_external_interface_output(code, contract_name, interface_codes, source_id):\n return extract_external_interface(code, contract_name, interface_codes=interface_codes)\n\n\ndef _mk_opcodes(code, contract_name, interface_codes, source_id):\n return get_opcodes(code, contract_name, interface_codes=interface_codes)\n\n\ndef _mk_opcodes_runtime(code, contract_name, interface_codes, source_id):\n return get_opcodes(code, contract_name, bytecodes_runtime=True, interface_codes=interface_codes)\n\n\ndef _mk_ast_dict(code, contract_name, interface_codes, source_id):\n o = {\n 'contract_name': contract_name,\n 'ast': ast_to_dict(parser.parse_to_ast(code, source_id))\n }\n return o\n\n\noutput_formats_map = {\n 'abi': _mk_abi_output,\n 'ast_dict': _mk_ast_dict,\n 'bytecode': _mk_bytecode_output,\n 'bytecode_runtime': _mk_bytecode_runtime_output,\n 'ir': _mk_ir_output,\n 'asm': _mk_asm_output,\n 'source_map': _mk_source_map_output,\n 'method_identifiers': _mk_method_identifiers_output,\n 'interface': _mk_interface_output,\n 'external_interface': _mk_external_interface_output,\n 'opcodes': _mk_opcodes,\n 'opcodes_runtime': _mk_opcodes_runtime,\n}\n\n\ndef compile_codes(contract_sources: ContractCodes,\n output_formats: Union[OutputDict, OutputFormats, None] = None,\n exc_handler: Union[Callable, None] = None,\n interface_codes: Union[InterfaceDict, InterfaceImports, None] = None,\n initial_id: int = 0) -> OrderedDict:\n\n if output_formats is None:\n output_formats = ('bytecode',)\n if isinstance(output_formats, Sequence):\n 
output_formats = dict((k, output_formats) for k in contract_sources.keys())\n\n out: OrderedDict = OrderedDict()\n for source_id, contract_name in enumerate(sorted(contract_sources), start=initial_id):\n code = contract_sources[contract_name]\n for output_format in output_formats[contract_name]:\n if output_format not in output_formats_map:\n raise ValueError(f'Unsupported format type {repr(output_format)}')\n\n try:\n interfaces: Any = interface_codes\n if (\n isinstance(interfaces, dict) and\n contract_name in interfaces and\n isinstance(interfaces[contract_name], dict)\n ):\n interfaces = interfaces[contract_name]\n out.setdefault(contract_name, {})\n out[contract_name][output_format] = output_formats_map[output_format](\n code=code,\n contract_name=contract_name,\n interface_codes=interfaces,\n source_id=source_id\n )\n except Exception as exc:\n if exc_handler is not None:\n exc_handler(contract_name, exc)\n else:\n raise exc\n\n return out\n\n\nUNKNOWN_CONTRACT_NAME = '<unknown>'\n\n\ndef compile_code(code, output_formats=None, interface_codes=None):\n contract_sources = {UNKNOWN_CONTRACT_NAME: code}\n\n return compile_codes(\n contract_sources,\n output_formats,\n interface_codes=interface_codes,\n )[UNKNOWN_CONTRACT_NAME]\n", "path": "vyper/compiler.py"}], "after_files": [{"content": "from collections import (\n OrderedDict,\n deque,\n)\nfrom typing import (\n Any,\n Callable,\n Sequence,\n Union,\n)\nimport warnings\n\nimport asttokens\n\nfrom vyper import (\n compile_lll,\n optimizer,\n)\nfrom vyper.ast_utils import (\n ast_to_dict,\n)\nfrom vyper.opcodes import (\n opcodes,\n)\nfrom vyper.parser import (\n parser,\n)\nfrom vyper.signatures import (\n sig_utils,\n)\nfrom vyper.signatures.interface import (\n extract_external_interface,\n extract_interface_str,\n)\nfrom vyper.typing import (\n ContractCodes,\n InterfaceDict,\n InterfaceImports,\n OutputDict,\n OutputFormats,\n)\n\n\ndef __compile(code, interface_codes=None, *args, **kwargs):\n ast = parser.parse_to_ast(code)\n lll = parser.parse_tree_to_lll(\n ast,\n code,\n interface_codes=interface_codes,\n runtime_only=kwargs.get('bytecode_runtime', False)\n )\n opt_lll = optimizer.optimize(lll)\n asm = compile_lll.compile_to_assembly(opt_lll)\n\n def find_nested_opcode(asm_list, key):\n if key in asm_list:\n return True\n else:\n sublists = [sub for sub in asm_list if isinstance(sub, list)]\n return any(find_nested_opcode(x, key) for x in sublists)\n\n if find_nested_opcode(asm, 'DEBUG'):\n warnings.warn(\n 'This code contains DEBUG opcodes! The DEBUG opcode will only work in '\n 'a supported EVM! 
It will FAIL on all other nodes!'\n )\n\n c, line_number_map = compile_lll.assembly_to_evm(asm)\n return c\n\n\ndef gas_estimate(origcode, *args, **kwargs):\n o = {}\n code = optimizer.optimize(parser.parse_to_lll(origcode, *args, **kwargs))\n\n # Extract the stuff inside the LLL bracket\n if code.value == 'seq':\n if len(code.args) > 0 and code.args[-1].value == 'return':\n code = code.args[-1].args[1].args[0]\n\n assert code.value == 'seq'\n for arg in code.args:\n if arg.func_name is not None:\n o[arg.func_name] = arg.total_gas\n return o\n\n\ndef mk_full_signature(code, *args, **kwargs):\n abi = sig_utils.mk_full_signature(parser.parse_to_ast(code), *args, **kwargs)\n # Add gas estimates for each function to ABI\n gas_estimates = gas_estimate(code, *args, **kwargs)\n for func in abi:\n try:\n func_signature = func['name']\n except KeyError:\n # constructor and fallback functions don't have a name\n continue\n\n func_name, _, _ = func_signature.partition('(')\n # This check ensures we skip __init__ since it has no estimate\n if func_name in gas_estimates:\n # TODO: mutation\n func['gas'] = gas_estimates[func_name]\n return abi\n\n\ndef get_asm(asm_list):\n output_string = ''\n skip_newlines = 0\n for node in asm_list:\n if isinstance(node, list):\n output_string += get_asm(node)\n continue\n\n is_push = isinstance(node, str) and node.startswith('PUSH')\n\n output_string += str(node) + ' '\n if skip_newlines:\n skip_newlines -= 1\n elif is_push:\n skip_newlines = int(node[4:]) - 1\n else:\n output_string += '\\n'\n return output_string\n\n\ndef get_source_map(code, contract_name, interface_codes=None, runtime_only=True, source_id=0):\n asm_list = compile_lll.compile_to_assembly(\n optimizer.optimize(\n parser.parse_to_lll(\n code,\n runtime_only=runtime_only,\n interface_codes=interface_codes)))\n c, line_number_map = compile_lll.assembly_to_evm(asm_list)\n # Sort line_number_map\n out = OrderedDict()\n for k in sorted(line_number_map.keys()):\n out[k] = line_number_map[k]\n\n out['pc_pos_map_compressed'] = compress_source_map(\n code,\n out['pc_pos_map'],\n out['pc_jump_map'],\n source_id\n )\n out['pc_pos_map'] = dict((k, v) for k, v in out['pc_pos_map'].items() if v)\n return out\n\n\ndef compress_source_map(code, pos_map, jump_map, source_id):\n linenos = asttokens.LineNumbers(code)\n compressed_map = f\"-1:-1:{source_id}:-;\"\n last_pos = [-1, -1, source_id]\n\n for pc in sorted(pos_map)[1:]:\n current_pos = [-1, -1, source_id]\n if pos_map[pc]:\n current_pos[0] = linenos.line_to_offset(*pos_map[pc][:2])\n current_pos[1] = linenos.line_to_offset(*pos_map[pc][2:])-current_pos[0]\n\n if pc in jump_map:\n current_pos.append(jump_map[pc])\n\n for i in range(2, -1, -1):\n if current_pos[i] != last_pos[i]:\n last_pos[i] = current_pos[i]\n elif len(current_pos) == i+1:\n current_pos.pop()\n else:\n current_pos[i] = \"\"\n\n compressed_map += \":\".join(str(i) for i in current_pos) + \";\"\n\n return compressed_map\n\n\ndef expand_source_map(compressed_map):\n source_map = [_expand_row(i) if i else None for i in compressed_map.split(';')[:-1]]\n\n for i, value in enumerate(source_map[1:], 1):\n if value is None:\n source_map[i] = source_map[i - 1][:3] + [None]\n continue\n for x in range(3):\n if source_map[i][x] is None:\n source_map[i][x] = source_map[i - 1][x]\n\n return source_map\n\n\ndef _expand_row(row):\n result = [None] * 4\n for i, value in enumerate(row.split(':')):\n if value:\n result[i] = value if i == 3 else int(value)\n return result\n\n\ndef get_opcodes(code, contract_name, 
bytecodes_runtime=False, interface_codes=None):\n bytecode = __compile(\n code,\n bytecode_runtime=bytecodes_runtime,\n interface_codes=interface_codes\n ).hex().upper()\n bytecode = deque(bytecode[i:i + 2] for i in range(0, len(bytecode), 2))\n opcode_map = dict((v[0], k) for k, v in opcodes.items())\n opcode_str = \"\"\n\n while bytecode:\n op = int(bytecode.popleft(), 16)\n opcode_str += opcode_map[op] + \" \"\n if \"PUSH\" not in opcode_map[op]:\n continue\n push_len = int(opcode_map[op][4:])\n opcode_str += \"0x\" + \"\".join(bytecode.popleft() for i in range(push_len)) + \" \"\n\n return opcode_str[:-1]\n\n\ndef _mk_abi_output(code, contract_name, interface_codes, source_id):\n return mk_full_signature(code, interface_codes=interface_codes)\n\n\ndef _mk_bytecode_output(code, contract_name, interface_codes, source_id):\n return '0x' + __compile(code, interface_codes=interface_codes).hex()\n\n\ndef _mk_bytecode_runtime_output(code, contract_name, interface_codes, source_id):\n return '0x' + __compile(code, bytecode_runtime=True, interface_codes=interface_codes).hex()\n\n\ndef _mk_ir_output(code, contract_name, interface_codes, source_id):\n return optimizer.optimize(parser.parse_to_lll(code, interface_codes=interface_codes))\n\n\ndef _mk_asm_output(code, contract_name, interface_codes, source_id):\n return get_asm(compile_lll.compile_to_assembly(\n optimizer.optimize(parser.parse_to_lll(code, interface_codes=interface_codes))\n ))\n\n\ndef _mk_source_map_output(code, contract_name, interface_codes, source_id):\n return get_source_map(\n code,\n contract_name,\n interface_codes=interface_codes,\n runtime_only=True,\n source_id=source_id\n )\n\n\ndef _mk_method_identifiers_output(code, contract_name, interface_codes, source_id):\n return sig_utils.mk_method_identifiers(code, interface_codes=interface_codes)\n\n\ndef _mk_interface_output(code, contract_name, interface_codes, source_id):\n return extract_interface_str(code, contract_name, interface_codes=interface_codes)\n\n\ndef _mk_external_interface_output(code, contract_name, interface_codes, source_id):\n return extract_external_interface(code, contract_name, interface_codes=interface_codes)\n\n\ndef _mk_opcodes(code, contract_name, interface_codes, source_id):\n return get_opcodes(code, contract_name, interface_codes=interface_codes)\n\n\ndef _mk_opcodes_runtime(code, contract_name, interface_codes, source_id):\n return get_opcodes(code, contract_name, bytecodes_runtime=True, interface_codes=interface_codes)\n\n\ndef _mk_ast_dict(code, contract_name, interface_codes, source_id):\n o = {\n 'contract_name': contract_name,\n 'ast': ast_to_dict(parser.parse_to_ast(code, source_id))\n }\n return o\n\n\noutput_formats_map = {\n 'abi': _mk_abi_output,\n 'ast_dict': _mk_ast_dict,\n 'bytecode': _mk_bytecode_output,\n 'bytecode_runtime': _mk_bytecode_runtime_output,\n 'ir': _mk_ir_output,\n 'asm': _mk_asm_output,\n 'source_map': _mk_source_map_output,\n 'method_identifiers': _mk_method_identifiers_output,\n 'interface': _mk_interface_output,\n 'external_interface': _mk_external_interface_output,\n 'opcodes': _mk_opcodes,\n 'opcodes_runtime': _mk_opcodes_runtime,\n}\n\n\ndef compile_codes(contract_sources: ContractCodes,\n output_formats: Union[OutputDict, OutputFormats, None] = None,\n exc_handler: Union[Callable, None] = None,\n interface_codes: Union[InterfaceDict, InterfaceImports, None] = None,\n initial_id: int = 0) -> OrderedDict:\n\n if output_formats is None:\n output_formats = ('bytecode',)\n if isinstance(output_formats, Sequence):\n 
output_formats = dict((k, output_formats) for k in contract_sources.keys())\n\n out: OrderedDict = OrderedDict()\n for source_id, contract_name in enumerate(sorted(contract_sources), start=initial_id):\n code = contract_sources[contract_name]\n for output_format in output_formats[contract_name]:\n if output_format not in output_formats_map:\n raise ValueError(f'Unsupported format type {repr(output_format)}')\n\n try:\n interfaces: Any = interface_codes\n if (\n isinstance(interfaces, dict) and\n contract_name in interfaces and\n isinstance(interfaces[contract_name], dict)\n ):\n interfaces = interfaces[contract_name]\n out.setdefault(contract_name, {})\n out[contract_name][output_format] = output_formats_map[output_format](\n # trailing newline fixes python parsing bug when source ends in a comment\n # https://bugs.python.org/issue35107\n code=f\"{code}\\n\",\n contract_name=contract_name,\n interface_codes=interfaces,\n source_id=source_id\n )\n except Exception as exc:\n if exc_handler is not None:\n exc_handler(contract_name, exc)\n else:\n raise exc\n\n return out\n\n\nUNKNOWN_CONTRACT_NAME = '<unknown>'\n\n\ndef compile_code(code, output_formats=None, interface_codes=None):\n contract_sources = {UNKNOWN_CONTRACT_NAME: code}\n\n return compile_codes(\n contract_sources,\n output_formats,\n interface_codes=interface_codes,\n )[UNKNOWN_CONTRACT_NAME]\n", "path": "vyper/compiler.py"}]} | 3,848 | 141 |
gh_patches_debug_2920 | rasdani/github-patches | git_diff | encode__starlette-195 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Check directory exists when instantiating `StaticFiles`
The `StaticFiles` application should ensure that the directory exists at the point it is instantiated.
(With an optional switch to turn this behavior off)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlette/staticfiles.py`
Content:
```
1 import os
2 import stat
3
4 from aiofiles.os import stat as aio_stat
5
6 from starlette.responses import FileResponse, PlainTextResponse, Response
7 from starlette.types import ASGIInstance, Receive, Scope, Send
8
9
10 class StaticFiles:
11 def __init__(self, *, directory: str) -> None:
12 self.directory = directory
13 self.config_checked = False
14
15 def __call__(self, scope: Scope) -> ASGIInstance:
16 assert scope["type"] == "http"
17 if scope["method"] not in ("GET", "HEAD"):
18 return PlainTextResponse("Method Not Allowed", status_code=405)
19 path = os.path.normpath(os.path.join(*scope["path"].split("/")))
20 if path.startswith(".."):
21 return PlainTextResponse("Not Found", status_code=404)
22 path = os.path.join(self.directory, path)
23 if self.config_checked:
24 check_directory = None
25 else:
26 check_directory = self.directory
27 self.config_checked = True
28 return _StaticFilesResponder(scope, path=path, check_directory=check_directory)
29
30
31 class _StaticFilesResponder:
32 def __init__(self, scope: Scope, path: str, check_directory: str = None) -> None:
33 self.scope = scope
34 self.path = path
35 self.check_directory = check_directory
36
37 async def check_directory_configured_correctly(self) -> None:
38 """
39 Perform a one-off configuration check that StaticFiles is actually
40 pointed at a directory, so that we can raise loud errors rather than
41 just returning 404 responses.
42 """
43 directory = self.check_directory
44 try:
45 stat_result = await aio_stat(directory)
46 except FileNotFoundError:
47 raise RuntimeError("StaticFiles directory '%s' does not exist." % directory)
48 if not (stat.S_ISDIR(stat_result.st_mode) or stat.S_ISLNK(stat_result.st_mode)):
49 raise RuntimeError("StaticFiles path '%s' is not a directory." % directory)
50
51 async def __call__(self, receive: Receive, send: Send) -> None:
52 if self.check_directory is not None:
53 await self.check_directory_configured_correctly()
54
55 try:
56 stat_result = await aio_stat(self.path)
57 except FileNotFoundError:
58 response = PlainTextResponse("Not Found", status_code=404) # type: Response
59 else:
60 mode = stat_result.st_mode
61 if not stat.S_ISREG(mode):
62 response = PlainTextResponse("Not Found", status_code=404)
63 else:
64 response = FileResponse(self.path, stat_result=stat_result)
65
66 await response(receive, send)
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/starlette/staticfiles.py b/starlette/staticfiles.py
--- a/starlette/staticfiles.py
+++ b/starlette/staticfiles.py
@@ -8,7 +8,9 @@
class StaticFiles:
- def __init__(self, *, directory: str) -> None:
+ def __init__(self, *, directory: str, check_dir: bool = True) -> None:
+ if check_dir and not os.path.isdir(directory):
+ raise RuntimeError("Directory '%s' does not exist" % directory)
self.directory = directory
self.config_checked = False
| {"golden_diff": "diff --git a/starlette/staticfiles.py b/starlette/staticfiles.py\n--- a/starlette/staticfiles.py\n+++ b/starlette/staticfiles.py\n@@ -8,7 +8,9 @@\n \n \n class StaticFiles:\n- def __init__(self, *, directory: str) -> None:\n+ def __init__(self, *, directory: str, check_dir: bool = True) -> None:\n+ if check_dir and not os.path.isdir(directory):\n+ raise RuntimeError(\"Directory '%s' does not exist\" % directory)\n self.directory = directory\n self.config_checked = False\n", "issue": "Check directory exists when instantiating `StaticFiles`\nThe `StaticFiles` application should ensure that the directory exists at the point it is instantiated.\r\n\r\n(With an optional switch to turn this behavior off)\n", "before_files": [{"content": "import os\nimport stat\n\nfrom aiofiles.os import stat as aio_stat\n\nfrom starlette.responses import FileResponse, PlainTextResponse, Response\nfrom starlette.types import ASGIInstance, Receive, Scope, Send\n\n\nclass StaticFiles:\n def __init__(self, *, directory: str) -> None:\n self.directory = directory\n self.config_checked = False\n\n def __call__(self, scope: Scope) -> ASGIInstance:\n assert scope[\"type\"] == \"http\"\n if scope[\"method\"] not in (\"GET\", \"HEAD\"):\n return PlainTextResponse(\"Method Not Allowed\", status_code=405)\n path = os.path.normpath(os.path.join(*scope[\"path\"].split(\"/\")))\n if path.startswith(\"..\"):\n return PlainTextResponse(\"Not Found\", status_code=404)\n path = os.path.join(self.directory, path)\n if self.config_checked:\n check_directory = None\n else:\n check_directory = self.directory\n self.config_checked = True\n return _StaticFilesResponder(scope, path=path, check_directory=check_directory)\n\n\nclass _StaticFilesResponder:\n def __init__(self, scope: Scope, path: str, check_directory: str = None) -> None:\n self.scope = scope\n self.path = path\n self.check_directory = check_directory\n\n async def check_directory_configured_correctly(self) -> None:\n \"\"\"\n Perform a one-off configuration check that StaticFiles is actually\n pointed at a directory, so that we can raise loud errors rather than\n just returning 404 responses.\n \"\"\"\n directory = self.check_directory\n try:\n stat_result = await aio_stat(directory)\n except FileNotFoundError:\n raise RuntimeError(\"StaticFiles directory '%s' does not exist.\" % directory)\n if not (stat.S_ISDIR(stat_result.st_mode) or stat.S_ISLNK(stat_result.st_mode)):\n raise RuntimeError(\"StaticFiles path '%s' is not a directory.\" % directory)\n\n async def __call__(self, receive: Receive, send: Send) -> None:\n if self.check_directory is not None:\n await self.check_directory_configured_correctly()\n\n try:\n stat_result = await aio_stat(self.path)\n except FileNotFoundError:\n response = PlainTextResponse(\"Not Found\", status_code=404) # type: Response\n else:\n mode = stat_result.st_mode\n if not stat.S_ISREG(mode):\n response = PlainTextResponse(\"Not Found\", status_code=404)\n else:\n response = FileResponse(self.path, stat_result=stat_result)\n\n await response(receive, send)\n", "path": "starlette/staticfiles.py"}], "after_files": [{"content": "import os\nimport stat\n\nfrom aiofiles.os import stat as aio_stat\n\nfrom starlette.responses import FileResponse, PlainTextResponse, Response\nfrom starlette.types import ASGIInstance, Receive, Scope, Send\n\n\nclass StaticFiles:\n def __init__(self, *, directory: str, check_dir: bool = True) -> None:\n if check_dir and not os.path.isdir(directory):\n raise RuntimeError(\"Directory '%s' does not 
exist\" % directory)\n self.directory = directory\n self.config_checked = False\n\n def __call__(self, scope: Scope) -> ASGIInstance:\n assert scope[\"type\"] == \"http\"\n if scope[\"method\"] not in (\"GET\", \"HEAD\"):\n return PlainTextResponse(\"Method Not Allowed\", status_code=405)\n path = os.path.normpath(os.path.join(*scope[\"path\"].split(\"/\")))\n if path.startswith(\"..\"):\n return PlainTextResponse(\"Not Found\", status_code=404)\n path = os.path.join(self.directory, path)\n if self.config_checked:\n check_directory = None\n else:\n check_directory = self.directory\n self.config_checked = True\n return _StaticFilesResponder(scope, path=path, check_directory=check_directory)\n\n\nclass _StaticFilesResponder:\n def __init__(self, scope: Scope, path: str, check_directory: str = None) -> None:\n self.scope = scope\n self.path = path\n self.check_directory = check_directory\n\n async def check_directory_configured_correctly(self) -> None:\n \"\"\"\n Perform a one-off configuration check that StaticFiles is actually\n pointed at a directory, so that we can raise loud errors rather than\n just returning 404 responses.\n \"\"\"\n directory = self.check_directory\n try:\n stat_result = await aio_stat(directory)\n except FileNotFoundError:\n raise RuntimeError(\"StaticFiles directory '%s' does not exist.\" % directory)\n if not (stat.S_ISDIR(stat_result.st_mode) or stat.S_ISLNK(stat_result.st_mode)):\n raise RuntimeError(\"StaticFiles path '%s' is not a directory.\" % directory)\n\n async def __call__(self, receive: Receive, send: Send) -> None:\n if self.check_directory is not None:\n await self.check_directory_configured_correctly()\n\n try:\n stat_result = await aio_stat(self.path)\n except FileNotFoundError:\n response = PlainTextResponse(\"Not Found\", status_code=404) # type: Response\n else:\n mode = stat_result.st_mode\n if not stat.S_ISREG(mode):\n response = PlainTextResponse(\"Not Found\", status_code=404)\n else:\n response = FileResponse(self.path, stat_result=stat_result)\n\n await response(receive, send)\n", "path": "starlette/staticfiles.py"}]} | 995 | 127 |
gh_patches_debug_3809 | rasdani/github-patches | git_diff | saleor__saleor-1155 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
update_index not working with Elasticsearch 5.4
When running `python manage.py update_index` the following errors occurs:
```
elasticsearch.exceptions.RequestError: TransportError(400, 'No handler found for uri [//storefront__userprofile_user] and method [DELETE]')
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/search/backends/dashboard.py`
Content:
```
1 from collections import defaultdict
2
3 from . import get_search_backend
4 from .base import BaseSearchQuery
5 from ..index import get_indexed_models
6
7 CONTENT_TYPES_MAP = {
8 model.indexed_get_content_type(): model
9 for model in get_indexed_models()}
10
11 DEFAULT_BACKEND = get_search_backend('default')
12 DEFAULT_BACKEND_CLASS = DEFAULT_BACKEND.__class__
13 DEFAULT_BACKEND_RESULTS_CLASS = DEFAULT_BACKEND.results_class
14
15
16 class DashboardSearchQuery(BaseSearchQuery):
17 """
18 Query that will search in multiple indexes
19 """
20
21 def __init__(self, query_string,
22 fields=None, operator=None, order_by_relevance=True,
23 queryset_map=None):
24 if queryset_map:
25 queryset_map = {model.indexed_get_content_type(): queryset
26 for model, queryset in queryset_map.items()}
27 else:
28 queryset_map = {content_type: model.objects.all()
29 for content_type, model in CONTENT_TYPES_MAP.items()}
30 self.queryset_map = queryset_map
31 super(DashboardSearchQuery, self).__init__(
32 query_string=query_string, queryset=None, fields=fields,
33 operator=operator, order_by_relevance=order_by_relevance)
34
35 def get_inner_query(self):
36 if self.query_string is not None:
37 fields = self.fields or ['_all', '_partials']
38
39 if len(fields) == 1:
40 if self.operator == 'or':
41 query = {
42 'match': {
43 fields[0]: self.query_string,
44 }
45 }
46 else:
47 query = {
48 'match': {
49 fields[0]: {
50 'query': self.query_string,
51 'operator': self.operator,
52 }
53 }
54 }
55 else:
56 query = {
57 'multi_match': {
58 'query': self.query_string,
59 'fields': fields,
60 }
61 }
62
63 if self.operator != 'or':
64 query['multi_match']['operator'] = self.operator
65 else:
66 query = {
67 'match_all': {}
68 }
69
70 return query
71
72 def get_query(self):
73 return self.get_inner_query()
74
75
76 class DashboardSearchResults(DEFAULT_BACKEND_RESULTS_CLASS):
77
78 def _do_search(self):
79 # Params for elasticsearch query
80 params = dict(
81 body=self._get_es_body(),
82 _source=False,
83 from_=self.start,
84 index='{}*'.format(self.backend.get_index().name)
85 )
86 params[self.fields_param_name] = 'pk'
87
88 # Add size if set
89 if self.stop is not None:
90 params['size'] = self.stop - self.start
91 # Send to Elasticsearch
92 hits = self.backend.es.search(**params)
93 search_hits = defaultdict(list)
94 scores = {}
95 for hit in hits['hits']['hits']:
96 hit_type = hit['_type']
97 hit_pk = hit['fields']['pk'][0]
98 search_hits[hit_type].append(hit_pk)
99 scores[hit['_id']] = hit['_score']
100
101 # Group results by content type
102 results_by_model = {}
103 for content_type, hit_pks in search_hits.items():
104 queryset = self.query.queryset_map[content_type]
105 results_by_model[content_type] = queryset.filter(pk__in=hit_pks)
106
107 # Merge results back in one list ordered by search score
108 all_results = []
109 for content_type, hits in results_by_model.items():
110 for hit in hits:
111 score_key = '%s:%d' % (content_type, hit.pk)
112 setattr(hit, 'search_score', scores[score_key])
113 setattr(hit, 'content_type', content_type)
114 all_results.append(hit)
115 sorted_results = sorted(
116 all_results, key=lambda h: h.search_score, reverse=True)
117 return list(sorted_results)
118
119 def _get_es_body(self, for_count=False):
120 body = {
121 'query': self.query.get_query()
122 }
123
124 if not for_count:
125 sort = None
126
127 if sort is not None:
128 body['sort'] = sort
129
130 return body
131
132 def _do_count(self):
133 # Get count
134 hit_count = self.backend.es.count(
135 body=self._get_es_body(for_count=True),
136 index='{}*'.format(self.backend.get_index().name)
137 )['count']
138 # Add limits
139 hit_count -= self.start
140 if self.stop is not None:
141 hit_count = min(hit_count, self.stop - self.start)
142
143 return max(hit_count, 0)
144
145
146 class DashboardMultiTypeSearchBackend(DEFAULT_BACKEND_CLASS):
147 results_class = DashboardSearchResults
148 query_class = DashboardSearchQuery
149
150 def search(self, query_string,
151 model_or_queryset=None, fields=None, filters=None,
152 prefetch_related=None, operator=None, order_by_relevance=True,
153 queryset_map=None):
154 """
155 Multi-model search. Parameters that affect model or database
156 structure are skipped and not used in dashboard query implementation.
157 """
158 search_query = self.query_class(
159 query_string=query_string, fields=fields, operator=operator,
160 order_by_relevance=order_by_relevance, queryset_map=queryset_map)
161 return self.results_class(self, search_query)
162
163 SearchBackend = DashboardMultiTypeSearchBackend
164
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/saleor/search/backends/dashboard.py b/saleor/search/backends/dashboard.py
--- a/saleor/search/backends/dashboard.py
+++ b/saleor/search/backends/dashboard.py
@@ -94,7 +94,7 @@
scores = {}
for hit in hits['hits']['hits']:
hit_type = hit['_type']
- hit_pk = hit['fields']['pk'][0]
+ hit_pk = hit['_source']['pk']
search_hits[hit_type].append(hit_pk)
scores[hit['_id']] = hit['_score']
| {"golden_diff": "diff --git a/saleor/search/backends/dashboard.py b/saleor/search/backends/dashboard.py\n--- a/saleor/search/backends/dashboard.py\n+++ b/saleor/search/backends/dashboard.py\n@@ -94,7 +94,7 @@\n scores = {}\n for hit in hits['hits']['hits']:\n hit_type = hit['_type']\n- hit_pk = hit['fields']['pk'][0]\n+ hit_pk = hit['_source']['pk']\n search_hits[hit_type].append(hit_pk)\n scores[hit['_id']] = hit['_score']\n", "issue": "update_index not working with Elasticsearch 5.4\nWhen running `python manage.py update_index` the following errors occurs:\r\n```\r\nelasticsearch.exceptions.RequestError: TransportError(400, 'No handler found for uri [//storefront__userprofile_user] and method [DELETE]')\r\n```\n", "before_files": [{"content": "from collections import defaultdict\n\nfrom . import get_search_backend\nfrom .base import BaseSearchQuery\nfrom ..index import get_indexed_models\n\nCONTENT_TYPES_MAP = {\n model.indexed_get_content_type(): model\n for model in get_indexed_models()}\n\nDEFAULT_BACKEND = get_search_backend('default')\nDEFAULT_BACKEND_CLASS = DEFAULT_BACKEND.__class__\nDEFAULT_BACKEND_RESULTS_CLASS = DEFAULT_BACKEND.results_class\n\n\nclass DashboardSearchQuery(BaseSearchQuery):\n \"\"\"\n Query that will search in multiple indexes\n \"\"\"\n\n def __init__(self, query_string,\n fields=None, operator=None, order_by_relevance=True,\n queryset_map=None):\n if queryset_map:\n queryset_map = {model.indexed_get_content_type(): queryset\n for model, queryset in queryset_map.items()}\n else:\n queryset_map = {content_type: model.objects.all()\n for content_type, model in CONTENT_TYPES_MAP.items()}\n self.queryset_map = queryset_map\n super(DashboardSearchQuery, self).__init__(\n query_string=query_string, queryset=None, fields=fields,\n operator=operator, order_by_relevance=order_by_relevance)\n\n def get_inner_query(self):\n if self.query_string is not None:\n fields = self.fields or ['_all', '_partials']\n\n if len(fields) == 1:\n if self.operator == 'or':\n query = {\n 'match': {\n fields[0]: self.query_string,\n }\n }\n else:\n query = {\n 'match': {\n fields[0]: {\n 'query': self.query_string,\n 'operator': self.operator,\n }\n }\n }\n else:\n query = {\n 'multi_match': {\n 'query': self.query_string,\n 'fields': fields,\n }\n }\n\n if self.operator != 'or':\n query['multi_match']['operator'] = self.operator\n else:\n query = {\n 'match_all': {}\n }\n\n return query\n\n def get_query(self):\n return self.get_inner_query()\n\n\nclass DashboardSearchResults(DEFAULT_BACKEND_RESULTS_CLASS):\n\n def _do_search(self):\n # Params for elasticsearch query\n params = dict(\n body=self._get_es_body(),\n _source=False,\n from_=self.start,\n index='{}*'.format(self.backend.get_index().name)\n )\n params[self.fields_param_name] = 'pk'\n\n # Add size if set\n if self.stop is not None:\n params['size'] = self.stop - self.start\n # Send to Elasticsearch\n hits = self.backend.es.search(**params)\n search_hits = defaultdict(list)\n scores = {}\n for hit in hits['hits']['hits']:\n hit_type = hit['_type']\n hit_pk = hit['fields']['pk'][0]\n search_hits[hit_type].append(hit_pk)\n scores[hit['_id']] = hit['_score']\n\n # Group results by content type\n results_by_model = {}\n for content_type, hit_pks in search_hits.items():\n queryset = self.query.queryset_map[content_type]\n results_by_model[content_type] = queryset.filter(pk__in=hit_pks)\n\n # Merge results back in one list ordered by search score\n all_results = []\n for content_type, hits in results_by_model.items():\n for hit in 
hits:\n score_key = '%s:%d' % (content_type, hit.pk)\n setattr(hit, 'search_score', scores[score_key])\n setattr(hit, 'content_type', content_type)\n all_results.append(hit)\n sorted_results = sorted(\n all_results, key=lambda h: h.search_score, reverse=True)\n return list(sorted_results)\n\n def _get_es_body(self, for_count=False):\n body = {\n 'query': self.query.get_query()\n }\n\n if not for_count:\n sort = None\n\n if sort is not None:\n body['sort'] = sort\n\n return body\n\n def _do_count(self):\n # Get count\n hit_count = self.backend.es.count(\n body=self._get_es_body(for_count=True),\n index='{}*'.format(self.backend.get_index().name)\n )['count']\n # Add limits\n hit_count -= self.start\n if self.stop is not None:\n hit_count = min(hit_count, self.stop - self.start)\n\n return max(hit_count, 0)\n\n\nclass DashboardMultiTypeSearchBackend(DEFAULT_BACKEND_CLASS):\n results_class = DashboardSearchResults\n query_class = DashboardSearchQuery\n\n def search(self, query_string,\n model_or_queryset=None, fields=None, filters=None,\n prefetch_related=None, operator=None, order_by_relevance=True,\n queryset_map=None):\n \"\"\"\n Multi-model search. Parameters that affect model or database\n structure are skipped and not used in dashboard query implementation.\n \"\"\"\n search_query = self.query_class(\n query_string=query_string, fields=fields, operator=operator,\n order_by_relevance=order_by_relevance, queryset_map=queryset_map)\n return self.results_class(self, search_query)\n\nSearchBackend = DashboardMultiTypeSearchBackend\n", "path": "saleor/search/backends/dashboard.py"}], "after_files": [{"content": "from collections import defaultdict\n\nfrom . import get_search_backend\nfrom .base import BaseSearchQuery\nfrom ..index import get_indexed_models\n\nCONTENT_TYPES_MAP = {\n model.indexed_get_content_type(): model\n for model in get_indexed_models()}\n\nDEFAULT_BACKEND = get_search_backend('default')\nDEFAULT_BACKEND_CLASS = DEFAULT_BACKEND.__class__\nDEFAULT_BACKEND_RESULTS_CLASS = DEFAULT_BACKEND.results_class\n\n\nclass DashboardSearchQuery(BaseSearchQuery):\n \"\"\"\n Query that will search in multiple indexes\n \"\"\"\n\n def __init__(self, query_string,\n fields=None, operator=None, order_by_relevance=True,\n queryset_map=None):\n if queryset_map:\n queryset_map = {model.indexed_get_content_type(): queryset\n for model, queryset in queryset_map.items()}\n else:\n queryset_map = {content_type: model.objects.all()\n for content_type, model in CONTENT_TYPES_MAP.items()}\n self.queryset_map = queryset_map\n super(DashboardSearchQuery, self).__init__(\n query_string=query_string, queryset=None, fields=fields,\n operator=operator, order_by_relevance=order_by_relevance)\n\n def get_inner_query(self):\n if self.query_string is not None:\n fields = self.fields or ['_all', '_partials']\n\n if len(fields) == 1:\n if self.operator == 'or':\n query = {\n 'match': {\n fields[0]: self.query_string,\n }\n }\n else:\n query = {\n 'match': {\n fields[0]: {\n 'query': self.query_string,\n 'operator': self.operator,\n }\n }\n }\n else:\n query = {\n 'multi_match': {\n 'query': self.query_string,\n 'fields': fields,\n }\n }\n\n if self.operator != 'or':\n query['multi_match']['operator'] = self.operator\n else:\n query = {\n 'match_all': {}\n }\n\n return query\n\n def get_query(self):\n return self.get_inner_query()\n\n\nclass DashboardSearchResults(DEFAULT_BACKEND_RESULTS_CLASS):\n\n def _do_search(self):\n # Params for elasticsearch query\n params = dict(\n body=self._get_es_body(),\n 
_source=False,\n from_=self.start,\n index='{}*'.format(self.backend.get_index().name)\n )\n params[self.fields_param_name] = 'pk'\n\n # Add size if set\n if self.stop is not None:\n params['size'] = self.stop - self.start\n # Send to Elasticsearch\n hits = self.backend.es.search(**params)\n search_hits = defaultdict(list)\n scores = {}\n for hit in hits['hits']['hits']:\n hit_type = hit['_type']\n hit_pk = hit['_source']['pk']\n search_hits[hit_type].append(hit_pk)\n scores[hit['_id']] = hit['_score']\n\n # Group results by content type\n results_by_model = {}\n for content_type, hit_pks in search_hits.items():\n queryset = self.query.queryset_map[content_type]\n results_by_model[content_type] = queryset.filter(pk__in=hit_pks)\n\n # Merge results back in one list ordered by search score\n all_results = []\n for content_type, hits in results_by_model.items():\n for hit in hits:\n score_key = '%s:%d' % (content_type, hit.pk)\n setattr(hit, 'search_score', scores[score_key])\n setattr(hit, 'content_type', content_type)\n all_results.append(hit)\n sorted_results = sorted(\n all_results, key=lambda h: h.search_score, reverse=True)\n return list(sorted_results)\n\n def _get_es_body(self, for_count=False):\n body = {\n 'query': self.query.get_query()\n }\n\n if not for_count:\n sort = None\n\n if sort is not None:\n body['sort'] = sort\n\n return body\n\n def _do_count(self):\n # Get count\n hit_count = self.backend.es.count(\n body=self._get_es_body(for_count=True),\n index='{}*'.format(self.backend.get_index().name)\n )['count']\n # Add limits\n hit_count -= self.start\n if self.stop is not None:\n hit_count = min(hit_count, self.stop - self.start)\n\n return max(hit_count, 0)\n\n\nclass DashboardMultiTypeSearchBackend(DEFAULT_BACKEND_CLASS):\n results_class = DashboardSearchResults\n query_class = DashboardSearchQuery\n\n def search(self, query_string,\n model_or_queryset=None, fields=None, filters=None,\n prefetch_related=None, operator=None, order_by_relevance=True,\n queryset_map=None):\n \"\"\"\n Multi-model search. Parameters that affect model or database\n structure are skipped and not used in dashboard query implementation.\n \"\"\"\n search_query = self.query_class(\n query_string=query_string, fields=fields, operator=operator,\n order_by_relevance=order_by_relevance, queryset_map=queryset_map)\n return self.results_class(self, search_query)\n\nSearchBackend = DashboardMultiTypeSearchBackend\n", "path": "saleor/search/backends/dashboard.py"}]} | 1,811 | 125 |
gh_patches_debug_14985 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-2513 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Don't validate SAM transformed resources for rule I3042
### CloudFormation Lint Version
v0.71.1
### What operating system are you using?
Mac
### Describe the bug
When SAM transforms templates it can create hardcoded ARNs based on its scenario. It would make sense to not validate those ARNs against rule I3042
### Expected behavior
To not raise I3042 on resources that are created by SAM transform.
### Reproduction template
```yaml
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/resources/HardCodedArnProperties.py`
Content:
```
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 import re
6
7 from cfnlint.rules import CloudFormationLintRule, RuleMatch
8
9
10 class HardCodedArnProperties(CloudFormationLintRule):
11 """Checks Resources if ARNs use correctly placed Pseudo Parameters instead of hardcoded Partition, Region, and Account Number"""
12
13 id = "I3042"
14 shortdesc = "ARNs should use correctly placed Pseudo Parameters"
15 description = "Checks Resources if ARNs use correctly placed Pseudo Parameters instead of hardcoded Partition, Region, and Account Number"
16 source_url = ""
17 tags = ["resources"]
18 regex = re.compile(
19 r"arn:(\$\{[^:]*::[^:]*}|[^:]*):[^:]+:(\$\{[^:]*::[^:]*}|[^:]*):(\$\{[^:]*::[^:]*}|[^:]*)"
20 )
21
22 def __init__(self):
23 """Init"""
24 super().__init__()
25 self.config_definition = {
26 "partition": {
27 "default": True,
28 "type": "boolean",
29 },
30 "region": {
31 "default": False,
32 "type": "boolean",
33 },
34 "accountId": {
35 "default": False,
36 "type": "boolean",
37 },
38 }
39 self.configure()
40
41 def _match_values(self, cfnelem, path):
42 """Recursively search for values matching the searchRegex"""
43 values = []
44 if isinstance(cfnelem, dict):
45 for key in cfnelem:
46 pathprop = path[:]
47 pathprop.append(key)
48 values.extend(self._match_values(cfnelem[key], pathprop))
49 elif isinstance(cfnelem, list):
50 for index, item in enumerate(cfnelem):
51 pathprop = path[:]
52 pathprop.append(index)
53 values.extend(self._match_values(item, pathprop))
54 else:
55 # Leaf node
56 if isinstance(cfnelem, str): # and re.match(searchRegex, cfnelem):
57 for variable in re.findall(self.regex, cfnelem):
58 if "Fn::Sub" in path:
59 values.append(path + [variable])
60
61 return values
62
63 def match_values(self, cfn):
64 """
65 Search for values in all parts of the templates that match the searchRegex
66 """
67 results = []
68 results.extend(self._match_values(cfn.template.get("Resources", {}), []))
69 # Globals are removed during a transform. They need to be checked manually
70 results.extend(self._match_values(cfn.template.get("Globals", {}), []))
71 return results
72
73 def match(self, cfn):
74 """Check CloudFormation Resources"""
75 matches = []
76
77 # Get a list of paths to every leaf node string containing at least one ${parameter}
78 parameter_string_paths = self.match_values(cfn)
79 # We want to search all of the paths to check if each one contains an 'Fn::Sub'
80 for parameter_string_path in parameter_string_paths:
81 path = ["Resources"] + parameter_string_path[:-1]
82 candidate = parameter_string_path[-1]
83
84 # !Sub arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole
85 # is valid even with aws as the account #. This handles empty string
86 if self.config["partition"] and not re.match(
87 r"^\$\{\w+}|\$\{AWS::Partition}|$", candidate[0]
88 ):
89 # or not re.match(r'^(\$\{\w+}|\$\{AWS::Region}|)$', candidate[1]) or not re.match(r'^\$\{\w+}|\$\{AWS::AccountId}|aws|$', candidate[2]):
90 message = "ARN in Resource {0} contains hardcoded Partition in ARN or incorrectly placed Pseudo Parameters"
91 matches.append(RuleMatch(path, message.format(path[1])))
92 if self.config["region"] and not re.match(
93 r"^(\$\{\w+}|\$\{AWS::Region}|)$", candidate[1]
94 ):
95 # or or not re.match(r'^\$\{\w+}|\$\{AWS::AccountId}|aws|$', candidate[2]):
96 message = "ARN in Resource {0} contains hardcoded Region in ARN or incorrectly placed Pseudo Parameters"
97 matches.append(RuleMatch(path, message.format(path[1])))
98 if self.config["accountId"] and not re.match(
99 r"^\$\{\w+}|\$\{AWS::AccountId}|aws|$", candidate[2]
100 ):
101 message = "ARN in Resource {0} contains hardcoded AccountId in ARN or incorrectly placed Pseudo Parameters"
102 matches.append(RuleMatch(path, message.format(path[1])))
103
104 return matches
105
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cfnlint/rules/resources/HardCodedArnProperties.py b/src/cfnlint/rules/resources/HardCodedArnProperties.py
--- a/src/cfnlint/rules/resources/HardCodedArnProperties.py
+++ b/src/cfnlint/rules/resources/HardCodedArnProperties.py
@@ -71,9 +71,13 @@
return results
def match(self, cfn):
- """Check CloudFormation Resources"""
matches = []
+ transforms = cfn.transform_pre["Transform"]
+ transforms = transforms if isinstance(transforms, list) else [transforms]
+ if "AWS::Serverless-2016-10-31" in cfn.transform_pre["Transform"]:
+ return matches
+
# Get a list of paths to every leaf node string containing at least one ${parameter}
parameter_string_paths = self.match_values(cfn)
# We want to search all of the paths to check if each one contains an 'Fn::Sub'
| {"golden_diff": "diff --git a/src/cfnlint/rules/resources/HardCodedArnProperties.py b/src/cfnlint/rules/resources/HardCodedArnProperties.py\n--- a/src/cfnlint/rules/resources/HardCodedArnProperties.py\n+++ b/src/cfnlint/rules/resources/HardCodedArnProperties.py\n@@ -71,9 +71,13 @@\n return results\r\n \r\n def match(self, cfn):\r\n- \"\"\"Check CloudFormation Resources\"\"\"\r\n matches = []\r\n \r\n+ transforms = cfn.transform_pre[\"Transform\"]\r\n+ transforms = transforms if isinstance(transforms, list) else [transforms]\r\n+ if \"AWS::Serverless-2016-10-31\" in cfn.transform_pre[\"Transform\"]:\r\n+ return matches\r\n+\r\n # Get a list of paths to every leaf node string containing at least one ${parameter}\r\n parameter_string_paths = self.match_values(cfn)\r\n # We want to search all of the paths to check if each one contains an 'Fn::Sub'\n", "issue": "Don't validate SAM transformed resources for rule I3042\n### CloudFormation Lint Version\n\nv0.71.1\n\n### What operating system are you using?\n\nMac\n\n### Describe the bug\n\nWhen SAM transforms templates it can create hardcoded ARNs based on its scenario. It would make sense to not validate those ARNs against rule I3042\n\n### Expected behavior\n\nTo not raise I3042 on resources that are created by SAM transform.\n\n### Reproduction template\n\n```yaml\r\n\r\n```\n", "before_files": [{"content": "\"\"\"\r\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\r\nSPDX-License-Identifier: MIT-0\r\n\"\"\"\r\nimport re\r\n\r\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\r\n\r\n\r\nclass HardCodedArnProperties(CloudFormationLintRule):\r\n \"\"\"Checks Resources if ARNs use correctly placed Pseudo Parameters instead of hardcoded Partition, Region, and Account Number\"\"\"\r\n\r\n id = \"I3042\"\r\n shortdesc = \"ARNs should use correctly placed Pseudo Parameters\"\r\n description = \"Checks Resources if ARNs use correctly placed Pseudo Parameters instead of hardcoded Partition, Region, and Account Number\"\r\n source_url = \"\"\r\n tags = [\"resources\"]\r\n regex = re.compile(\r\n r\"arn:(\\$\\{[^:]*::[^:]*}|[^:]*):[^:]+:(\\$\\{[^:]*::[^:]*}|[^:]*):(\\$\\{[^:]*::[^:]*}|[^:]*)\"\r\n )\r\n\r\n def __init__(self):\r\n \"\"\"Init\"\"\"\r\n super().__init__()\r\n self.config_definition = {\r\n \"partition\": {\r\n \"default\": True,\r\n \"type\": \"boolean\",\r\n },\r\n \"region\": {\r\n \"default\": False,\r\n \"type\": \"boolean\",\r\n },\r\n \"accountId\": {\r\n \"default\": False,\r\n \"type\": \"boolean\",\r\n },\r\n }\r\n self.configure()\r\n\r\n def _match_values(self, cfnelem, path):\r\n \"\"\"Recursively search for values matching the searchRegex\"\"\"\r\n values = []\r\n if isinstance(cfnelem, dict):\r\n for key in cfnelem:\r\n pathprop = path[:]\r\n pathprop.append(key)\r\n values.extend(self._match_values(cfnelem[key], pathprop))\r\n elif isinstance(cfnelem, list):\r\n for index, item in enumerate(cfnelem):\r\n pathprop = path[:]\r\n pathprop.append(index)\r\n values.extend(self._match_values(item, pathprop))\r\n else:\r\n # Leaf node\r\n if isinstance(cfnelem, str): # and re.match(searchRegex, cfnelem):\r\n for variable in re.findall(self.regex, cfnelem):\r\n if \"Fn::Sub\" in path:\r\n values.append(path + [variable])\r\n\r\n return values\r\n\r\n def match_values(self, cfn):\r\n \"\"\"\r\n Search for values in all parts of the templates that match the searchRegex\r\n \"\"\"\r\n results = []\r\n results.extend(self._match_values(cfn.template.get(\"Resources\", {}), []))\r\n # Globals are removed 
during a transform. They need to be checked manually\r\n results.extend(self._match_values(cfn.template.get(\"Globals\", {}), []))\r\n return results\r\n\r\n def match(self, cfn):\r\n \"\"\"Check CloudFormation Resources\"\"\"\r\n matches = []\r\n\r\n # Get a list of paths to every leaf node string containing at least one ${parameter}\r\n parameter_string_paths = self.match_values(cfn)\r\n # We want to search all of the paths to check if each one contains an 'Fn::Sub'\r\n for parameter_string_path in parameter_string_paths:\r\n path = [\"Resources\"] + parameter_string_path[:-1]\r\n candidate = parameter_string_path[-1]\r\n\r\n # !Sub arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole\r\n # is valid even with aws as the account #. This handles empty string\r\n if self.config[\"partition\"] and not re.match(\r\n r\"^\\$\\{\\w+}|\\$\\{AWS::Partition}|$\", candidate[0]\r\n ):\r\n # or not re.match(r'^(\\$\\{\\w+}|\\$\\{AWS::Region}|)$', candidate[1]) or not re.match(r'^\\$\\{\\w+}|\\$\\{AWS::AccountId}|aws|$', candidate[2]):\r\n message = \"ARN in Resource {0} contains hardcoded Partition in ARN or incorrectly placed Pseudo Parameters\"\r\n matches.append(RuleMatch(path, message.format(path[1])))\r\n if self.config[\"region\"] and not re.match(\r\n r\"^(\\$\\{\\w+}|\\$\\{AWS::Region}|)$\", candidate[1]\r\n ):\r\n # or or not re.match(r'^\\$\\{\\w+}|\\$\\{AWS::AccountId}|aws|$', candidate[2]):\r\n message = \"ARN in Resource {0} contains hardcoded Region in ARN or incorrectly placed Pseudo Parameters\"\r\n matches.append(RuleMatch(path, message.format(path[1])))\r\n if self.config[\"accountId\"] and not re.match(\r\n r\"^\\$\\{\\w+}|\\$\\{AWS::AccountId}|aws|$\", candidate[2]\r\n ):\r\n message = \"ARN in Resource {0} contains hardcoded AccountId in ARN or incorrectly placed Pseudo Parameters\"\r\n matches.append(RuleMatch(path, message.format(path[1])))\r\n\r\n return matches\r\n", "path": "src/cfnlint/rules/resources/HardCodedArnProperties.py"}], "after_files": [{"content": "\"\"\"\r\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\r\nSPDX-License-Identifier: MIT-0\r\n\"\"\"\r\nimport re\r\n\r\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\r\n\r\n\r\nclass HardCodedArnProperties(CloudFormationLintRule):\r\n \"\"\"Checks Resources if ARNs use correctly placed Pseudo Parameters instead of hardcoded Partition, Region, and Account Number\"\"\"\r\n\r\n id = \"I3042\"\r\n shortdesc = \"ARNs should use correctly placed Pseudo Parameters\"\r\n description = \"Checks Resources if ARNs use correctly placed Pseudo Parameters instead of hardcoded Partition, Region, and Account Number\"\r\n source_url = \"\"\r\n tags = [\"resources\"]\r\n regex = re.compile(\r\n r\"arn:(\\$\\{[^:]*::[^:]*}|[^:]*):[^:]+:(\\$\\{[^:]*::[^:]*}|[^:]*):(\\$\\{[^:]*::[^:]*}|[^:]*)\"\r\n )\r\n\r\n def __init__(self):\r\n \"\"\"Init\"\"\"\r\n super().__init__()\r\n self.config_definition = {\r\n \"partition\": {\r\n \"default\": True,\r\n \"type\": \"boolean\",\r\n },\r\n \"region\": {\r\n \"default\": False,\r\n \"type\": \"boolean\",\r\n },\r\n \"accountId\": {\r\n \"default\": False,\r\n \"type\": \"boolean\",\r\n },\r\n }\r\n self.configure()\r\n\r\n def _match_values(self, cfnelem, path):\r\n \"\"\"Recursively search for values matching the searchRegex\"\"\"\r\n values = []\r\n if isinstance(cfnelem, dict):\r\n for key in cfnelem:\r\n pathprop = path[:]\r\n pathprop.append(key)\r\n values.extend(self._match_values(cfnelem[key], pathprop))\r\n elif isinstance(cfnelem, list):\r\n for index, item in enumerate(cfnelem):\r\n pathprop = path[:]\r\n pathprop.append(index)\r\n values.extend(self._match_values(item, pathprop))\r\n else:\r\n # Leaf node\r\n if isinstance(cfnelem, str): # and re.match(searchRegex, cfnelem):\r\n for variable in re.findall(self.regex, cfnelem):\r\n if \"Fn::Sub\" in path:\r\n values.append(path + [variable])\r\n\r\n return values\r\n\r\n def match_values(self, cfn):\r\n \"\"\"\r\n Search for values in all parts of the templates that match the searchRegex\r\n \"\"\"\r\n results = []\r\n results.extend(self._match_values(cfn.template.get(\"Resources\", {}), []))\r\n # Globals are removed during a transform. They need to be checked manually\r\n results.extend(self._match_values(cfn.template.get(\"Globals\", {}), []))\r\n return results\r\n\r\n def match(self, cfn):\r\n matches = []\r\n\r\n transforms = cfn.transform_pre[\"Transform\"]\r\n transforms = transforms if isinstance(transforms, list) else [transforms]\r\n if \"AWS::Serverless-2016-10-31\" in cfn.transform_pre[\"Transform\"]:\r\n return matches\r\n\r\n # Get a list of paths to every leaf node string containing at least one ${parameter}\r\n parameter_string_paths = self.match_values(cfn)\r\n # We want to search all of the paths to check if each one contains an 'Fn::Sub'\r\n for parameter_string_path in parameter_string_paths:\r\n path = [\"Resources\"] + parameter_string_path[:-1]\r\n candidate = parameter_string_path[-1]\r\n\r\n # !Sub arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole\r\n # is valid even with aws as the account #. 
This handles empty string\r\n if self.config[\"partition\"] and not re.match(\r\n r\"^\\$\\{\\w+}|\\$\\{AWS::Partition}|$\", candidate[0]\r\n ):\r\n # or not re.match(r'^(\\$\\{\\w+}|\\$\\{AWS::Region}|)$', candidate[1]) or not re.match(r'^\\$\\{\\w+}|\\$\\{AWS::AccountId}|aws|$', candidate[2]):\r\n message = \"ARN in Resource {0} contains hardcoded Partition in ARN or incorrectly placed Pseudo Parameters\"\r\n matches.append(RuleMatch(path, message.format(path[1])))\r\n if self.config[\"region\"] and not re.match(\r\n r\"^(\\$\\{\\w+}|\\$\\{AWS::Region}|)$\", candidate[1]\r\n ):\r\n # or or not re.match(r'^\\$\\{\\w+}|\\$\\{AWS::AccountId}|aws|$', candidate[2]):\r\n message = \"ARN in Resource {0} contains hardcoded Region in ARN or incorrectly placed Pseudo Parameters\"\r\n matches.append(RuleMatch(path, message.format(path[1])))\r\n if self.config[\"accountId\"] and not re.match(\r\n r\"^\\$\\{\\w+}|\\$\\{AWS::AccountId}|aws|$\", candidate[2]\r\n ):\r\n message = \"ARN in Resource {0} contains hardcoded AccountId in ARN or incorrectly placed Pseudo Parameters\"\r\n matches.append(RuleMatch(path, message.format(path[1])))\r\n\r\n return matches\r\n", "path": "src/cfnlint/rules/resources/HardCodedArnProperties.py"}]} | 1,616 | 218 |
gh_patches_debug_32901 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-4742 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Logger les suppressions de galleries et de publications
Logger juste le fait que ce soit une suppression, le type d’objet concerné et le slug histoire qu’on puisse facilement remonter aux logs de nginx correspondantes avec la date et l’heures en cas de problème.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zds/tutorialv2/receivers.py`
Content:
```
1 # coding: utf-8
2
3
4 import datetime
5 from django.dispatch.dispatcher import receiver
6 from django.utils.translation import ugettext_lazy as _
7 from zds.tutorialv2.models.models_database import PublishableContent
8 from zds.tutorialv2.signals import content_unpublished
9 from zds.utils import get_current_user
10 from zds.utils.models import Alert
11
12
13 @receiver(content_unpublished, sender=PublishableContent)
14 def cleanup_validation_alerts(sender, instance, **kwargs):
15 """
16 When opinions are unpublished (probably permanently), we must be sure all alerts are handled. For now we just \
17 resolve them.
18
19 :param sender: sender class
20 :param instance: object instance
21 :param kwargs: possibily moderator
22 """
23 if instance.is_opinion:
24 moderator = kwargs.get('moderator', get_current_user())
25 Alert.objects.filter(scope='CONTENT', content=instance).update(moderator=moderator,
26 resolve_reason=_('Le billet a été dépublié.'),
27 solved_date=datetime.datetime.now(),
28 solved=True)
29
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zds/tutorialv2/receivers.py b/zds/tutorialv2/receivers.py
--- a/zds/tutorialv2/receivers.py
+++ b/zds/tutorialv2/receivers.py
@@ -2,10 +2,15 @@
import datetime
+import logging
+
from django.dispatch.dispatcher import receiver
from django.utils.translation import ugettext_lazy as _
+from django.db import models
+
from zds.tutorialv2.models.models_database import PublishableContent
from zds.tutorialv2.signals import content_unpublished
+from zds.gallery.models import Gallery
from zds.utils import get_current_user
from zds.utils.models import Alert
@@ -26,3 +31,25 @@
resolve_reason=_('Le billet a été dépublié.'),
solved_date=datetime.datetime.now(),
solved=True)
+
+
+@receiver(models.signals.post_delete, sender=Gallery)
+@receiver(models.signals.post_delete, sender=PublishableContent)
+def log_content_deletion(sender, instance, **kwargs):
+ """
+ When a content or gallery is deleted, this action is logged.
+ """
+
+ logger = logging.getLogger(__name__)
+ current_user = get_current_user()
+
+ if current_user is None:
+ logger.info('%(instance_model)s #%(instance_pk)d (%(instance_slug)s) has been deleted. User not found.',
+ {'instance_model': type(instance).__name__, 'instance_pk': instance.pk,
+ 'instance_slug': instance.slug})
+ else:
+ logger.info('%(instance_model)s #%(instance_pk)d (%(instance_slug)s) has been deleted '
+ 'by user #%(user_pk)d (%(username)s).', {'instance_model': type(instance).__name__,
+ 'instance_pk': instance.pk, 'instance_slug': instance.slug,
+ 'user_pk': current_user.pk,
+ 'username': current_user.username})
| {"golden_diff": "diff --git a/zds/tutorialv2/receivers.py b/zds/tutorialv2/receivers.py\n--- a/zds/tutorialv2/receivers.py\n+++ b/zds/tutorialv2/receivers.py\n@@ -2,10 +2,15 @@\n \n \n import datetime\n+import logging\n+\n from django.dispatch.dispatcher import receiver\n from django.utils.translation import ugettext_lazy as _\n+from django.db import models\n+\n from zds.tutorialv2.models.models_database import PublishableContent\n from zds.tutorialv2.signals import content_unpublished\n+from zds.gallery.models import Gallery\n from zds.utils import get_current_user\n from zds.utils.models import Alert\n \n@@ -26,3 +31,25 @@\n resolve_reason=_('Le billet a \u00e9t\u00e9 d\u00e9publi\u00e9.'),\n solved_date=datetime.datetime.now(),\n solved=True)\n+\n+\n+@receiver(models.signals.post_delete, sender=Gallery)\n+@receiver(models.signals.post_delete, sender=PublishableContent)\n+def log_content_deletion(sender, instance, **kwargs):\n+ \"\"\"\n+ When a content or gallery is deleted, this action is logged.\n+ \"\"\"\n+\n+ logger = logging.getLogger(__name__)\n+ current_user = get_current_user()\n+\n+ if current_user is None:\n+ logger.info('%(instance_model)s #%(instance_pk)d (%(instance_slug)s) has been deleted. User not found.',\n+ {'instance_model': type(instance).__name__, 'instance_pk': instance.pk,\n+ 'instance_slug': instance.slug})\n+ else:\n+ logger.info('%(instance_model)s #%(instance_pk)d (%(instance_slug)s) has been deleted '\n+ 'by user #%(user_pk)d (%(username)s).', {'instance_model': type(instance).__name__,\n+ 'instance_pk': instance.pk, 'instance_slug': instance.slug,\n+ 'user_pk': current_user.pk,\n+ 'username': current_user.username})\n", "issue": "Logger les suppressions de galleries et de publications\nLogger juste le fait que ce soit une suppression, le type d\u2019objet concern\u00e9 et le slug histoire qu\u2019on puisse facilement remonter aux logs de nginx correspondantes avec la date et l\u2019heures en cas de probl\u00e8me.\n", "before_files": [{"content": "# coding: utf-8\n\n\nimport datetime\nfrom django.dispatch.dispatcher import receiver\nfrom django.utils.translation import ugettext_lazy as _\nfrom zds.tutorialv2.models.models_database import PublishableContent\nfrom zds.tutorialv2.signals import content_unpublished\nfrom zds.utils import get_current_user\nfrom zds.utils.models import Alert\n\n\n@receiver(content_unpublished, sender=PublishableContent)\ndef cleanup_validation_alerts(sender, instance, **kwargs):\n \"\"\"\n When opinions are unpublished (probably permanently), we must be sure all alerts are handled. 
For now we just \\\n resolve them.\n\n :param sender: sender class\n :param instance: object instance\n :param kwargs: possibily moderator\n \"\"\"\n if instance.is_opinion:\n moderator = kwargs.get('moderator', get_current_user())\n Alert.objects.filter(scope='CONTENT', content=instance).update(moderator=moderator,\n resolve_reason=_('Le billet a \u00e9t\u00e9 d\u00e9publi\u00e9.'),\n solved_date=datetime.datetime.now(),\n solved=True)\n", "path": "zds/tutorialv2/receivers.py"}], "after_files": [{"content": "# coding: utf-8\n\n\nimport datetime\nimport logging\n\nfrom django.dispatch.dispatcher import receiver\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.db import models\n\nfrom zds.tutorialv2.models.models_database import PublishableContent\nfrom zds.tutorialv2.signals import content_unpublished\nfrom zds.gallery.models import Gallery\nfrom zds.utils import get_current_user\nfrom zds.utils.models import Alert\n\n\n@receiver(content_unpublished, sender=PublishableContent)\ndef cleanup_validation_alerts(sender, instance, **kwargs):\n \"\"\"\n When opinions are unpublished (probably permanently), we must be sure all alerts are handled. For now we just \\\n resolve them.\n\n :param sender: sender class\n :param instance: object instance\n :param kwargs: possibily moderator\n \"\"\"\n if instance.is_opinion:\n moderator = kwargs.get('moderator', get_current_user())\n Alert.objects.filter(scope='CONTENT', content=instance).update(moderator=moderator,\n resolve_reason=_('Le billet a \u00e9t\u00e9 d\u00e9publi\u00e9.'),\n solved_date=datetime.datetime.now(),\n solved=True)\n\n\n@receiver(models.signals.post_delete, sender=Gallery)\n@receiver(models.signals.post_delete, sender=PublishableContent)\ndef log_content_deletion(sender, instance, **kwargs):\n \"\"\"\n When a content or gallery is deleted, this action is logged.\n \"\"\"\n\n logger = logging.getLogger(__name__)\n current_user = get_current_user()\n\n if current_user is None:\n logger.info('%(instance_model)s #%(instance_pk)d (%(instance_slug)s) has been deleted. User not found.',\n {'instance_model': type(instance).__name__, 'instance_pk': instance.pk,\n 'instance_slug': instance.slug})\n else:\n logger.info('%(instance_model)s #%(instance_pk)d (%(instance_slug)s) has been deleted '\n 'by user #%(user_pk)d (%(username)s).', {'instance_model': type(instance).__name__,\n 'instance_pk': instance.pk, 'instance_slug': instance.slug,\n 'user_pk': current_user.pk,\n 'username': current_user.username})\n", "path": "zds/tutorialv2/receivers.py"}]} | 594 | 419 |